diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 7a3ce1f2..89ad0210 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -8,6 +8,7 @@ "NODE_VERSION": "lts/*" } }, + "ghcr.io/devcontainers-extra/features/mysql-homebrew:1": {}, "settings": { "python.defaultInterpreterPath": "/usr/local/bin/python", "python.linting.enabled": true, @@ -22,6 +23,7 @@ "python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle", "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint", "python.linting.pylintArgs": ["--disable=C0111"] + }, "extensions": [ "ms-python.python", diff --git a/NOTICE b/NOTICE index 547595f9..6e2b44a4 100644 --- a/NOTICE +++ b/NOTICE @@ -1,12 +1,9 @@ -Copyright 2022 LinkedIn Corporation +Copyright 2023 LinkedIn Corporation All Rights Reserved. Licensed under the LinkedIn Learning Exercise File License (the "License"). See LICENSE in the project root for license information. -ATTRIBUTIONS: -[PLEASE PROVIDE ATTRIBUTIONS OR DELETE THIS AND THE ABOVE LINE “ATTRIBUTIONS”] - Please note, this project may automatically load third party code from external repositories (for example, NPM modules, Composer packages, or other dependencies). If so, such third party code may be subject to other license terms than as set diff --git a/README.md b/README.md index 422e0932..3e601fd1 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,10 @@ -# COURSENAME -This is the repository for the LinkedIn Learning course `course-name`. The full course is available from [LinkedIn Learning][lil-course-url]. +# Advanced Python: Working with Databases +This is the repository for the LinkedIn Learning course Advanced Python: Working with Databases. The full course is available from [LinkedIn Learning][lil-course-url]. -![course-name-alt-text][lil-thumbnail-url] +![Advanced Python: Working with Databases][lil-thumbnail-url] + +To create functional and useful Python applications, you need a database. Databases allow you to store data from user sessions, track inventory, make recommendations, and more. However, Python is compatible with many options: SQLite, MySQL, and PostgreSQL, among others. Selecting the right database is a skill that advanced developers are expected to master. This course provides an excellent primer, comparing the different types of databases that can be connected through the Python Database API. Instructor Kathryn Hodge teaches the differences between SQLite, MySQL, and PostgreSQL and shows how to use the ORM tool SQLAlchemy to query a database. The final chapters put your knowledge to practical use in two hands-on projects: developing a full-stack application with Python, PostgreSQL, and Flask and creating a data analysis app with pandas and Jupyter Notebook. By the end, you should feel comfortable creating and using databases and be able to decide which Python database is right for you. -_See the readme file in the main branch for updated instructions and information._ ## Instructions This repository has branches for each of the videos in the course. You can use the branch pop up menu in github to switch to a specific branch and take a look at the course at that stage, or you can add `/tree/BRANCH_NAME` to the URL to go to the branch you want to access. @@ -22,15 +23,16 @@ To resolve this issue: Add changes to git using this command: git add . Commit changes using this command: git commit -m "some message" -## Installing -1. 
To use these exercise files, you must have the following installed: - - [list of requirements for course] -2. Clone this repository into your local machine using the terminal (Mac), CMD (Windows), or a GUI tool like SourceTree. -3. [Course-specific instructions] +### Instructor + +Kathryn Hodge + +Software Engineer -[0]: # (Replace these placeholder URLs with actual course URLs) + -[lil-course-url]: https://www.linkedin.com/learning/ -[lil-thumbnail-url]: http:// +Check out my other courses on [LinkedIn Learning](https://www.linkedin.com/learning/instructors/kathryn-hodge). +[lil-course-url]: https://www.linkedin.com/learning/advanced-python-working-with-databases-22307421?dApp=59033956&leis=LAA +[lil-thumbnail-url]: https://media.licdn.com/dms/image/D560DAQGxhz_OMvM_mQ/learning-public-crop_675_1200/0/1683668063867?e=2147483647&v=beta&t=frT7osblpohhLDjZqRYyklw6-Fay7Mgtr5hsv0QvLuc diff --git a/mysql-sqlalchemy-workspace/bin/Activate.ps1 b/mysql-sqlalchemy-workspace/bin/Activate.ps1 new file mode 100644 index 00000000..2fb3852c --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/Activate.ps1 @@ -0,0 +1,241 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. 
+ +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. + if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. 
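For readers following along, Get-PyVenvConfig (defined above) boils down to a handful of parsing rules: split each line of pyvenv.cfg on the first `=`, trim surrounding whitespace, and strip one layer of surrounding quotes. A minimal Python sketch of the same rules, offered as an illustration only (the helper name read_pyvenv_cfg and the sample path are assumptions, not part of this patch):

    # Illustrative Python equivalent of Get-PyVenvConfig's `key = value` parsing.
    from pathlib import Path

    def read_pyvenv_cfg(config_dir: str) -> dict:
        config = {}
        cfg_path = Path(config_dir) / "pyvenv.cfg"
        if not cfg_path.exists():
            return config  # mirror the script: continue with an empty map
        for line in cfg_path.read_text().splitlines():
            key, sep, value = line.partition("=")
            if not sep:
                continue  # not a `key = value` line
            key, value = key.strip(), value.strip()
            # Remove extraneous quotations around a string value, as the script does.
            if value[:1] in ("'", '"'):
                value = value[1:-1]
            if key and value:
                config[key] = value
        return config

    print(read_pyvenv_cfg(".").get("prompt"))  # e.g. None outside a venv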
+if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. +$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/mysql-sqlalchemy-workspace/bin/activate b/mysql-sqlalchemy-workspace/bin/activate new file mode 100644 index 00000000..0137c25d --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/activate @@ -0,0 +1,66 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # This should detect bash and zsh, which have a hash command that must + be called to get it to forget past commands.
Without forgetting + past commands the $PATH changes we made may not be respected + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! + unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +VIRTUAL_ENV="/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace" +export VIRTUAL_ENV + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/bin:$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1="(mysql-sqlalchemy-workspace) ${PS1:-}" + export PS1 +fi + +# This should detect bash and zsh, which have a hash command that must +# be called to get it to forget past commands. Without forgetting +# past commands the $PATH changes we made may not be respected +if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null +fi diff --git a/mysql-sqlalchemy-workspace/bin/activate.csh b/mysql-sqlalchemy-workspace/bin/activate.csh new file mode 100644 index 00000000..6e55766f --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/activate.csh @@ -0,0 +1,25 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. +# Created by Davide Di Blasi <davidedb@gmail.com>. +# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com> + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate' + +# Unset irrelevant variables. +deactivate nondestructive + +setenv VIRTUAL_ENV "/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace" + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/bin:$PATH" + + +set _OLD_VIRTUAL_PROMPT="$prompt" + +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + set prompt = "(mysql-sqlalchemy-workspace) $prompt" +endif + +alias pydoc python -m pydoc + +rehash diff --git a/mysql-sqlalchemy-workspace/bin/activate.fish b/mysql-sqlalchemy-workspace/bin/activate.fish new file mode 100644 index 00000000..01907635 --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/activate.fish @@ -0,0 +1,64 @@ +# This file must be used with "source <venv>/bin/activate.fish" *from fish* +# (https://fishshell.com/); you cannot run it directly. + +function deactivate -d "Exit virtual environment and return to normal shell environment" + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + functions -e fish_prompt + set -e _OLD_FISH_PROMPT_OVERRIDE + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end + + set -e VIRTUAL_ENV + if test "$argv[1]" != "nondestructive" + # Self-destruct!
+ functions -e deactivate + end +end + +# Unset irrelevant variables. +deactivate nondestructive + +set -gx VIRTUAL_ENV "/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace" + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/bin" $PATH + +# Unset PYTHONHOME if set. +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # fish uses a function instead of an env var to generate the prompt. + + # Save the current fish_prompt function as the function _old_fish_prompt. + functions -c fish_prompt _old_fish_prompt + + # With the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command. + set -l old_status $status + + # Output the venv prompt; color taken from the blue of the Python logo. + printf "%s%s%s" (set_color 4B8BBE) "(mysql-sqlalchemy-workspace) " (set_color normal) + + # Restore the return status of the previous command. + echo "exit $old_status" | . + # Output the original/"old" prompt. + _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" +end diff --git a/mysql-sqlalchemy-workspace/bin/pip b/mysql-sqlalchemy-workspace/bin/pip new file mode 100755 index 00000000..c40badde --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/pip @@ -0,0 +1,8 @@ +#!/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/mysql-sqlalchemy-workspace/bin/pip3 b/mysql-sqlalchemy-workspace/bin/pip3 new file mode 100755 index 00000000..c40badde --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/pip3 @@ -0,0 +1,8 @@ +#!/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/mysql-sqlalchemy-workspace/bin/pip3.9 b/mysql-sqlalchemy-workspace/bin/pip3.9 new file mode 100755 index 00000000..c40badde --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/pip3.9 @@ -0,0 +1,8 @@ +#!/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/mysql-sqlalchemy-workspace/bin/python b/mysql-sqlalchemy-workspace/bin/python new file mode 120000 index 00000000..b8a0adbb --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/python @@ -0,0 +1 @@ +python3 \ No newline at end of file diff --git a/mysql-sqlalchemy-workspace/bin/python3 b/mysql-sqlalchemy-workspace/bin/python3 new file mode 120000 index 00000000..f25545fe --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/python3 @@ -0,0 +1 @@ +/Library/Developer/CommandLineTools/usr/bin/python3 \ No newline at end of file diff --git a/mysql-sqlalchemy-workspace/bin/python3.9 b/mysql-sqlalchemy-workspace/bin/python3.9 new file mode 120000 index 00000000..b8a0adbb --- /dev/null +++ b/mysql-sqlalchemy-workspace/bin/python3.9 @@ -0,0 +1 @@ +python3 \ No newline at 
end of file diff --git a/mysql-sqlalchemy-workspace/database.py b/mysql-sqlalchemy-workspace/database.py new file mode 100644 index 00000000..500b9643 --- /dev/null +++ b/mysql-sqlalchemy-workspace/database.py @@ -0,0 +1,47 @@ +from sqlalchemy import Column, String, Integer, ForeignKey, create_engine +from sqlalchemy.orm import registry, relationship + +engine = create_engine('mysql+mysqlconnector://root:password@localhost:3306/projects', + echo=True) + +mapper_registry = registry() +#mapper_registry.metadata + +Base = mapper_registry.generate_base() + +class Project(Base): + __tablename__ = 'projects' + project_id = Column(Integer, primary_key=True) + title = Column(String(length=50)) + description = Column(String(length=50)) + + def __repr__(self): + return "<Project(title='{0}', description='{1}')>".format( + self.title, self.description) + +class Task(Base): + __tablename__ = 'tasks' + task_id = Column(Integer, primary_key=True) + project_id = Column(Integer, ForeignKey('projects.project_id')) + description = Column(String(length=50)) + + project = relationship("Project") + + def __repr__(self): + return "<Task(description='{0}')>".format(self.description) + +Base.metadata.create_all(engine) + + + + + + + + + + + + + + diff --git a/mysql-sqlalchemy-workspace/include/site/python3.9/greenlet/greenlet.h b/mysql-sqlalchemy-workspace/include/site/python3.9/greenlet/greenlet.h new file mode 100644 index 00000000..d02a16e4 --- /dev/null +++ b/mysql-sqlalchemy-workspace/include/site/python3.9/greenlet/greenlet.h @@ -0,0 +1,164 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ + +/* Greenlet object interface */ + +#ifndef Py_GREENLETOBJECT_H +#define Py_GREENLETOBJECT_H + + +#include <Python.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is deprecated and undocumented. It does not change. */ +#define GREENLET_VERSION "1.0.0" + +#ifndef GREENLET_MODULE +#define implementation_ptr_t void* +#endif + +typedef struct _greenlet { + PyObject_HEAD + PyObject* weakreflist; + PyObject* dict; + implementation_ptr_t pimpl; +} PyGreenlet; + +#define PyGreenlet_Check(op) (op && PyObject_TypeCheck(op, &PyGreenlet_Type)) + + +/* C API functions */ + +/* Total number of symbols that are exported */ +#define PyGreenlet_API_pointers 12 + +#define PyGreenlet_Type_NUM 0 +#define PyExc_GreenletError_NUM 1 +#define PyExc_GreenletExit_NUM 2 + +#define PyGreenlet_New_NUM 3 +#define PyGreenlet_GetCurrent_NUM 4 +#define PyGreenlet_Throw_NUM 5 +#define PyGreenlet_Switch_NUM 6 +#define PyGreenlet_SetParent_NUM 7 + +#define PyGreenlet_MAIN_NUM 8 +#define PyGreenlet_STARTED_NUM 9 +#define PyGreenlet_ACTIVE_NUM 10 +#define PyGreenlet_GET_PARENT_NUM 11 + +#ifndef GREENLET_MODULE +/* This section is used by modules that use the greenlet C API */ +static void** _PyGreenlet_API = NULL; + +# define PyGreenlet_Type \ + (*(PyTypeObject*)_PyGreenlet_API[PyGreenlet_Type_NUM]) + +# define PyExc_GreenletError \ + ((PyObject*)_PyGreenlet_API[PyExc_GreenletError_NUM]) + +# define PyExc_GreenletExit \ + ((PyObject*)_PyGreenlet_API[PyExc_GreenletExit_NUM]) + +/* + * PyGreenlet_New(PyObject *args) + * + * greenlet.greenlet(run, parent=None) + */ +# define PyGreenlet_New \ + (*(PyGreenlet * (*)(PyObject * run, PyGreenlet * parent)) \ + _PyGreenlet_API[PyGreenlet_New_NUM]) + +/* + * PyGreenlet_GetCurrent(void) + * + * greenlet.getcurrent() + */ +# define PyGreenlet_GetCurrent \ + (*(PyGreenlet * (*)(void)) _PyGreenlet_API[PyGreenlet_GetCurrent_NUM]) + +/* + * PyGreenlet_Throw( + * PyGreenlet *greenlet, + * PyObject *typ, + * PyObject *val, + * PyObject *tb) + * + * g.throw(...)
+ */ +# define PyGreenlet_Throw \ + (*(PyObject * (*)(PyGreenlet * self, \ + PyObject * typ, \ + PyObject * val, \ + PyObject * tb)) \ + _PyGreenlet_API[PyGreenlet_Throw_NUM]) + +/* + * PyGreenlet_Switch(PyGreenlet *greenlet, PyObject *args) + * + * g.switch(*args, **kwargs) + */ +# define PyGreenlet_Switch \ + (*(PyObject * \ + (*)(PyGreenlet * greenlet, PyObject * args, PyObject * kwargs)) \ + _PyGreenlet_API[PyGreenlet_Switch_NUM]) + +/* + * PyGreenlet_SetParent(PyObject *greenlet, PyObject *new_parent) + * + * g.parent = new_parent + */ +# define PyGreenlet_SetParent \ + (*(int (*)(PyGreenlet * greenlet, PyGreenlet * nparent)) \ + _PyGreenlet_API[PyGreenlet_SetParent_NUM]) + +/* + * PyGreenlet_GetParent(PyObject* greenlet) + * + * return greenlet.parent; + * + * This could return NULL even if there is no exception active. + * If it does not return NULL, you are responsible for decrementing the + * reference count. + */ +# define PyGreenlet_GetParent \ + (*(PyGreenlet* (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_GET_PARENT_NUM]) + +/* + * deprecated, undocumented alias. + */ +# define PyGreenlet_GET_PARENT PyGreenlet_GetParent + +# define PyGreenlet_MAIN \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_MAIN_NUM]) + +# define PyGreenlet_STARTED \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_STARTED_NUM]) + +# define PyGreenlet_ACTIVE \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_ACTIVE_NUM]) + + + + +/* Macro that imports greenlet and initializes C API */ +/* NOTE: This has actually moved to ``greenlet._greenlet._C_API``, but we + keep the older definition to be sure older code that might have a copy of + the header still works. */ +# define PyGreenlet_Import() \ + { \ + _PyGreenlet_API = (void**)PyCapsule_Import("greenlet._C_API", 0); \ + } + +#endif /* GREENLET_MODULE */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_GREENLETOBJECT_H */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/INSTALLER b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/LICENSE b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/LICENSE new file mode 100644 index 00000000..7bf9bbe9 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/LICENSE @@ -0,0 +1,19 @@ +Copyright 2005-2023 SQLAlchemy authors and contributors <see AUTHORS file>. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/METADATA b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/METADATA new file mode 100644 index 00000000..9366536d --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/METADATA @@ -0,0 +1,236 @@ +Metadata-Version: 2.1 +Name: SQLAlchemy +Version: 2.0.6 +Summary: Database Abstraction Library +Home-page: https://www.sqlalchemy.org +Author: Mike Bayer +Author-email: mike_mp@zzzcomputing.com +License: MIT +Project-URL: Documentation, https://docs.sqlalchemy.org +Project-URL: Issue Tracker, https://github.com/sqlalchemy/sqlalchemy/ +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Database :: Front-Ends +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: typing-extensions (>=4.2.0) +Requires-Dist: greenlet (!=0.4.17) ; platform_machine == "aarch64" or (platform_machine == "ppc64le" or (platform_machine == "x86_64" or (platform_machine == "amd64" or (platform_machine == "AMD64" or (platform_machine == "win32" or platform_machine == "WIN32"))))) +Requires-Dist: importlib-metadata ; python_version < "3.8" +Provides-Extra: aiomysql +Requires-Dist: greenlet (!=0.4.17) ; extra == 'aiomysql' +Requires-Dist: aiomysql ; extra == 'aiomysql' +Provides-Extra: aiosqlite +Requires-Dist: greenlet (!=0.4.17) ; extra == 'aiosqlite' +Requires-Dist: aiosqlite ; extra == 'aiosqlite' +Requires-Dist: typing-extensions (!=3.10.0.1) ; extra == 'aiosqlite' +Provides-Extra: asyncio +Requires-Dist: greenlet (!=0.4.17) ; extra == 'asyncio' +Provides-Extra: asyncmy +Requires-Dist: greenlet (!=0.4.17) ; extra == 'asyncmy' +Requires-Dist: asyncmy (!=0.2.4,!=0.2.6,>=0.2.3) ; extra == 'asyncmy' +Provides-Extra: mariadb_connector +Requires-Dist: mariadb (!=1.1.2,!=1.1.5,>=1.0.1) ; extra == 'mariadb_connector' +Provides-Extra: mssql +Requires-Dist: pyodbc ; extra == 'mssql' +Provides-Extra: mssql_pymssql +Requires-Dist: pymssql ; extra == 'mssql_pymssql' +Provides-Extra: mssql_pyodbc +Requires-Dist: pyodbc ; extra == 'mssql_pyodbc' +Provides-Extra: mypy +Requires-Dist: mypy (>=0.910) ; extra == 'mypy' +Provides-Extra: mysql +Requires-Dist: mysqlclient (>=1.4.0) ; extra == 'mysql' +Provides-Extra: mysql_connector +Requires-Dist: mysql-connector-python ; extra == 'mysql_connector' +Provides-Extra: oracle +Requires-Dist: cx-oracle (>=7) ; extra == 'oracle' +Provides-Extra: oracle_oracledb +Requires-Dist: oracledb (>=1.0.1) ; extra == 'oracle_oracledb' +Provides-Extra: postgresql +Requires-Dist: 
psycopg2 (>=2.7) ; extra == 'postgresql' +Provides-Extra: postgresql_asyncpg +Requires-Dist: greenlet (!=0.4.17) ; extra == 'postgresql_asyncpg' +Requires-Dist: asyncpg ; extra == 'postgresql_asyncpg' +Provides-Extra: postgresql_pg8000 +Requires-Dist: pg8000 (>=1.29.1) ; extra == 'postgresql_pg8000' +Provides-Extra: postgresql_psycopg +Requires-Dist: psycopg (>=3.0.7) ; extra == 'postgresql_psycopg' +Provides-Extra: postgresql_psycopg2binary +Requires-Dist: psycopg2-binary ; extra == 'postgresql_psycopg2binary' +Provides-Extra: postgresql_psycopg2cffi +Requires-Dist: psycopg2cffi ; extra == 'postgresql_psycopg2cffi' +Provides-Extra: pymysql +Requires-Dist: pymysql ; extra == 'pymysql' +Provides-Extra: sqlcipher +Requires-Dist: sqlcipher3-binary ; extra == 'sqlcipher' + +SQLAlchemy +========== + +|PyPI| |Python| |Downloads| + +.. |PyPI| image:: https://img.shields.io/pypi/v/sqlalchemy + :target: https://pypi.org/project/sqlalchemy + :alt: PyPI + +.. |Python| image:: https://img.shields.io/pypi/pyversions/sqlalchemy + :target: https://pypi.org/project/sqlalchemy + :alt: PyPI - Python Version + +.. |Downloads| image:: https://img.shields.io/pypi/dm/sqlalchemy + :target: https://pypi.org/project/sqlalchemy + :alt: PyPI - Downloads + + +The Python SQL Toolkit and Object Relational Mapper + +Introduction +------------- + +SQLAlchemy is the Python SQL toolkit and Object Relational Mapper +that gives application developers the full power and +flexibility of SQL. SQLAlchemy provides a full suite +of well known enterprise-level persistence patterns, +designed for efficient and high-performing database +access, adapted into a simple and Pythonic domain +language. + +Major SQLAlchemy features include: + +* An industrial strength ORM, built + from the core on the identity map, unit of work, + and data mapper patterns. These patterns + allow transparent persistence of objects + using a declarative configuration system. + Domain models + can be constructed and manipulated naturally, + and changes are synchronized with the + current transaction automatically. +* A relationally-oriented query system, exposing + the full range of SQL's capabilities + explicitly, including joins, subqueries, + correlation, and most everything else, + in terms of the object model. + Writing queries with the ORM uses the same + techniques of relational composition you use + when writing SQL. While you can drop into + literal SQL at any time, it's virtually never + needed. +* A comprehensive and flexible system + of eager loading for related collections and objects. + Collections are cached within a session, + and can be loaded on individual access, all + at once using joins, or by query per collection + across the full result set. +* A Core SQL construction system and DBAPI + interaction layer. The SQLAlchemy Core is + separate from the ORM and is a full database + abstraction layer in its own right, and includes + an extensible Python-based SQL expression + language, schema metadata, connection pooling, + type coercion, and custom types. +* All primary and foreign key constraints are + assumed to be composite and natural. Surrogate + integer primary keys are of course still the + norm, but SQLAlchemy never assumes or hardcodes + to this model. +* Database introspection and generation. Database + schemas can be "reflected" in one step into + Python structures representing database metadata; + those same structures can then generate + CREATE statements right back out - all within + the Core, independent of the ORM. 
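The reflection bullet above is easy to demonstrate. Here is a minimal sketch using SQLAlchemy Core in the 2.0 style; the SQLite URL is an illustrative assumption, not something this patch configures::

    # Hedged sketch: reflect an existing schema into Python metadata structures.
    from sqlalchemy import MetaData, create_engine

    engine = create_engine("sqlite:///projects.db")  # illustrative URL
    metadata = MetaData()
    metadata.reflect(bind=engine)   # one step: load table definitions from the db
    for table in metadata.sorted_tables:
        print(table.name, [column.name for column in table.columns])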
+ +SQLAlchemy's philosophy: + +* SQL databases behave less and less like object + collections the more size and performance start to + matter; object collections behave less and less like + tables and rows the more abstraction starts to matter. + SQLAlchemy aims to accommodate both of these + principles. +* An ORM doesn't need to hide the "R". A relational + database provides rich, set-based functionality + that should be fully exposed. SQLAlchemy's + ORM provides an open-ended set of patterns + that allow a developer to construct a custom + mediation layer between a domain model and + a relational schema, turning the so-called + "object relational impedance" issue into + a distant memory. +* The developer, in all cases, makes all decisions + regarding the design, structure, and naming conventions + of both the object model as well as the relational + schema. SQLAlchemy only provides the means + to automate the execution of these decisions. +* With SQLAlchemy, there's no such thing as + "the ORM generated a bad query" - you + retain full control over the structure of + queries, including how joins are organized, + how subqueries and correlation is used, what + columns are requested. Everything SQLAlchemy + does is ultimately the result of a developer-initiated + decision. +* Don't use an ORM if the problem doesn't need one. + SQLAlchemy consists of a Core and separate ORM + component. The Core offers a full SQL expression + language that allows Pythonic construction + of SQL constructs that render directly to SQL + strings for a target database, returning + result sets that are essentially enhanced DBAPI + cursors. +* Transactions should be the norm. With SQLAlchemy's + ORM, nothing goes to permanent storage until + commit() is called. SQLAlchemy encourages applications + to create a consistent means of delineating + the start and end of a series of operations. +* Never render a literal value in a SQL statement. + Bound parameters are used to the greatest degree + possible, allowing query optimizers to cache + query plans effectively and making SQL injection + attacks a non-issue. + +Documentation +------------- + +Latest documentation is at: + +https://www.sqlalchemy.org/docs/ + +Installation / Requirements +--------------------------- + +Full documentation for installation is at +`Installation `_. + +Getting Help / Development / Bug reporting +------------------------------------------ + +Please refer to the `SQLAlchemy Community Guide `_. + +Code of Conduct +--------------- + +Above all, SQLAlchemy places great emphasis on polite, thoughtful, and +constructive communication between users and developers. +Please see our current Code of Conduct at +`Code of Conduct `_. + +License +------- + +SQLAlchemy is distributed under the `MIT license +`_. 
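To ground the bound-parameter point from the philosophy list above, a small sketch with SQLAlchemy's text() construct; the table and values are illustrative::

    # Hedged sketch: values travel as bound parameters, never as SQL literals.
    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite:///:memory:")
    with engine.begin() as conn:  # nothing persists until the block commits
        conn.execute(text("CREATE TABLE projects (title VARCHAR(50))"))
        conn.execute(text("INSERT INTO projects (title) VALUES (:title)"),
                     {"title": "Organize photos"})
        rows = conn.execute(text("SELECT title FROM projects WHERE title = :t"),
                            {"t": "Organize photos"}).all()
    print(rows)  # [('Organize photos',)]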
+ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/RECORD b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/RECORD new file mode 100644 index 00000000..f0857496 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/RECORD @@ -0,0 +1,512 @@ +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/connectors/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/connectors/pyodbc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/cyextension/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mssql/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mssql/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mssql/information_schema.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mssql/json.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mssql/provision.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mssql/pymssql.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mssql/pyodbc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/aiomysql.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/asyncmy.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/cymysql.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/dml.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/enumerated.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/expression.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/json.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/mariadb.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/mariadbconnector.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/mysqldb.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/provision.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/pymysql.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/pyodbc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/reflection.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/reserved_words.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/types.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/oracle/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/oracle/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/oracle/cx_oracle.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/oracle/dictionary.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/oracle/oracledb.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/oracle/provision.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/oracle/types.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/_psycopg_common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/array.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/asyncpg.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/dml.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/ext.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/hstore.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/json.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/named_types.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/pg8000.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/pg_catalog.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/provision.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/psycopg.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/psycopg2.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/ranges.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/types.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/sqlite/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/sqlite/aiosqlite.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/sqlite/base.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/sqlite/dml.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/sqlite/json.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/sqlite/provision.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/dialects/sqlite/pysqlite.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/_py_processors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/_py_row.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/_py_util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/characteristics.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/create.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/cursor.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/default.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/events.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/interfaces.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/mock.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/processors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/reflection.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/result.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/row.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/strategies.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/url.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/engine/util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/event/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/event/api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/event/attr.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/event/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/event/legacy.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/event/registry.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/events.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/exc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/associationproxy.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/asyncio/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/asyncio/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/asyncio/engine.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/asyncio/exc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/asyncio/result.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/asyncio/scoping.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/asyncio/session.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/automap.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/baked.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/compiler.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/declarative/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/declarative/extensions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/horizontal_shard.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/hybrid.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/indexable.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/instrumentation.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/mutable.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/mypy/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/mypy/apply.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/mypy/decl_class.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/mypy/infer.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/mypy/names.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/mypy/plugin.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/mypy/util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/orderinglist.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/ext/serializer.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/future/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/future/engine.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/inspection.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/log.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/_orm_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/_typing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/attributes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/bulk_persistence.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/clsregistry.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/collections.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/context.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/decl_api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/decl_base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/dependency.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/descriptor_props.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/dynamic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/evaluator.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/events.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/exc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/identity.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/instrumentation.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/interfaces.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/loading.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/mapped_collection.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/mapper.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/path_registry.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/persistence.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/properties.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/query.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/relationships.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/scoping.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/session.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/state.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/state_changes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/strategies.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/strategy_options.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/sync.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/unitofwork.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/orm/writeonly.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/pool/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/pool/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/pool/events.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/pool/impl.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/schema.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/_dml_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/_elements_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/_orm_types.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/_py_util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/_selectable_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/_typing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/annotation.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/cache_key.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/coercions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/compiler.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/crud.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/ddl.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/default_comparator.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/dml.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/elements.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/events.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/expression.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/functions.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/lambdas.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/naming.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/operators.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/roles.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/schema.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/selectable.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/sqltypes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/traversals.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/type_api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/sql/visitors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/assertions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/assertsql.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/asyncio.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/config.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/engines.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/entities.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/exclusions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/fixtures.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/pickleable.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/plugin/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/plugin/bootstrap.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/plugin/plugin_base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/plugin/pytestplugin.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/profiling.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/provision.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/requirements.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/schema.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_cte.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_ddl.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_deprecations.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_dialect.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_insert.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_reflection.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_results.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_rowcount.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_select.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_sequence.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_types.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_unicode_ddl.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/suite/test_update_delete.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/testing/warnings.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/types.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/_collections.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/_concurrency_py3k.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/_has_cy.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/_py_collections.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/compat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/concurrency.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/deprecations.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/langhelpers.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/preloaded.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/queue.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/tool_support.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/topological.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/sqlalchemy/util/typing.cpython-39.pyc,, +SQLAlchemy-2.0.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +SQLAlchemy-2.0.6.dist-info/LICENSE,sha256=2lSTeluT1aC-5eJXO8vhkzf93qCSeV_mFXLrv3tNdIU,1100 +SQLAlchemy-2.0.6.dist-info/METADATA,sha256=c_c4WRcohhhbPo7l4Gg7Prq679f0oAfVT5ZU2sxCS60,9329 +SQLAlchemy-2.0.6.dist-info/RECORD,, +SQLAlchemy-2.0.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+SQLAlchemy-2.0.6.dist-info/WHEEL,sha256=UlNw_PQRbc8y2O60L5paNCXmEdKCjgP3umWDtB6RxVY,109 +SQLAlchemy-2.0.6.dist-info/top_level.txt,sha256=rp-ZgB7D8G11ivXON5VGPjupT1voYmWqkciDt5Uaw_Q,11 +sqlalchemy/__init__.py,sha256=g1iryNxbwhKm1q6aux_FgFSlO_xW9bfKs-Yi49ng3i4,12339 +sqlalchemy/connectors/__init__.py,sha256=uKUYWQoXyleIyjWBuh7gzgnazJokx3DaasKJbFOfQGA,476 +sqlalchemy/connectors/pyodbc.py,sha256=FiOJGpgYZ3mNQOP5th-7IZdY0ro1eUGpYX2gBiGPKTE,8483 +sqlalchemy/cyextension/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sqlalchemy/cyextension/collections.cpython-39-darwin.so,sha256=2nXqTgOZHqq30hB-td9pSMgIO-Osoo70RHG9IHmzayI,199104 +sqlalchemy/cyextension/collections.pyx,sha256=mByL8PNEVyw512q-fi30Ey36-mLDpUXwThEmlHQy1Vo,11875 +sqlalchemy/cyextension/immutabledict.cpython-39-darwin.so,sha256=6Y0sCIOsaarzv-Qhwg61V1CZYAgxJVoWuJnp8PeXpdY,102144 +sqlalchemy/cyextension/immutabledict.pxd,sha256=oc8BbnQwDg7pWAdThB-fzu8s9_ViOe1Ds-8T0r0POjI,41 +sqlalchemy/cyextension/immutabledict.pyx,sha256=aQJPZKjcqbO8jHDqpC9F-v-ew2qAjUscc5CntaheZUk,3285 +sqlalchemy/cyextension/processors.cpython-39-darwin.so,sha256=kB0r3TnA07c9tgBO_7ux-2Nf50igONM6KyQdY02pBOQ,70888 +sqlalchemy/cyextension/processors.pyx,sha256=0swFIBdR19x1kPRe-dijBaLW898AhH6QJizbv4ho9pk,1545 +sqlalchemy/cyextension/resultproxy.cpython-39-darwin.so,sha256=zSJT8K8WFuWJeV04NSM1ol6T5GuCQmA77NKdr5TTxtE,98856 +sqlalchemy/cyextension/resultproxy.pyx,sha256=1ho2KCiX6FWt98FCtQq8KNPTzRoFDMsXAlzhpSnJ2o8,2711 +sqlalchemy/cyextension/util.cpython-39-darwin.so,sha256=VhT2AbcFm46jisXeywPgl7xyBAwDib1b5vlVxvRw6Sg,97008 +sqlalchemy/cyextension/util.pyx,sha256=lv03p63oVn23jLhMI4_RYGewUnJfh-4FkrNMEFL7A3Y,2289 +sqlalchemy/dialects/__init__.py,sha256=-Tvtzn65H8RsXHCcXP2_WKy7pBR1blaWVw0fvsNlryA,1786 +sqlalchemy/dialects/mssql/__init__.py,sha256=Qk2zLW62sl_BVVgNFm80KOO99O2ePSJg52ZIBtccsbQ,1782 +sqlalchemy/dialects/mssql/base.py,sha256=OaXetBHk45kbeDFQrCLGfMtCK5SizD-xkJ2Hf1QUMBM,130639 +sqlalchemy/dialects/mssql/information_schema.py,sha256=vVsIan_fwm2HmU13SMoBP4QgrSJ52itHgr8dOXu5INk,8067 +sqlalchemy/dialects/mssql/json.py,sha256=B0m6H08CKuk-yomDHcCwfQbVuVN2WLufuVueA_qb1NQ,4573 +sqlalchemy/dialects/mssql/provision.py,sha256=dpJP5DopKhTgPEm8S6s7lXFSpuE_b-tRAtC8iNF-Y8g,4998 +sqlalchemy/dialects/mssql/pymssql.py,sha256=BfJp9t-IQabqWXySJBmP9pwNTWnJqbjA2jJM9M4XeWc,4029 +sqlalchemy/dialects/mssql/pyodbc.py,sha256=14IGCQPwNVupc3_MZ1lRZcmjwW7iZfGG-YgWATDXFyU,27130 +sqlalchemy/dialects/mysql/__init__.py,sha256=btLABiNnmbWt9ziW-XgVWEB1qHWQcSFz7zxZNw4m_LY,2144 +sqlalchemy/dialects/mysql/aiomysql.py,sha256=pDoJ0GzePmL0Q1_LVpH8uJomO1NNRjvH-iKU0VTEuKo,9548 +sqlalchemy/dialects/mysql/asyncmy.py,sha256=pTOQFpNK6_rRO13gJ1XAa3o13PMUi0JQgU0B7wJrF5g,9818 +sqlalchemy/dialects/mysql/base.py,sha256=yruqML-NOiJ9eHh1D2m-w09SACJp1L8Cp5Ro4CvyZPQ,119124 +sqlalchemy/dialects/mysql/cymysql.py,sha256=5CQVJAlqQ3pT4IDGSQJH2hCzj-EWjUitA21MLqJwEEs,2291 +sqlalchemy/dialects/mysql/dml.py,sha256=WJCt0bd7i5SbkEeG2nxP7xYBXZDzA4X7wOE3B4dcQXA,6998 +sqlalchemy/dialects/mysql/enumerated.py,sha256=soQuvpzbS_wuo1O1n5xBNi2zj6g7Nz6mvzDu7hPbbEc,8474 +sqlalchemy/dialects/mysql/expression.py,sha256=WW5G2XPwqJfXjuzHBt4BRP0pCLcPJkPD1mvZX1g0JL0,4066 +sqlalchemy/dialects/mysql/json.py,sha256=RZ1-mvT5jIDYXwjnl3L5m-yF0ufCTqJizxG2f3Sdbm8,2287 +sqlalchemy/dialects/mysql/mariadb.py,sha256=eV33eyd5PX_xFqiM8MQjvqkQExxiC1IYRefHGQ4P06U,608 +sqlalchemy/dialects/mysql/mariadbconnector.py,sha256=p5c4Mfpzz3eQwJu4z4ddRHJZftdmUOL6wSIBHFXqAcU,7466 
+sqlalchemy/dialects/mysql/mysqlconnector.py,sha256=5glmkPhD_KP-Mci8ZXBr4yzqH1MDfzCJ9F_kZNyXcGo,5666 +sqlalchemy/dialects/mysql/mysqldb.py,sha256=dPXGcjZtvMx0CoH6530kDNginDE5YfGgxWUz0L28_0A,9654 +sqlalchemy/dialects/mysql/provision.py,sha256=lKAIFG89AS3n97XCFHg4moLeP8c7beg9sdy6Zj3TkDQ,3123 +sqlalchemy/dialects/mysql/pymysql.py,sha256=gLaQkaTU-RISblBiOlxvx5Kdumq04NI_p4fpVKXTRuQ,2944 +sqlalchemy/dialects/mysql/pyodbc.py,sha256=mkOvumrxpmAi6noZlkaTVKz2F7G5vLh2vx0cZSn9VTA,4288 +sqlalchemy/dialects/mysql/reflection.py,sha256=mL9c8qZmc-3i4Lt60mYmFsrLWMn0rFRdOj3IKBlIGk4,22173 +sqlalchemy/dialects/mysql/reserved_words.py,sha256=gxHKpBAi0Qdi2sZdCr0CI7Kn0uX78u3R3Zb5GljfgM8,9127 +sqlalchemy/dialects/mysql/types.py,sha256=i8DpRkOL1QhPErZ25AmCQOuFLciWhdjNL3I0CeHEhdY,24258 +sqlalchemy/dialects/oracle/__init__.py,sha256=HcAB9tvX7uAVHDMd2pWXKVFdcCwwjLWCXHMXgYU1EWY,1306 +sqlalchemy/dialects/oracle/base.py,sha256=YkLeneSCtg1LQE2Iao6uF1JZY5CXvvyVjQW4SDL39a4,117035 +sqlalchemy/dialects/oracle/cx_oracle.py,sha256=KgcgctokpPGFAQ8YnakE0CEA000hj4m89huN9IynNVU,54098 +sqlalchemy/dialects/oracle/dictionary.py,sha256=oPz_a7y5QdfJeatwufawpFbi8RPBSPYL8LHk5NtqtI0,19034 +sqlalchemy/dialects/oracle/oracledb.py,sha256=qy2IheYJ3WVAyLyCZfQQ9t9keW9fXareBhvLbJ3YHJg,3458 +sqlalchemy/dialects/oracle/provision.py,sha256=sBRDeqMfQeoOEDXy3b6i-3fAqu20ZNr6o9CNmXUpgO0,8058 +sqlalchemy/dialects/oracle/types.py,sha256=MmTYk5O1wYG5hm9-1yFCWOdDOH6TVa0IKfJCfJYuZVo,7509 +sqlalchemy/dialects/postgresql/__init__.py,sha256=XnMKOt3KBi4Bue6_wyE5Zjy2i-2m0DRF7tDGM08Ak6I,3694 +sqlalchemy/dialects/postgresql/_psycopg_common.py,sha256=SjzcZPZPunFDgj2puYl1Q-ohNxV1U9rw2MMwBU-nbcE,6024 +sqlalchemy/dialects/postgresql/array.py,sha256=EtMqkKkqoQOHMwBYEVUl7IlLFvzwP5-PumCL1PaHmKU,13971 +sqlalchemy/dialects/postgresql/asyncpg.py,sha256=P5ipZqpMGGOUoZEnSVmFxztEtlrKk6rKxjYxRSAgd3o,34350 +sqlalchemy/dialects/postgresql/base.py,sha256=-S4p6BvRD60LzHSiHb8Szmr2TjCBplsnJJepxp1XgdM,167456 +sqlalchemy/dialects/postgresql/dml.py,sha256=vI2wIceucDcG-D4DXF1TNZw3Nfcg7MqObhHf-Brdr-g,10186 +sqlalchemy/dialects/postgresql/ext.py,sha256=8r-TcHqQVe6rItuf7Tdy87kzwXf-TDN-aTrt6L2InJQ,16125 +sqlalchemy/dialects/postgresql/hstore.py,sha256=I8-HAllRmAzC1FmMko2abv3u19GwYPFWmH6JXNd2Zh8,12273 +sqlalchemy/dialects/postgresql/json.py,sha256=mIVvJ5kg0Rn7fAUX3qh8VpN2YM22w84wn_DLhPXR8Yw,12928 +sqlalchemy/dialects/postgresql/named_types.py,sha256=YOeA-4HFG1D_ZliIlSpaVYNP7xd-U_BlOFhPk0Jy65M,16907 +sqlalchemy/dialects/postgresql/pg8000.py,sha256=ZhpLe8fH-bXujyxJB1wiJiezhu8rP9epxPc1aHZFZZI,15604 +sqlalchemy/dialects/postgresql/pg_catalog.py,sha256=dDjEbKyg8blyck69tUazGkIkLhe9m0FA0D5jicVfNZ4,8799 +sqlalchemy/dialects/postgresql/provision.py,sha256=cVs3l9ikDhEEB2GBw1OvNSboHRATsoiaweT9zcwiwLc,4880 +sqlalchemy/dialects/postgresql/psycopg.py,sha256=BPCVJk2KKE9VX-t6JIpszpAdiGyKbgLb6-WGtPCAsgY,22098 +sqlalchemy/dialects/postgresql/psycopg2.py,sha256=AJRrwDHJPYp5XUp1PvQog_s1IJ0_qp7q9KsNeTAYgXI,30640 +sqlalchemy/dialects/postgresql/psycopg2cffi.py,sha256=X_uC1C5mXbw0MS-gu3RGTrd7tMEAyJniRYVNsSJ1D_0,1764 +sqlalchemy/dialects/postgresql/ranges.py,sha256=e6EoQ-evQM1oCJ7Zbap3A8pmHHu8TM3VV4_OB_Idnpo,29173 +sqlalchemy/dialects/postgresql/types.py,sha256=zb8-9isrXpsa3thyuseDqHG53E86RhS8SXqTil7Rdi4,5654 +sqlalchemy/dialects/sqlite/__init__.py,sha256=wnZ9vtfm0QXmth1jiGiubFgRiKxIoQoNthb1bp4FhCs,1173 +sqlalchemy/dialects/sqlite/aiosqlite.py,sha256=j73NJyEjDLAPg2qcJXUapad5ZLBhLakKU95x55IqnCM,10232 +sqlalchemy/dialects/sqlite/base.py,sha256=SI-xUk1yHK0ivVe_KQ1f9dTFXXPBEJ4PIzUwWyUr0_I,96592 
+sqlalchemy/dialects/sqlite/dml.py,sha256=a2JjATf9JULij766YBJMkU4Wc7ccnMYi7nz-Kqhbq0M,7481 +sqlalchemy/dialects/sqlite/json.py,sha256=XFPwSdNx0DxDfxDZn7rmGGqsAgL4vpJbjjGaA73WruQ,2533 +sqlalchemy/dialects/sqlite/provision.py,sha256=fH_mrS6pGAbes-6oC13WvQ7DI-qEtyxzmLKmCcqbEHQ,5280 +sqlalchemy/dialects/sqlite/pysqlcipher.py,sha256=_JuOCoic--ehAGkCgnwUUKKTs6xYoBGag4Y_WkQUDwU,5347 +sqlalchemy/dialects/sqlite/pysqlite.py,sha256=CTZUtNmmrpcdfKzsP7EqIclmosPiqOoAJZL-WAU6Xj8,27860 +sqlalchemy/dialects/type_migration_guidelines.txt,sha256=-uHNdmYFGB7bzUNT6i8M5nb4j6j9YUKAtW4lcBZqsMg,8239 +sqlalchemy/engine/__init__.py,sha256=T5JfbA9Uz3gh9jk5nptwACGA2tIBGj0DcEvrJVCw-58,2753 +sqlalchemy/engine/_py_processors.py,sha256=RSVKm9YppSBDSCEi8xvbZdRCP9EsCYfbyEg9iDCMCiI,3744 +sqlalchemy/engine/_py_row.py,sha256=Z0M1qsPDwhKEsiziKJ51nCAQwcJ1wqughqs26WNg3bU,4405 +sqlalchemy/engine/_py_util.py,sha256=5m3MZbEqnUwP5kK_ghisFpzcXgBwSxTSkBEFB6afiD8,2245 +sqlalchemy/engine/base.py,sha256=NaEtMXCP--oSUvJQZAMFAdzn3DVVUX0DPgKdIT064Uk,120919 +sqlalchemy/engine/characteristics.py,sha256=YvMgrUVAt3wsSiQ0K8l44yBjFlMK3MGajxhg50t5yFM,2344 +sqlalchemy/engine/create.py,sha256=j609PwrPbfQdO6IFSLIXSsjb_u-dhdXIinDQknqMwa0,31662 +sqlalchemy/engine/cursor.py,sha256=Hzm1K2LgBKVO3kcLV0h8pJBbsHckS3QYLTvRGg5i8Vw,74009 +sqlalchemy/engine/default.py,sha256=iMXvcB5XN9N43XBT4SCNLazU7YbU4GY_FTzn_LVjsq0,74184 +sqlalchemy/engine/events.py,sha256=CqbDlfg_op1F636ZHC2bUl8XX0Wx7-TxxZyFxrH_neY,37522 +sqlalchemy/engine/interfaces.py,sha256=D8gHdmtjYLpJYOjAJ9TNhELOhWI6X7aK3r9wWhnczyQ,110754 +sqlalchemy/engine/mock.py,sha256=MMdaDvuKuAH_ugnaumq9xK1LOc0c81cocxUnar0hcNM,4177 +sqlalchemy/engine/processors.py,sha256=ENN6XwndxJPW-aXPu_3NzAZsy5SvNznHoa1Qn29ERAw,2383 +sqlalchemy/engine/reflection.py,sha256=rZ86zEqFMw997hb_psUeiuF2HEhQXui2SF5WtI7NohU,75263 +sqlalchemy/engine/result.py,sha256=ks2rkcYk2LUekV0yb11LgM8ktuC-OTOO1AdcfpU8ZYo,76697 +sqlalchemy/engine/row.py,sha256=22uTGqdsNV2fj3w0eB5EfhFQ5CGMQsFohEUVlI6LLZg,10927 +sqlalchemy/engine/strategies.py,sha256=HjCj_FHQOgkkhhtnVmcOEuHI_cftNo3P0hN5zkhZvDc,442 +sqlalchemy/engine/url.py,sha256=T9JYeeOVM4L2WbG3RLfypsz0Sl-fFgPIlS8MRH5VzZg,30468 +sqlalchemy/engine/util.py,sha256=Y5euVW6-DGJaxIgUachA2n1aiqm2M3cB-tCG2joRVt4,5683 +sqlalchemy/event/__init__.py,sha256=CSBMp0yu5joTC6tWvx40B4p87N7oGKxC-ZLx2ULKUnQ,997 +sqlalchemy/event/api.py,sha256=kT_1HiZ0WXG7V6b1BcEAxKraqlV0QkxGxcgUjHYrXgk,8470 +sqlalchemy/event/attr.py,sha256=NMe_sPQTju2PE-f68C8TcKJGW-Gxyi1CLXumAmE368Y,20438 +sqlalchemy/event/base.py,sha256=cHNiiR68uYIuBUXqgATqgEkSY8UdSgqON_54DYIYpQ8,14997 +sqlalchemy/event/legacy.py,sha256=pcXGijX6HtJnlfMfPVo06z9uzogbo5S29v2Azhofjeg,8212 +sqlalchemy/event/registry.py,sha256=Sf1qoGqjHs4LzK6brG_x-xEhrpbrgrH38kcSi-AsKUw,10862 +sqlalchemy/events.py,sha256=pRcPKKsPQHGPH_pvTtKRmzuEIy-QHCtkUiZl4MUbxKs,536 +sqlalchemy/exc.py,sha256=kuwpq3LDh__tRu48juvPH_E3JL8Ik55YYjcPQjwqhws,24046 +sqlalchemy/ext/__init__.py,sha256=w4h7EpXjKPr0LD4yHa0pDCfrvleU3rrX7mgyb8RuDYQ,322 +sqlalchemy/ext/associationproxy.py,sha256=3zd5Ir9wYcfe8Myiv1WY3LjCtuvTPWPM5qtsooGpWVE,64978 +sqlalchemy/ext/asyncio/__init__.py,sha256=_7MoqoUrKeo6hiWLCV5EX3NiI0LXqnt8PQyUpQ8JvCE,1132 +sqlalchemy/ext/asyncio/base.py,sha256=uikdreckmgx1PiJQ9GPDAy4M-jzmDGUsF5V9AEPf1jg,9009 +sqlalchemy/ext/asyncio/engine.py,sha256=4zErSCPyUqa6oGkpkuIHTGn35MmalGIblHT1wf0l9DY,43887 +sqlalchemy/ext/asyncio/exc.py,sha256=1hCdOKzvSryc_YE4jgj0l9JASOmZXutdzShEYPiLbGI,639 +sqlalchemy/ext/asyncio/result.py,sha256=YHLhgGklf77hEbhNwZ3JhK4Nj52FB-4YTkC-3PbRGE4,30570 
+sqlalchemy/ext/asyncio/scoping.py,sha256=6JSYD9aYrHqWNHaK2t6fqf_YpFtGagyDh0RudbpVp40,49563 +sqlalchemy/ext/asyncio/session.py,sha256=cWtT_D9Di9WFKiep_82iZn4I9VmJMJFfq2HGPzdpyDg,55702 +sqlalchemy/ext/automap.py,sha256=Ux_7Z431LwVC3lp0gclUDjdQhKm3hDXarpkj4cVMcfU,61876 +sqlalchemy/ext/baked.py,sha256=78RS8Gp3YgjP8uEc2LEtX5HoDuDdWObaLKsJETeQ7b0,17917 +sqlalchemy/ext/compiler.py,sha256=h7eR0NcPJ4F_k8YGRP3R9YX75Y9pgiVxoCjRyvceF7g,20391 +sqlalchemy/ext/declarative/__init__.py,sha256=VJu8S1efxil20W48fJlpDn6gHorOudn5p3-lF72WcJ8,1818 +sqlalchemy/ext/declarative/extensions.py,sha256=AqWFzVc1dqgLjzkwFL1Ne1Uz7381r8Mkty4yJogrOHA,18568 +sqlalchemy/ext/horizontal_shard.py,sha256=o3JCcXdSkK689Go2IHgOA_ZuFXzDKiBaLYZyLPAEP14,16767 +sqlalchemy/ext/hybrid.py,sha256=7R0MzU3-81hSY7zycrnsDi7oI680FNYnXir_tq0T2EM,52527 +sqlalchemy/ext/indexable.py,sha256=M5KXPLz0IImyfW2MIg1zd_4dqWv0FyD-zALR89fBiWc,11079 +sqlalchemy/ext/instrumentation.py,sha256=rjjSbTGilYeGLdyEWV932TfTaGxiVP44_RajinANk54,15723 +sqlalchemy/ext/mutable.py,sha256=XoEEmAAqwWDLBUSdrn5Y4dySUhwwnj3dR4K9BYaFzyA,36994 +sqlalchemy/ext/mypy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sqlalchemy/ext/mypy/apply.py,sha256=VZNuYU9MV76chkxjWBZzVFGtKto9obnAXJgyM0WOGWM,10505 +sqlalchemy/ext/mypy/decl_class.py,sha256=fWfm5EpJ6ST08Z_AQPHB08e19km33xc-9VYe61x5HsQ,17380 +sqlalchemy/ext/mypy/infer.py,sha256=DzZnzQ7QrJ2lEQd7WiZoWtfvwXU_SQhYG-FWtPEYoPk,19367 +sqlalchemy/ext/mypy/names.py,sha256=hwogZIeYjlckL_NZi9u94EnbBqkoUcNwt9pBg6b0SmM,10476 +sqlalchemy/ext/mypy/plugin.py,sha256=Y5BYcZT7XwhiCJ1EZKuoxU3Uq2Zb14JMMeBmupX1hAM,9751 +sqlalchemy/ext/mypy/util.py,sha256=wci1awA8t5aKKUmfl6JDhXv8KnSjsGTDhwWjwzjRPes,8984 +sqlalchemy/ext/orderinglist.py,sha256=8Vcg7UUkLg-QbYAbLVDSqu-5REkR6L-FLLhCYsHYxCQ,14384 +sqlalchemy/ext/serializer.py,sha256=ox6dbMOBmFR0H2RQFt17mcYBOGKgn1cNVFfqY8-jpgQ,6178 +sqlalchemy/future/__init__.py,sha256=79DZx3v7TQZpkS_qThlmuCOm1a9UK2ObNZhyMmjfNB0,516 +sqlalchemy/future/engine.py,sha256=6uOpOedIqiT1-3qJSJIlv9_raMJU8NTkhQwN_Ngg8kI,499 +sqlalchemy/inspection.py,sha256=2-h6SqEs2OyjyHq20j3eo_1eomCKxExY5AVzdo0CiRk,4429 +sqlalchemy/log.py,sha256=7ZVNL8ZRYHDuxoynXmQd6XurCuIach62PxRiSzv1ucw,8629 +sqlalchemy/orm/__init__.py,sha256=Zr6vHNVIEguV4IMLUN_R4BckXRTe1nFEiu1JP1QfCkU,8390 +sqlalchemy/orm/_orm_constructors.py,sha256=sbn9ucxOkIQtfX8ztBQ3docTMS3z6k0zp9O_KRMudwc,95728 +sqlalchemy/orm/_typing.py,sha256=vY4ww1YTHOJXlbPNWujW4veChVD32PBH1B9NUmQH8_I,5230 +sqlalchemy/orm/attributes.py,sha256=YclvPbRY2mLaQqimfBkI1rRVBSv2q_NQ2-GfY7IVegk,91225 +sqlalchemy/orm/base.py,sha256=GDRkwC9LX9_0NMsPAl7xQw9HnxIh53e28KQsB4lDpHk,27058 +sqlalchemy/orm/bulk_persistence.py,sha256=HyeI1I49UKKzD7Yo9pFHvtlgayu3Jo2A3W2H4XYxWEk,64824 +sqlalchemy/orm/clsregistry.py,sha256=tV1JPwHhuBahXoaHDKBoIwRvK3KEjAltJ967JMk4-Ig,17950 +sqlalchemy/orm/collections.py,sha256=PXkSIuVEjfmpuJ7RgLqPvlrn3QJrw1Qr2w3WXHYc844,50940 +sqlalchemy/orm/context.py,sha256=a9BZ5bOJNuFFr_T-8grA_wKpVj5bQ1llDlO6UZjc0Y0,108123 +sqlalchemy/orm/decl_api.py,sha256=l6oKlZPnCVkRXUWACojYooo-bdttDoZKv7HXDgoaMec,63034 +sqlalchemy/orm/decl_base.py,sha256=Yz7JcqgKsHDXpGKbs6FKCY2xG-dDg9sp93apD6WK484,78053 +sqlalchemy/orm/dependency.py,sha256=iM3AHi3ObbrWP5EJtlSGTCNr8T7u2KSXWvuJNo-jpGg,47038 +sqlalchemy/orm/descriptor_props.py,sha256=uyQ_GHZS9SfO7ZM3FiNLK2iV7oGFUk5JNXiwdu3wiBM,37430 +sqlalchemy/orm/dynamic.py,sha256=-4kTkjq6Z88L1STsjybi27cMXc1OVlmmvtPxE_DmJKI,8622 +sqlalchemy/orm/evaluator.py,sha256=jPjVrP7XbVOG6aXTCBREq0rF3oNHLqB4XAT-gt_cpaA,11925 
+sqlalchemy/orm/events.py,sha256=f9vhI8oMDRS14iSQlcsvoTr6fCSCKCYrZIcezgHVtl4,128434 +sqlalchemy/orm/exc.py,sha256=A3wvZVs5sC5XCef4LoTUBG-UfhmliFpU9rYMdS2t_To,7356 +sqlalchemy/orm/identity.py,sha256=gRiuQSrurHGEAJXH9QGYioXL49Im5EGcYQ-IKUEpHmQ,9249 +sqlalchemy/orm/instrumentation.py,sha256=XoDqwNtCTUF5250tAIn3dm5_j3yBcTc9PtW-j3VuQdQ,24452 +sqlalchemy/orm/interfaces.py,sha256=uvuNGXY57KW_sDxauuhpcRt_3_94R4oxWIWlDoG7sqI,47138 +sqlalchemy/orm/loading.py,sha256=71ESreN1ndlezRfIdFFINEttqipWT7k9zyLw_k4-uOs,56414 +sqlalchemy/orm/mapped_collection.py,sha256=MlbcBTcfzl20mq5C8x5uGcc0v7qlGyrWPaveOPygqQQ,19278 +sqlalchemy/orm/mapper.py,sha256=X4sHUKK-z3yNPEqER_Ke6y_B0xGb0QvIkzlnh41Q1kQ,169809 +sqlalchemy/orm/path_registry.py,sha256=wNPlhX6z8xoAVGtknUbJsJXMWFLnqIjUKSnn35fhv6w,24423 +sqlalchemy/orm/persistence.py,sha256=QhxgJ-jYqL2jPsbFSRWkjhhmH1ujtaNW9WX6xGxTlxc,58524 +sqlalchemy/orm/properties.py,sha256=DELlNLPAw7m6qLk6yBGCCIz97l3S77F-3UxHrrfKziY,26065 +sqlalchemy/orm/query.py,sha256=k3F5LVUW8F2QuXg0kR2G9Oq-35EBxfRFwvO5YpvcF_w,118160 +sqlalchemy/orm/relationships.py,sha256=UtoA6m_w-PAhlP40tyfP0ouneUXoMxXoVJ84Si7Jnk4,126416 +sqlalchemy/orm/scoping.py,sha256=SbX0u4mhQApf8OerTtNpQm1gHirv7Ln79yyfKx5bRAE,75063 +sqlalchemy/orm/session.py,sha256=l9jt-owBq6nCpCRDSOKrR9iOh1rEyp3-PTpBLA7DCsY,186662 +sqlalchemy/orm/state.py,sha256=jDkgR-tMEMjAX4QOIn1yYr2DdOig7sc6oyPIogMxAQA,37724 +sqlalchemy/orm/state_changes.py,sha256=VxborriedCoQi45RT7d2sIjwGY0Ug3yl99-zlLHQBt4,6587 +sqlalchemy/orm/strategies.py,sha256=1Ij_WNMBkWSw8Dok3sdzB24seJ4DB6xGWj5Lj6mWXQc,114473 +sqlalchemy/orm/strategy_options.py,sha256=bs3Pm3vQw4gdk8JhCTu9WAAsBuH-WSjkRaTPmOF2t_Q,83337 +sqlalchemy/orm/sync.py,sha256=FEOjVlML9fOTL8Kc1-mksLv6_KjEyC0NLVkoI6_bJd8,5750 +sqlalchemy/orm/unitofwork.py,sha256=Z9zO7Fk9YR8yNXv_TCnyZPKOfzSb0mEqDYXkgxyqeKQ,27035 +sqlalchemy/orm/util.py,sha256=SibdKDvcy6MAOiz7_AqzT2F1uf7_fedpHbKt3wYWR-8,79730 +sqlalchemy/orm/writeonly.py,sha256=6F0YIdndSngwiz4Ngyc6wtqgMdUp6l9Ivbe-3IxQ7yI,19539 +sqlalchemy/pool/__init__.py,sha256=CIv4b6ctueY7w3sML_LxyLKAdl59esYOhz3O7W5w7WE,1815 +sqlalchemy/pool/base.py,sha256=P2hrsxGDigClgrQc0gYL4r4f5nS8vGuyDbsejOMptwk,52464 +sqlalchemy/pool/events.py,sha256=7_UL7QiwuBhbyUg7y2JxPQ7lcqlx4fEoU3N-R5vS6bM,13424 +sqlalchemy/pool/impl.py,sha256=3ER8gRv2dDYOa3Dc1hf4KMdt7C6Tf5CIpfrd5coMrYk,17707 +sqlalchemy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sqlalchemy/schema.py,sha256=hJeWr60lrNaG_D2JTpjSP6aLxs8c92J7MFEBpwCqORQ,3135 +sqlalchemy/sql/__init__.py,sha256=Tj_EG7cQMB1LL_vix267IJh0HH3UXkQDq1NBBDUdRZ0,5611 +sqlalchemy/sql/_dml_constructors.py,sha256=hoNyINY3FNi1ZQajR6lbcRN7oYsNghM1wuzzVWxIv3c,3867 +sqlalchemy/sql/_elements_constructors.py,sha256=IztYe9gg2YSXyDXt1bBEYkwjMgVXTi6QbbkPSVrIOdE,62134 +sqlalchemy/sql/_orm_types.py,sha256=_vR3_HQYgZR_of6_ZpTQByie2gaVScxQjVAVWAP3Ztg,620 +sqlalchemy/sql/_py_util.py,sha256=uopyvI1gHg-9BZv1zAamorDFKmE5Woz275dcFOIT4ts,2174 +sqlalchemy/sql/_selectable_constructors.py,sha256=diNJxg4gzh_BgMg5wp_yfVhv2poil-jnq09K4RvoaUo,19054 +sqlalchemy/sql/_typing.py,sha256=kvkTOajngaXFHu9cYHW3LHBWNXjFa0AlrnXwg0g2rgk,10080 +sqlalchemy/sql/annotation.py,sha256=CoRI328FpvXcZzX2IGnKlolpi_ZhfqZyowxvv9dRSs8,17628 +sqlalchemy/sql/base.py,sha256=_RxPzpmSviWTVy3dF5A76L8VbCRcd4DWOtk1e00h4O0,71893 +sqlalchemy/sql/cache_key.py,sha256=kMFPCqdsclRKvf1wFotjgQetp947FKhBbs53QU--Wuw,32662 +sqlalchemy/sql/coercions.py,sha256=qQrp3pVXTD0M7UODD79gBxrKPFpc6uEH5SdjcJbmIfo,40488 +sqlalchemy/sql/compiler.py,sha256=CDzVOq-YChPk4zSjMWEwyqmhuR-0SZnYxnkGeDhJYGo,245985 
+sqlalchemy/sql/crud.py,sha256=ZjfBND1mWCElROHKr4zpX4PGH7xF2D-UeCxJor9KUkk,52295 +sqlalchemy/sql/ddl.py,sha256=MtAHSZRCesPBEgZr9tz0rO3VD2NsG24UFUKB1chSfd0,45756 +sqlalchemy/sql/default_comparator.py,sha256=OL4rZrvThfsBh4kfJtq-6jHiTWrmj6y54vs5kj9P2us,16745 +sqlalchemy/sql/dml.py,sha256=UDFVFRjGF56uwdJh4i3ZgMz6JHjDhYM0Jg18RRikOlE,63886 +sqlalchemy/sql/elements.py,sha256=kICYdOSrB5zIHsNG4U75kSGn3JkFavtOREARWxwpwlw,167543 +sqlalchemy/sql/events.py,sha256=xe3vJ6pQJau3dJWBAY0zU7Lz52UKuMrpLycriLm3AWA,18301 +sqlalchemy/sql/expression.py,sha256=r9sDe3C9dcJ2HI8gmt8ZXB30ikAcnfo08rj-XYR4TVM,7414 +sqlalchemy/sql/functions.py,sha256=oC4jfqz7GkK0eHa9R-Y2FXRq0PKi2t0S3M77N6QtGQ8,54033 +sqlalchemy/sql/lambdas.py,sha256=wLfoYJqnGLp7A7_xlMXwBudCROos0spuJUEkEjpJS20,49312 +sqlalchemy/sql/naming.py,sha256=xjmAepd5RfhyIu_tZVKh3TtRKnxVTgxeG--niCZhwq8,6866 +sqlalchemy/sql/operators.py,sha256=G-3ccNucbl1m1HuQn4wkh4uV-3PQnJLVtYAKkLLL484,73645 +sqlalchemy/sql/roles.py,sha256=fRi_bOduJED_Dk2PjudvAzEdpf0JXgfv1b5onjKHmsU,7628 +sqlalchemy/sql/schema.py,sha256=tcZFCCjwOj2SvAVDuG9UdkmeMpwrsLPHK8TUo2cdtCE,218926 +sqlalchemy/sql/selectable.py,sha256=32EynftlSlrkYfjZu4GDauj1bmc-DXilZ48031iP9Cs,232616 +sqlalchemy/sql/sqltypes.py,sha256=E8i6cFSrQyYo8BdhmgtEAEqMpq_06UT6eq-sk0ugZUk,125157 +sqlalchemy/sql/traversals.py,sha256=IfVnOjvZIqigeYt3_Nu6D-QNREKQ7EoVF57M63DKj_0,33605 +sqlalchemy/sql/type_api.py,sha256=ftTiDXDvMLknIiSUOh0iZSD00l7tQmzV08mssbBUPYU,84480 +sqlalchemy/sql/util.py,sha256=5CGmTf3LueJ34nDUCrpfB_WqmMSLBDYC6UgK-H7LJik,48269 +sqlalchemy/sql/visitors.py,sha256=ojpSyJUaIYeu4QDEMpt640XhH9azDrzkaszwFpY47NE,36355 +sqlalchemy/testing/__init__.py,sha256=9M2SMxBBLJ8xLUWXNCWDzkcvOqFznWcJzrSd712vATU,3126 +sqlalchemy/testing/assertions.py,sha256=EEg6RuVF_IjicmsH8bHT8f_k4aAOqA0NPLhS-PbwjBA,30787 +sqlalchemy/testing/assertsql.py,sha256=IbPja8h8FWl_-K6J4A9LDsGP_oJtco7xvXbQPM8ngvY,16777 +sqlalchemy/testing/asyncio.py,sha256=x2R20JXuj5D7r16THih-3MIhD2tfSJEeM35kpakzjxw,3729 +sqlalchemy/testing/config.py,sha256=PhgrIVkHfuJqPtnRnZqT_Hu-RnMRJ93YsN-jPi8Mxxk,10660 +sqlalchemy/testing/engines.py,sha256=sVkoNTJOHjNPOg9Q-EEQ1y7tPjK1TnivMMfJc2lSidQ,13356 +sqlalchemy/testing/entities.py,sha256=rysywsnjXHlIIC-uv0L7-fLmTAuNpHJvcSd1HeAdY5M,3354 +sqlalchemy/testing/exclusions.py,sha256=uoYLEwyNOK1eR8rpfOZ2Q3dxgY0akM-RtsIFML-FPrY,12444 +sqlalchemy/testing/fixtures.py,sha256=Pu1HicBUrP8cEHi4kJXOj-Pm2c8d4SrxGi21W2uKkF8,31542 +sqlalchemy/testing/pickleable.py,sha256=0AqRQGexDo-lgsEx_GDMRqMIyG1QAQONbGsqWxLyoog,2889 +sqlalchemy/testing/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sqlalchemy/testing/plugin/bootstrap.py,sha256=GrBB27KbswjE3Tt-zJlj6uSqGh9N-_CXkonnJSSBz84,1437 +sqlalchemy/testing/plugin/plugin_base.py,sha256=ZkB5Ctea96eHYkCsV-YR44L77xHVEoyN4Ri-IEjYVJE,21281 +sqlalchemy/testing/plugin/pytestplugin.py,sha256=B31nWg5PahC5cQI-fNP3KgvEj3u7zC4uUTZk2EN_J-c,26204 +sqlalchemy/testing/profiling.py,sha256=rKG3wdcJSRGRA1vw9c9s2gx8LZ7jSVwY5jm_RCiEipE,10151 +sqlalchemy/testing/provision.py,sha256=TT-CIb-E8H6VBXreV8p8J9AV8-TSEZP9B4ZEespTdD4,14145 +sqlalchemy/testing/requirements.py,sha256=d14BkmwrL7EBGhXcv_XsE8WIZwOhPSnfTkxizxDzsR8,49983 +sqlalchemy/testing/schema.py,sha256=mvKy6ftmNbM5UKVxCCr-Ufik4_pyJh-v_hBc4UVrow8,6514 +sqlalchemy/testing/suite/__init__.py,sha256=_firVc2uS3TMZ3vH2baQzNb17ubM78RHtb9kniSybmk,476 +sqlalchemy/testing/suite/test_cte.py,sha256=rbC3JbA3T_gZQ8erCslf7t9UFHHBj2nejT6GdzVVeA0,6206 +sqlalchemy/testing/suite/test_ddl.py,sha256=xWimTjggpTe3S1Xfmt_IPofTXkUUcKuVSVCIfIyGMbA,11785 
+sqlalchemy/testing/suite/test_deprecations.py,sha256=XI8ZU1NxC-6uvPDImaaq9O7Ov6MF5gmy-yk3TfesLAo,5082 +sqlalchemy/testing/suite/test_dialect.py,sha256=kvxBhCIqYGK22Rfair5EP-ae1icoibY-alRsHRsUAiA,21019 +sqlalchemy/testing/suite/test_insert.py,sha256=FcDSRYLCwpZ79A-MH1zluZ2Xb0P0L6lLBko2_XBSNpI,11629 +sqlalchemy/testing/suite/test_reflection.py,sha256=6CvXDSpxGiJpYmAWyjT73jB3e2mmL344-Mrb3PkpmWw,101170 +sqlalchemy/testing/suite/test_results.py,sha256=O6CTaMtOjuThgTAr_2SAhMcernAxpOQ6DvhVm5v14l0,15666 +sqlalchemy/testing/suite/test_rowcount.py,sha256=zA0Q3Guf-TQioyLmNZ6HWCUOEuEhf7q-uI2J72j2kjk,6147 +sqlalchemy/testing/suite/test_select.py,sha256=QjZibSKevww0bZPIdKixtyDHHyDXDYPYBLrjeDvcgKg,58326 +sqlalchemy/testing/suite/test_sequence.py,sha256=OYVSwmeMnfYt8nN1Ay17TqpyO_wfZOKvL4QDCHsw_60,9673 +sqlalchemy/testing/suite/test_types.py,sha256=eO6OIyPdAv6EE47l6Qlfs9rnzSv_xUdTlEpLOw022RI,59996 +sqlalchemy/testing/suite/test_unicode_ddl.py,sha256=7obItCpFt4qlWaDqe25HWgQT6FoUhgz1W7_Xycfz9Xk,5887 +sqlalchemy/testing/suite/test_update_delete.py,sha256=VxhsI37iivEYejQ38duuT4dida9iXH_4EK3QMvaXMZM,1648 +sqlalchemy/testing/util.py,sha256=kkVPERLK--KceWOGL7ivAKqhTa4WnRQihc_nXyMaRMM,14164 +sqlalchemy/testing/warnings.py,sha256=pmfT33PF1q1PI7DdHOsup3LxHq1AC4-aYl1oL8HmrYo,1546 +sqlalchemy/types.py,sha256=DgBpPaT-vtsn6_glx5wocrIhR2A1vy56SQNRY3NiPUw,3168 +sqlalchemy/util/__init__.py,sha256=FTiAxcebno3n1L71vdzUKPF010rwZBRdLy3gcocMf1U,8161 +sqlalchemy/util/_collections.py,sha256=CXyo111EoQ9mmA14vr85RJE5t2LHnn6GigvX1nPHQDs,20356 +sqlalchemy/util/_concurrency_py3k.py,sha256=DjpX4L5RZyTLHY73BuxMs-BVjzOCl4NC1lPJ7EgZYaI,7617 +sqlalchemy/util/_has_cy.py,sha256=Yat1IcCSR0M3ilxl-aCggJkLJQ2NubwkI964yZbZkHo,1052 +sqlalchemy/util/_py_collections.py,sha256=QCoGBscGWd9SdlZ-WgWuk8n4LQOOdU4NvAKxsN2zF40,16083 +sqlalchemy/util/compat.py,sha256=_fokD4qvK4Lb-FZIn7-Wn_dxvH4J4PJQDFgd3QnbJwM,8372 +sqlalchemy/util/concurrency.py,sha256=ZxcQYOKy-GBsQkPmCrBO5MzMpqW3JZme2Hiyqpbt9uc,2284 +sqlalchemy/util/deprecations.py,sha256=ZKn9jIuDJcVxqxFJsfpyhJa5iX30kqR7fZoVaHYU9b0,11901 +sqlalchemy/util/langhelpers.py,sha256=5nJAiq38cfC0hoPiHE3ZLAsIXeruImYrcy6Cd3Hh40I,64835 +sqlalchemy/util/preloaded.py,sha256=KKNLJEqChDW1TNUsM_TzKu7JYEA3kkuh2N-quM_2_Y4,5905 +sqlalchemy/util/queue.py,sha256=ITejs6KS4Hz_ojrss2oFeUO9MoIeR3qWmZQ8J7yyrNU,10205 +sqlalchemy/util/tool_support.py,sha256=epm8MzDZpVmhE6LIjrjJrP8BUf12Wab2m28A9lGq95s,5969 +sqlalchemy/util/topological.py,sha256=ipHMYHXniJMsNJTM0Ju7syNSjk46AksXX1iFmWWgTMA,3459 +sqlalchemy/util/typing.py,sha256=HihGX-lHlQSM9CMF5oMGLQaP-SkHKsUUaErW-E3-qMY,15178 diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/REQUESTED b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/REQUESTED new file mode 100644 index 00000000..e69de29b diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/WHEEL b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/WHEEL new file mode 100644 index 00000000..2e266ab9 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: false +Tag: cp39-cp39-macosx_10_9_x86_64 + diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/top_level.txt b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/top_level.txt new file mode 100644 index 00000000..39fb2bef --- 
/dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/SQLAlchemy-2.0.6.dist-info/top_level.txt @@ -0,0 +1 @@ +sqlalchemy diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_distutils_hack/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_distutils_hack/__init__.py new file mode 100644 index 00000000..5f40996a --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_distutils_hack/__init__.py @@ -0,0 +1,128 @@ +import sys +import os +import re +import importlib +import warnings + + +is_pypy = '__pypy__' in sys.builtin_module_names + + +warnings.filterwarnings('ignore', + r'.+ distutils\b.+ deprecated', + DeprecationWarning) + + +def warn_distutils_present(): + if 'distutils' not in sys.modules: + return + if is_pypy and sys.version_info < (3, 7): + # PyPy for 3.6 unconditionally imports distutils, so bypass the warning + # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 + return + warnings.warn( + "Distutils was imported before Setuptools, but importing Setuptools " + "also replaces the `distutils` module in `sys.modules`. This may lead " + "to undesirable behaviors or errors. To avoid these issues, avoid " + "using distutils directly, ensure that setuptools is installed in the " + "traditional way (e.g. not an editable install), and/or make sure " + "that setuptools is always imported before distutils.") + + +def clear_distutils(): + if 'distutils' not in sys.modules: + return + warnings.warn("Setuptools is replacing distutils.") + mods = [name for name in sys.modules if re.match(r'distutils\b', name)] + for name in mods: + del sys.modules[name] + + +def enabled(): + """ + Allow selection of distutils by environment variable. + """ + which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') + return which == 'local' + + +def ensure_local_distutils(): + clear_distutils() + distutils = importlib.import_module('setuptools._distutils') + distutils.__name__ = 'distutils' + sys.modules['distutils'] = distutils + + # sanity check that submodules load as expected + core = importlib.import_module('distutils.core') + assert '_distutils' in core.__file__, core.__file__ + + +def do_override(): + """ + Ensure that the local copy of distutils is preferred over stdlib. + + See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 + for more motivation. + """ + if enabled(): + warn_distutils_present() + ensure_local_distutils() + + +class DistutilsMetaFinder: + def find_spec(self, fullname, path, target=None): + if path is not None: + return + + method_name = 'spec_for_{fullname}'.format(**locals()) + method = getattr(self, method_name, lambda: None) + return method() + + def spec_for_distutils(self): + import importlib.abc + import importlib.util + + class DistutilsLoader(importlib.abc.Loader): + + def create_module(self, spec): + return importlib.import_module('setuptools._distutils') + + def exec_module(self, module): + pass + + return importlib.util.spec_from_loader('distutils', DistutilsLoader()) + + def spec_for_pip(self): + """ + Ensure stdlib distutils when running under pip. + See pypa/pip#8761 for rationale. + """ + if self.pip_imported_during_build(): + return + clear_distutils() + self.spec_for_distutils = lambda: None + + @staticmethod + def pip_imported_during_build(): + """ + Detect if pip is being imported in a build script. Ref #2355. 
+ """ + import traceback + return any( + frame.f_globals['__file__'].endswith('setup.py') + for frame, line in traceback.walk_stack(None) + ) + + +DISTUTILS_FINDER = DistutilsMetaFinder() + + +def add_shim(): + sys.meta_path.insert(0, DISTUTILS_FINDER) + + +def remove_shim(): + try: + sys.meta_path.remove(DISTUTILS_FINDER) + except ValueError: + pass diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_distutils_hack/override.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_distutils_hack/override.py new file mode 100644 index 00000000..2cc433a4 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_distutils_hack/override.py @@ -0,0 +1 @@ +__import__('_distutils_hack').do_override() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_mysql_connector.cpython-39-darwin.so b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_mysql_connector.cpython-39-darwin.so new file mode 100755 index 00000000..8dc345d7 Binary files /dev/null and b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_mysql_connector.cpython-39-darwin.so differ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_mysqlxpb.cpython-39-darwin.so b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_mysqlxpb.cpython-39-darwin.so new file mode 100755 index 00000000..c4be7a10 Binary files /dev/null and b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/_mysqlxpb.cpython-39-darwin.so differ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/distutils-precedence.pth b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/distutils-precedence.pth new file mode 100644 index 00000000..6de4198f --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/distutils-precedence.pth @@ -0,0 +1 @@ +import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'stdlib') == 'local'; enabled and __import__('_distutils_hack').add_shim(); diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/__init__.py new file mode 100644 index 00000000..3087605b --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/__init__.py @@ -0,0 +1,33 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Copyright 2007 Google Inc. All Rights Reserved. + +__version__ = '3.20.3' diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/any_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/any_pb2.py new file mode 100644 index 00000000..9121193d --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/any_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/any.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"&\n\x03\x41ny\x12\x10\n\x08type_url\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x42v\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.any_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010AnyProtoP\001Z,google.golang.org/protobuf/types/known/anypb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _ANY._serialized_start=46 + _ANY._serialized_end=84 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/api_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/api_pb2.py new file mode 100644 index 00000000..1721b10a --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/api_pb2.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/api.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2 +from google.protobuf import type_pb2 as google_dot_protobuf_dot_type__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/api.proto\x12\x0fgoogle.protobuf\x1a$google/protobuf/source_context.proto\x1a\x1agoogle/protobuf/type.proto\"\x81\x02\n\x03\x41pi\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x07methods\x18\x02 \x03(\x0b\x32\x17.google.protobuf.Method\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12&\n\x06mixins\x18\x06 \x03(\x0b\x32\x16.google.protobuf.Mixin\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x01\n\x06Method\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10request_type_url\x18\x02 \x01(\t\x12\x19\n\x11request_streaming\x18\x03 \x01(\x08\x12\x19\n\x11response_type_url\x18\x04 \x01(\t\x12\x1a\n\x12response_streaming\x18\x05 \x01(\x08\x12(\n\x07options\x18\x06 \x03(\x0b\x32\x17.google.protobuf.Option\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"#\n\x05Mixin\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04root\x18\x02 \x01(\tBv\n\x13\x63om.google.protobufB\x08\x41piProtoP\x01Z,google.golang.org/protobuf/types/known/apipb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.api_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010ApiProtoP\001Z,google.golang.org/protobuf/types/known/apipb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _API._serialized_start=113 + _API._serialized_end=370 + _METHOD._serialized_start=373 + _METHOD._serialized_end=586 + _MIXIN._serialized_start=588 + _MIXIN._serialized_end=623 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/compiler/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/compiler/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/compiler/plugin_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/compiler/plugin_pb2.py new file mode 100644 index 00000000..715a8913 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/compiler/plugin_pb2.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/compiler/plugin.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"F\n\x07Version\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\x0e\n\x06suffix\x18\x04 \x01(\t\"\xba\x01\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\x12;\n\x10\x63ompiler_version\x18\x03 \x01(\x0b\x32!.google.protobuf.compiler.Version\"\xc1\x02\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x1a\n\x12supported_features\x18\x02 \x01(\x04\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a\x7f\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\t\x12?\n\x13generated_code_info\x18\x10 \x01(\x0b\x32\".google.protobuf.GeneratedCodeInfo\"8\n\x07\x46\x65\x61ture\x12\x10\n\x0c\x46\x45\x41TURE_NONE\x10\x00\x12\x1b\n\x17\x46\x45\x41TURE_PROTO3_OPTIONAL\x10\x01\x42W\n\x1c\x63om.google.protobuf.compilerB\x0cPluginProtosZ)google.golang.org/protobuf/types/pluginpb') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.compiler.plugin_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\034com.google.protobuf.compilerB\014PluginProtosZ)google.golang.org/protobuf/types/pluginpb' + _VERSION._serialized_start=101 + _VERSION._serialized_end=171 + _CODEGENERATORREQUEST._serialized_start=174 + _CODEGENERATORREQUEST._serialized_end=360 + _CODEGENERATORRESPONSE._serialized_start=363 + _CODEGENERATORRESPONSE._serialized_end=684 + _CODEGENERATORRESPONSE_FILE._serialized_start=499 + _CODEGENERATORRESPONSE_FILE._serialized_end=626 + _CODEGENERATORRESPONSE_FEATURE._serialized_start=628 + _CODEGENERATORRESPONSE_FEATURE._serialized_end=684 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor.py new file mode 100644 index 00000000..ad70be9a --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor.py @@ -0,0 +1,1224 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Descriptors essentially contain exactly the information found in a .proto +file, in types that make this information accessible in Python. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +import threading +import warnings + +from google.protobuf.internal import api_implementation + +_USE_C_DESCRIPTORS = False +if api_implementation.Type() == 'cpp': + # Used by MakeDescriptor in cpp mode + import binascii + import os + from google.protobuf.pyext import _message + _USE_C_DESCRIPTORS = True + + +class Error(Exception): + """Base error for this module.""" + + +class TypeTransformationError(Error): + """Error transforming between python proto type and corresponding C++ type.""" + + +if _USE_C_DESCRIPTORS: + # This metaclass makes it possible to override the behavior of code like + # isinstance(my_descriptor, FieldDescriptor) + # and make it return True when the descriptor is an instance of the extension + # type written in C++. + class DescriptorMetaclass(type): + def __instancecheck__(cls, obj): + if super(DescriptorMetaclass, cls).__instancecheck__(obj): + return True + if isinstance(obj, cls._C_DESCRIPTOR_CLASS): + return True + return False +else: + # The standard metaclass; nothing changes. + DescriptorMetaclass = type + + +class _Lock(object): + """Wrapper around threading.Lock() that can be used in a 'with' statement.""" + + def __new__(cls): + self = object.__new__(cls) + self._lock = threading.Lock() # pylint: disable=protected-access + return self + + def __enter__(self): + self._lock.acquire() + + def __exit__(self, exc_type, exc_value, exc_tb): + self._lock.release() + + +_lock = threading.Lock() + + +def _Deprecated(name): + if _Deprecated.count > 0: + _Deprecated.count -= 1 + warnings.warn( + 'Call to deprecated create function %s(). Note: Creating unlinked ' + 'descriptors is going to go away. Please use get/find descriptors from ' + 'generated code or query the descriptor_pool.' + % name, + category=DeprecationWarning, stacklevel=3) + + +# Deprecation warnings are printed at most 100 times, which should be enough +# for users to notice without causing a timeout. +_Deprecated.count = 100 + + +_internal_create_key = object() + + +class DescriptorBase(metaclass=DescriptorMetaclass): + + """Descriptors base class. + + This class is the base of all descriptor classes.
It provides common options-related + functionality. + + Attributes: + has_options: True if the descriptor has non-default options. Usually it + is not necessary to read this -- just call GetOptions() which will + happily return the default instance. However, it's sometimes useful + for efficiency, and also useful inside the protobuf implementation to + avoid some bootstrapping issues. + """ + + if _USE_C_DESCRIPTORS: + # The class, or tuple of classes, that are considered as "virtual + # subclasses" of this descriptor class. + _C_DESCRIPTOR_CLASS = () + + def __init__(self, options, serialized_options, options_class_name): + """Initialize the descriptor given its options message and the name of the + class of the options message. The name of the class is required in case + the options message is None and has to be created. + """ + self._options = options + self._options_class_name = options_class_name + self._serialized_options = serialized_options + + # Does this descriptor have non-default options? + self.has_options = (options is not None) or (serialized_options is not None) + + def _SetOptions(self, options, options_class_name): + """Sets the descriptor's options. + + This function is used in generated proto2 files to update descriptor + options. It must not be used outside proto2. + """ + self._options = options + self._options_class_name = options_class_name + + # Does this descriptor have non-default options? + self.has_options = options is not None + + def GetOptions(self): + """Retrieves descriptor options. + + This method returns the options set or creates the default options for the + descriptor. + """ + if self._options: + return self._options + + from google.protobuf import descriptor_pb2 + try: + options_class = getattr(descriptor_pb2, + self._options_class_name) + except AttributeError: + raise RuntimeError('Unknown options class name %s!' % + (self._options_class_name)) + + with _lock: + if self._serialized_options is None: + self._options = options_class() + else: + self._options = _ParseOptions(options_class(), + self._serialized_options) + + return self._options + + +class _NestedDescriptorBase(DescriptorBase): + """Common class for descriptors that can be nested.""" + + def __init__(self, options, options_class_name, name, full_name, + file, containing_type, serialized_start=None, + serialized_end=None, serialized_options=None): + """Constructor. + + Args: + options: Protocol message options or None + to use default message options. + options_class_name (str): The class name of the above options. + name (str): Name of this protocol message type. + full_name (str): Fully-qualified name of this protocol message type, + which will include protocol "package" name and the name of any + enclosing types. + file (FileDescriptor): Reference to file info. + containing_type: if provided, this is a nested descriptor, with this + descriptor as parent, otherwise None. + serialized_start: The start index (inclusive) of the block in + file.serialized_pb that describes this descriptor. + serialized_end: The end index (exclusive) of the block in + file.serialized_pb that describes this descriptor. + serialized_options: Protocol message serialized options or None. + """ + super(_NestedDescriptorBase, self).__init__( + options, serialized_options, options_class_name) + + self.name = name + # TODO(falk): Add function to calculate full_name instead of having it in + # memory?
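+ # [Editorial sketch; illustrative comment only, not upstream protobuf code. + # The variable names below are hypothetical.] serialized_start and + # serialized_end delimit this descriptor's bytes inside file.serialized_pb; + # CopyToProto() defined below re-parses that slice, roughly: + # proto = descriptor_pb2.DescriptorProto() + # some_message_descriptor.CopyToProto(proto) + # # proto now mirrors the original .proto definition of the message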
+ self.full_name = full_name + self.file = file + self.containing_type = containing_type + + self._serialized_start = serialized_start + self._serialized_end = serialized_end + + def CopyToProto(self, proto): + """Copies this to the matching proto in descriptor_pb2. + + Args: + proto: An empty proto instance from descriptor_pb2. + + Raises: + Error: If self couldn't be serialized, due to too few constructor + arguments. + """ + if (self.file is not None and + self._serialized_start is not None and + self._serialized_end is not None): + proto.ParseFromString(self.file.serialized_pb[ + self._serialized_start:self._serialized_end]) + else: + raise Error('Descriptor does not contain serialization.') + + +class Descriptor(_NestedDescriptorBase): + + """Descriptor for a protocol message type. + + Attributes: + name (str): Name of this protocol message type. + full_name (str): Fully-qualified name of this protocol message type, + which will include protocol "package" name and the name of any + enclosing types. + containing_type (Descriptor): Reference to the descriptor of the type + containing us, or None if this is top-level. + fields (list[FieldDescriptor]): Field descriptors for all fields in + this type. + fields_by_number (dict(int, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed + by "number" attribute in each FieldDescriptor. + fields_by_name (dict(str, FieldDescriptor)): Same + :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed by + "name" attribute in each :class:`FieldDescriptor`. + nested_types (list[Descriptor]): Descriptor references + for all protocol message types nested within this one. + nested_types_by_name (dict(str, Descriptor)): Same Descriptor + objects as in :attr:`nested_types`, but indexed by "name" attribute + in each Descriptor. + enum_types (list[EnumDescriptor]): :class:`EnumDescriptor` references + for all enums contained within this type. + enum_types_by_name (dict(str, EnumDescriptor)): Same + :class:`EnumDescriptor` objects as in :attr:`enum_types`, but + indexed by "name" attribute in each EnumDescriptor. + enum_values_by_name (dict(str, EnumValueDescriptor)): Dict mapping + from enum value name to :class:`EnumValueDescriptor` for that value. + extensions (list[FieldDescriptor]): All extensions defined directly + within this message type (NOT within a nested type). + extensions_by_name (dict(str, FieldDescriptor)): Same FieldDescriptor + objects as :attr:`extensions`, but indexed by "name" attribute of each + FieldDescriptor. + is_extendable (bool): Does this type define any extension ranges? + oneofs (list[OneofDescriptor]): The list of descriptors for oneof fields + in this message. + oneofs_by_name (dict(str, OneofDescriptor)): Same objects as in + :attr:`oneofs`, but indexed by "name" attribute. + file (FileDescriptor): Reference to file descriptor.
+ + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.Descriptor + + def __new__( + cls, + name=None, + full_name=None, + filename=None, + containing_type=None, + fields=None, + nested_types=None, + enum_types=None, + extensions=None, + options=None, + serialized_options=None, + is_extendable=True, + extension_ranges=None, + oneofs=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + syntax=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindMessageTypeByName(full_name) + + # NOTE(tmarek): The file argument redefining a builtin is nothing we can + # fix right now since we don't know how many clients already rely on the + # name of the argument. + def __init__(self, name, full_name, filename, containing_type, fields, + nested_types, enum_types, extensions, options=None, + serialized_options=None, + is_extendable=True, extension_ranges=None, oneofs=None, + file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin + syntax=None, create_key=None): + """Arguments to __init__() are as described in the description + of Descriptor fields above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. + """ + if create_key is not _internal_create_key: + _Deprecated('Descriptor') + + super(Descriptor, self).__init__( + options, 'MessageOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + # We have fields in addition to fields_by_name and fields_by_number, + # so that: + # 1. Clients can index fields by "order in which they're listed." + # 2. Clients can easily iterate over all fields with the terse + # syntax: for f in descriptor.fields: ... + self.fields = fields + for field in self.fields: + field.containing_type = self + self.fields_by_number = dict((f.number, f) for f in fields) + self.fields_by_name = dict((f.name, f) for f in fields) + self._fields_by_camelcase_name = None + + self.nested_types = nested_types + for nested_type in nested_types: + nested_type.containing_type = self + self.nested_types_by_name = dict((t.name, t) for t in nested_types) + + self.enum_types = enum_types + for enum_type in self.enum_types: + enum_type.containing_type = self + self.enum_types_by_name = dict((t.name, t) for t in enum_types) + self.enum_values_by_name = dict( + (v.name, v) for t in enum_types for v in t.values) + + self.extensions = extensions + for extension in self.extensions: + extension.extension_scope = self + self.extensions_by_name = dict((f.name, f) for f in extensions) + self.is_extendable = is_extendable + self.extension_ranges = extension_ranges + self.oneofs = oneofs if oneofs is not None else [] + self.oneofs_by_name = dict((o.name, o) for o in self.oneofs) + for oneof in self.oneofs: + oneof.containing_type = self + self.syntax = syntax or "proto2" + + @property + def fields_by_camelcase_name(self): + """Same FieldDescriptor objects as in :attr:`fields`, but indexed by + :attr:`FieldDescriptor.camelcase_name`. + """ + if self._fields_by_camelcase_name is None: + self._fields_by_camelcase_name = dict( + (f.camelcase_name, f) for f in self.fields) + return self._fields_by_camelcase_name + + def EnumValueName(self, enum, value): + """Returns the string name of an enum value. + + This is just a small helper method to simplify a common operation. 
+ + Args: + enum: string name of the Enum. + value: int, value of the enum. + + Returns: + string name of the enum value. + + Raises: + KeyError if either the Enum doesn't exist or the value is not a valid + value for the enum. + """ + return self.enum_types_by_name[enum].values_by_number[value].name + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.DescriptorProto. + + Args: + proto: An empty descriptor_pb2.DescriptorProto. + """ + # This function is overridden to give a better doc comment. + super(Descriptor, self).CopyToProto(proto) + + +# TODO(robinson): We should have aggressive checking here, +# for example: +# * If you specify a repeated field, you should not be allowed +# to specify a default value. +# * [Other examples here as needed]. +# +# TODO(robinson): for this and other *Descriptor classes, we +# might also want to lock things down aggressively (e.g., +# prevent clients from setting the attributes). Having +# stronger invariants here in general will reduce the number +# of runtime checks we must do in reflection.py... +class FieldDescriptor(DescriptorBase): + + """Descriptor for a single field in a .proto file. + + Attributes: + name (str): Name of this field, exactly as it appears in .proto. + full_name (str): Name of this field, including containing scope. This is + particularly relevant for extensions. + index (int): Dense, 0-indexed index giving the order that this + field textually appears within its message in the .proto file. + number (int): Tag number declared for this field in the .proto file. + + type (int): (One of the TYPE_* constants below) Declared type. + cpp_type (int): (One of the CPPTYPE_* constants below) C++ type used to + represent this field. + + label (int): (One of the LABEL_* constants below) Tells whether this + field is optional, required, or repeated. + has_default_value (bool): True if this field has a default value defined, + otherwise false. + default_value (Varies): Default value of this field. Only + meaningful for non-repeated scalar fields. Repeated fields + should always set this to [], and non-repeated composite + fields should always set this to None. + + containing_type (Descriptor): Descriptor of the protocol message + type that contains this field. Set by the Descriptor constructor + if we're passed into one. + Somewhat confusingly, for extension fields, this is the + descriptor of the EXTENDED message, not the descriptor + of the message containing this field. (See is_extension and + extension_scope below). + message_type (Descriptor): If a composite field, a descriptor + of the message type contained in this field. Otherwise, this is None. + enum_type (EnumDescriptor): If this field contains an enum, a + descriptor of that enum. Otherwise, this is None. + + is_extension: True iff this describes an extension field. + extension_scope (Descriptor): Only meaningful if is_extension is True. + Gives the message that immediately contains this extension field. + Will be None iff we're a top-level (file-level) extension field. + + options (descriptor_pb2.FieldOptions): Protocol message field options or + None to use default field options. + + containing_oneof (OneofDescriptor): If the field is a member of a oneof + union, contains its descriptor. Otherwise, None. + + file (FileDescriptor): Reference to file descriptor. + """ + + # Must be consistent with C++ FieldDescriptor::Type enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. 
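+ # [Editorial sketch; illustrative comment only, not upstream protobuf code.] + # The TYPE_* wire types declared below map onto the coarser CPPTYPE_* storage + # types via ProtoTypeToCppProtoType(), defined further down, e.g.: + # >>> FieldDescriptor.ProtoTypeToCppProtoType(FieldDescriptor.TYPE_SINT64) + # 2 # i.e. FieldDescriptor.CPPTYPE_INT64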
+ TYPE_DOUBLE = 1 + TYPE_FLOAT = 2 + TYPE_INT64 = 3 + TYPE_UINT64 = 4 + TYPE_INT32 = 5 + TYPE_FIXED64 = 6 + TYPE_FIXED32 = 7 + TYPE_BOOL = 8 + TYPE_STRING = 9 + TYPE_GROUP = 10 + TYPE_MESSAGE = 11 + TYPE_BYTES = 12 + TYPE_UINT32 = 13 + TYPE_ENUM = 14 + TYPE_SFIXED32 = 15 + TYPE_SFIXED64 = 16 + TYPE_SINT32 = 17 + TYPE_SINT64 = 18 + MAX_TYPE = 18 + + # Must be consistent with C++ FieldDescriptor::CppType enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. + CPPTYPE_INT32 = 1 + CPPTYPE_INT64 = 2 + CPPTYPE_UINT32 = 3 + CPPTYPE_UINT64 = 4 + CPPTYPE_DOUBLE = 5 + CPPTYPE_FLOAT = 6 + CPPTYPE_BOOL = 7 + CPPTYPE_ENUM = 8 + CPPTYPE_STRING = 9 + CPPTYPE_MESSAGE = 10 + MAX_CPPTYPE = 10 + + _PYTHON_TO_CPP_PROTO_TYPE_MAP = { + TYPE_DOUBLE: CPPTYPE_DOUBLE, + TYPE_FLOAT: CPPTYPE_FLOAT, + TYPE_ENUM: CPPTYPE_ENUM, + TYPE_INT64: CPPTYPE_INT64, + TYPE_SINT64: CPPTYPE_INT64, + TYPE_SFIXED64: CPPTYPE_INT64, + TYPE_UINT64: CPPTYPE_UINT64, + TYPE_FIXED64: CPPTYPE_UINT64, + TYPE_INT32: CPPTYPE_INT32, + TYPE_SFIXED32: CPPTYPE_INT32, + TYPE_SINT32: CPPTYPE_INT32, + TYPE_UINT32: CPPTYPE_UINT32, + TYPE_FIXED32: CPPTYPE_UINT32, + TYPE_BYTES: CPPTYPE_STRING, + TYPE_STRING: CPPTYPE_STRING, + TYPE_BOOL: CPPTYPE_BOOL, + TYPE_MESSAGE: CPPTYPE_MESSAGE, + TYPE_GROUP: CPPTYPE_MESSAGE + } + + # Must be consistent with C++ FieldDescriptor::Label enum in + # descriptor.h. + # + # TODO(robinson): Find a way to eliminate this repetition. + LABEL_OPTIONAL = 1 + LABEL_REQUIRED = 2 + LABEL_REPEATED = 3 + MAX_LABEL = 3 + + # Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber, + # and kLastReservedNumber in descriptor.h + MAX_FIELD_NUMBER = (1 << 29) - 1 + FIRST_RESERVED_FIELD_NUMBER = 19000 + LAST_RESERVED_FIELD_NUMBER = 19999 + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FieldDescriptor + + def __new__(cls, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + _message.Message._CheckCalledFromGeneratedFile() + if is_extension: + return _message.default_pool.FindExtensionByName(full_name) + else: + return _message.default_pool.FindFieldByName(full_name) + + def __init__(self, name, full_name, index, number, type, cpp_type, label, + default_value, message_type, enum_type, containing_type, + is_extension, extension_scope, options=None, + serialized_options=None, + has_default_value=True, containing_oneof=None, json_name=None, + file=None, create_key=None): # pylint: disable=redefined-builtin + """The arguments are as described in the description of FieldDescriptor + attributes above. + + Note that containing_type may be None, and may be set later if necessary + (to deal with circular references between message types, for example). + Likewise for extension_scope. 
+ """ + if create_key is not _internal_create_key: + _Deprecated('FieldDescriptor') + + super(FieldDescriptor, self).__init__( + options, serialized_options, 'FieldOptions') + self.name = name + self.full_name = full_name + self.file = file + self._camelcase_name = None + if json_name is None: + self.json_name = _ToJsonName(name) + else: + self.json_name = json_name + self.index = index + self.number = number + self.type = type + self.cpp_type = cpp_type + self.label = label + self.has_default_value = has_default_value + self.default_value = default_value + self.containing_type = containing_type + self.message_type = message_type + self.enum_type = enum_type + self.is_extension = is_extension + self.extension_scope = extension_scope + self.containing_oneof = containing_oneof + if api_implementation.Type() == 'cpp': + if is_extension: + self._cdescriptor = _message.default_pool.FindExtensionByName(full_name) + else: + self._cdescriptor = _message.default_pool.FindFieldByName(full_name) + else: + self._cdescriptor = None + + @property + def camelcase_name(self): + """Camelcase name of this field. + + Returns: + str: the name in CamelCase. + """ + if self._camelcase_name is None: + self._camelcase_name = _ToCamelCase(self.name) + return self._camelcase_name + + @property + def has_presence(self): + """Whether the field distinguishes between unpopulated and default values. + + Raises: + RuntimeError: singular field that is not linked with message nor file. + """ + if self.label == FieldDescriptor.LABEL_REPEATED: + return False + if (self.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE or + self.containing_oneof): + return True + if hasattr(self.file, 'syntax'): + return self.file.syntax == 'proto2' + if hasattr(self.message_type, 'syntax'): + return self.message_type.syntax == 'proto2' + raise RuntimeError( + 'has_presence is not ready to use because field %s is not' + ' linked with message type nor file' % self.full_name) + + @staticmethod + def ProtoTypeToCppProtoType(proto_type): + """Converts from a Python proto type to a C++ Proto Type. + + The Python ProtocolBuffer classes specify both the 'Python' datatype and the + 'C++' datatype - and they're not the same. This helper method should + translate from one to another. + + Args: + proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*) + Returns: + int: descriptor.FieldDescriptor.CPPTYPE_*, the C++ type. + Raises: + TypeTransformationError: when the Python proto type isn't known. + """ + try: + return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type] + except KeyError: + raise TypeTransformationError('Unknown proto_type: %s' % proto_type) + + +class EnumDescriptor(_NestedDescriptorBase): + + """Descriptor for an enum defined in a .proto file. + + Attributes: + name (str): Name of the enum type. + full_name (str): Full name of the type, including package name + and any enclosing type(s). + + values (list[EnumValueDescriptor]): List of the values + in this enum. + values_by_name (dict(str, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "name" field of each EnumValueDescriptor. + values_by_number (dict(int, EnumValueDescriptor)): Same as :attr:`values`, + but indexed by the "number" field of each EnumValueDescriptor. + containing_type (Descriptor): Descriptor of the immediate containing + type of this enum, or None if this is an enum defined at the + top level in a .proto file. Set by Descriptor's constructor + if we're passed into one. + file (FileDescriptor): Reference to file descriptor. 
+ options (descriptor_pb2.EnumOptions): Enum options message or + None to use default enum options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumDescriptor + + def __new__(cls, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindEnumTypeByName(full_name) + + def __init__(self, name, full_name, filename, values, + containing_type=None, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + """Arguments are as described in the attribute description above. + + Note that filename is an obsolete argument, that is not used anymore. + Please use file.name to access this as an attribute. + """ + if create_key is not _internal_create_key: + _Deprecated('EnumDescriptor') + + super(EnumDescriptor, self).__init__( + options, 'EnumOptions', name, full_name, file, + containing_type, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + + self.values = values + for value in self.values: + value.type = self + self.values_by_name = dict((v.name, v) for v in values) + # Values are reversed to ensure that the first alias is retained. + self.values_by_number = dict((v.number, v) for v in reversed(values)) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.EnumDescriptorProto. + + Args: + proto (descriptor_pb2.EnumDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(EnumDescriptor, self).CopyToProto(proto) + + +class EnumValueDescriptor(DescriptorBase): + + """Descriptor for a single value within an enum. + + Attributes: + name (str): Name of this value. + index (int): Dense, 0-indexed index giving the order that this + value appears textually within its enum in the .proto file. + number (int): Actual number assigned to this enum value. + type (EnumDescriptor): :class:`EnumDescriptor` to which this value + belongs. Set by :class:`EnumDescriptor`'s constructor if we're + passed into one. + options (descriptor_pb2.EnumValueOptions): Enum value options message or + None to use default enum value options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.EnumValueDescriptor + + def __new__(cls, name, index, number, + type=None, # pylint: disable=redefined-builtin + options=None, serialized_options=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + # There is no way we can build a complete EnumValueDescriptor with the + # given parameters (the name of the Enum is not known, for example). + # Fortunately generated files just pass it to the EnumDescriptor() + # constructor, which will ignore it, so returning None is good enough.
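+ # [Editorial sketch; illustrative comment only, not upstream protobuf code.] + # For instance, MakeDescriptor() at the bottom of this module builds values as + # EnumValueDescriptor(enum_val.name, ii, enum_val.number, + # create_key=_internal_create_key) + # and hands the list to EnumDescriptor(), whose constructor (above) links + # each value back via value.type = self.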
+ return None + + def __init__(self, name, index, number, + type=None, # pylint: disable=redefined-builtin + options=None, serialized_options=None, create_key=None): + """Arguments are as described in the attribute description above.""" + if create_key is not _internal_create_key: + _Deprecated('EnumValueDescriptor') + + super(EnumValueDescriptor, self).__init__( + options, serialized_options, 'EnumValueOptions') + self.name = name + self.index = index + self.number = number + self.type = type + + +class OneofDescriptor(DescriptorBase): + """Descriptor for a oneof field. + + Attributes: + name (str): Name of the oneof field. + full_name (str): Full name of the oneof field, including package name. + index (int): 0-based index giving the order of the oneof field inside + its containing type. + containing_type (Descriptor): :class:`Descriptor` of the protocol message + type that contains this field. Set by the :class:`Descriptor` constructor + if we're passed into one. + fields (list[FieldDescriptor]): The list of field descriptors this + oneof can contain. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.OneofDescriptor + + def __new__( + cls, name, full_name, index, containing_type, fields, options=None, + serialized_options=None, create_key=None): + _message.Message._CheckCalledFromGeneratedFile() + return _message.default_pool.FindOneofByName(full_name) + + def __init__( + self, name, full_name, index, containing_type, fields, options=None, + serialized_options=None, create_key=None): + """Arguments are as described in the attribute description above.""" + if create_key is not _internal_create_key: + _Deprecated('OneofDescriptor') + + super(OneofDescriptor, self).__init__( + options, serialized_options, 'OneofOptions') + self.name = name + self.full_name = full_name + self.index = index + self.containing_type = containing_type + self.fields = fields + + +class ServiceDescriptor(_NestedDescriptorBase): + + """Descriptor for a service. + + Attributes: + name (str): Name of the service. + full_name (str): Full name of the service, including package name. + index (int): 0-indexed index giving the order that this service's + definition appears within the .proto file. + methods (list[MethodDescriptor]): List of methods provided by this + service. + methods_by_name (dict(str, MethodDescriptor)): Same + :class:`MethodDescriptor` objects as in :attr:`methods`, but + indexed by "name" attribute in each :class:`MethodDescriptor`. + options (descriptor_pb2.ServiceOptions): Service options message or + None to use default service options. + file (FileDescriptor): Reference to file info.
+ """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.ServiceDescriptor + + def __new__( + cls, + name=None, + full_name=None, + index=None, + methods=None, + options=None, + serialized_options=None, + file=None, # pylint: disable=redefined-builtin + serialized_start=None, + serialized_end=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindServiceByName(full_name) + + def __init__(self, name, full_name, index, methods, options=None, + serialized_options=None, file=None, # pylint: disable=redefined-builtin + serialized_start=None, serialized_end=None, create_key=None): + if create_key is not _internal_create_key: + _Deprecated('ServiceDescriptor') + + super(ServiceDescriptor, self).__init__( + options, 'ServiceOptions', name, full_name, file, + None, serialized_start=serialized_start, + serialized_end=serialized_end, serialized_options=serialized_options) + self.index = index + self.methods = methods + self.methods_by_name = dict((m.name, m) for m in methods) + # Set the containing service for each method in this service. + for method in self.methods: + method.containing_service = self + + def FindMethodByName(self, name): + """Searches for the specified method, and returns its descriptor. + + Args: + name (str): Name of the method. + Returns: + MethodDescriptor or None: the descriptor for the requested method, if + found. + """ + return self.methods_by_name.get(name, None) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.ServiceDescriptorProto. + + Args: + proto (descriptor_pb2.ServiceDescriptorProto): An empty descriptor proto. + """ + # This function is overridden to give a better doc comment. + super(ServiceDescriptor, self).CopyToProto(proto) + + +class MethodDescriptor(DescriptorBase): + + """Descriptor for a method in a service. + + Attributes: + name (str): Name of the method within the service. + full_name (str): Full name of method. + index (int): 0-indexed index of the method inside the service. + containing_service (ServiceDescriptor): The service that contains this + method. + input_type (Descriptor): The descriptor of the message that this method + accepts. + output_type (Descriptor): The descriptor of the message that this method + returns. + client_streaming (bool): Whether this method uses client streaming. + server_streaming (bool): Whether this method uses server streaming. + options (descriptor_pb2.MethodOptions or None): Method options message, or + None to use default method options. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.MethodDescriptor + + def __new__(cls, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + _message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access + return _message.default_pool.FindMethodByName(full_name) + + def __init__(self, + name, + full_name, + index, + containing_service, + input_type, + output_type, + client_streaming=False, + server_streaming=False, + options=None, + serialized_options=None, + create_key=None): + """The arguments are as described in the description of MethodDescriptor + attributes above. + + Note that containing_service may be None, and may be set later if necessary. 
+ """ + if create_key is not _internal_create_key: + _Deprecated('MethodDescriptor') + + super(MethodDescriptor, self).__init__( + options, serialized_options, 'MethodOptions') + self.name = name + self.full_name = full_name + self.index = index + self.containing_service = containing_service + self.input_type = input_type + self.output_type = output_type + self.client_streaming = client_streaming + self.server_streaming = server_streaming + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.MethodDescriptorProto. + + Args: + proto (descriptor_pb2.MethodDescriptorProto): An empty descriptor proto. + + Raises: + Error: If self couldn't be serialized, due to too few constructor + arguments. + """ + if self.containing_service is not None: + from google.protobuf import descriptor_pb2 + service_proto = descriptor_pb2.ServiceDescriptorProto() + self.containing_service.CopyToProto(service_proto) + proto.CopyFrom(service_proto.method[self.index]) + else: + raise Error('Descriptor does not contain a service.') + + +class FileDescriptor(DescriptorBase): + """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto. + + Note that :attr:`enum_types_by_name`, :attr:`extensions_by_name`, and + :attr:`dependencies` fields are only set by the + :py:mod:`google.protobuf.message_factory` module, and not by the generated + proto code. + + Attributes: + name (str): Name of file, relative to root of source tree. + package (str): Name of the package + syntax (str): string indicating syntax of the file (can be "proto2" or + "proto3") + serialized_pb (bytes): Byte string of serialized + :class:`descriptor_pb2.FileDescriptorProto`. + dependencies (list[FileDescriptor]): List of other :class:`FileDescriptor` + objects this :class:`FileDescriptor` depends on. + public_dependencies (list[FileDescriptor]): A subset of + :attr:`dependencies`, which were declared as "public". + message_types_by_name (dict(str, Descriptor)): Mapping from message names + to their :class:`Descriptor`. + enum_types_by_name (dict(str, EnumDescriptor)): Mapping from enum names to + their :class:`EnumDescriptor`. + extensions_by_name (dict(str, FieldDescriptor)): Mapping from extension + names declared at file scope to their :class:`FieldDescriptor`. + services_by_name (dict(str, ServiceDescriptor)): Mapping from services' + names to their :class:`ServiceDescriptor`. + pool (DescriptorPool): The pool this descriptor belongs to. When not + passed to the constructor, the global default pool is used. + """ + + if _USE_C_DESCRIPTORS: + _C_DESCRIPTOR_CLASS = _message.FileDescriptor + + def __new__(cls, name, package, options=None, + serialized_options=None, serialized_pb=None, + dependencies=None, public_dependencies=None, + syntax=None, pool=None, create_key=None): + # FileDescriptor() is called from various places, not only from generated + # files, to register dynamic proto files and messages. 
+ # pylint: disable=g-explicit-bool-comparison + if serialized_pb == b'': + # Cpp generated code must be linked in if serialized_pb is '' + try: + return _message.default_pool.FindFileByName(name) + except KeyError: + raise RuntimeError('Please link in cpp generated lib for %s' % (name)) + elif serialized_pb: + return _message.default_pool.AddSerializedFile(serialized_pb) + else: + return super(FileDescriptor, cls).__new__(cls) + + def __init__(self, name, package, options=None, + serialized_options=None, serialized_pb=None, + dependencies=None, public_dependencies=None, + syntax=None, pool=None, create_key=None): + """Constructor.""" + if create_key is not _internal_create_key: + _Deprecated('FileDescriptor') + + super(FileDescriptor, self).__init__( + options, serialized_options, 'FileOptions') + + if pool is None: + from google.protobuf import descriptor_pool + pool = descriptor_pool.Default() + self.pool = pool + self.message_types_by_name = {} + self.name = name + self.package = package + self.syntax = syntax or "proto2" + self.serialized_pb = serialized_pb + + self.enum_types_by_name = {} + self.extensions_by_name = {} + self.services_by_name = {} + self.dependencies = (dependencies or []) + self.public_dependencies = (public_dependencies or []) + + def CopyToProto(self, proto): + """Copies this to a descriptor_pb2.FileDescriptorProto. + + Args: + proto: An empty descriptor_pb2.FileDescriptorProto. + """ + proto.ParseFromString(self.serialized_pb) + + +def _ParseOptions(message, string): + """Parses serialized options. + + This helper function is used to parse serialized options in generated + proto2 files. It must not be used outside proto2. + """ + message.ParseFromString(string) + return message + + +def _ToCamelCase(name): + """Converts name to camel-case and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + if result: + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + # Lower-case the first letter. + if result and result[0].isupper(): + result[0] = result[0].lower() + return ''.join(result) + + +def _OptionsOrNone(descriptor_proto): + """Returns the value of the field `options`, or None if it is not set.""" + if descriptor_proto.HasField('options'): + return descriptor_proto.options + else: + return None + + +def _ToJsonName(name): + """Converts name to Json name and returns it.""" + capitalize_next = False + result = [] + + for c in name: + if c == '_': + capitalize_next = True + elif capitalize_next: + result.append(c.upper()) + capitalize_next = False + else: + result += c + + return ''.join(result) + + +def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True, + syntax=None): + """Make a protobuf Descriptor given a DescriptorProto protobuf. + + Handles nested descriptors. Note that this is limited to the scope of defining + a message inside of another message. Composite fields can currently only be + resolved if the message is defined in the same scope as the field. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. + package: Optional package name for the new message Descriptor (string). + build_file_if_cpp: Update the C++ descriptor pool if api matches. + Set to False on recursion, so no duplicates are created. + syntax: The syntax/semantics that should be used. Set to "proto3" to get + proto3 field presence semantics. + Returns: + A Descriptor for protobuf messages. 
+ """ + if api_implementation.Type() == 'cpp' and build_file_if_cpp: + # The C++ implementation requires all descriptors to be backed by the same + # definition in the C++ descriptor pool. To do this, we build a + # FileDescriptorProto with the same definition as this descriptor and build + # it into the pool. + from google.protobuf import descriptor_pb2 + file_descriptor_proto = descriptor_pb2.FileDescriptorProto() + file_descriptor_proto.message_type.add().MergeFrom(desc_proto) + + # Generate a random name for this proto file to prevent conflicts with any + # imported ones. We need to specify a file name so the descriptor pool + # accepts our FileDescriptorProto, but it is not important what that file + # name is actually set to. + proto_name = binascii.hexlify(os.urandom(16)).decode('ascii') + + if package: + file_descriptor_proto.name = os.path.join(package.replace('.', '/'), + proto_name + '.proto') + file_descriptor_proto.package = package + else: + file_descriptor_proto.name = proto_name + '.proto' + + _message.default_pool.Add(file_descriptor_proto) + result = _message.default_pool.FindFileByName(file_descriptor_proto.name) + + if _USE_C_DESCRIPTORS: + return result.message_types_by_name[desc_proto.name] + + full_message_name = [desc_proto.name] + if package: full_message_name.insert(0, package) + + # Create Descriptors for enum types + enum_types = {} + for enum_proto in desc_proto.enum_type: + full_name = '.'.join(full_message_name + [enum_proto.name]) + enum_desc = EnumDescriptor( + enum_proto.name, full_name, None, [ + EnumValueDescriptor(enum_val.name, ii, enum_val.number, + create_key=_internal_create_key) + for ii, enum_val in enumerate(enum_proto.value)], + create_key=_internal_create_key) + enum_types[full_name] = enum_desc + + # Create Descriptors for nested types + nested_types = {} + for nested_proto in desc_proto.nested_type: + full_name = '.'.join(full_message_name + [nested_proto.name]) + # Nested types are just those defined inside of the message, not all types + # used by fields in the message, so no loops are possible here. 
+ nested_desc = MakeDescriptor(nested_proto, + package='.'.join(full_message_name), + build_file_if_cpp=False, + syntax=syntax) + nested_types[full_name] = nested_desc + + fields = [] + for field_proto in desc_proto.field: + full_name = '.'.join(full_message_name + [field_proto.name]) + enum_desc = None + nested_desc = None + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + if field_proto.HasField('type_name'): + type_name = field_proto.type_name + full_type_name = '.'.join(full_message_name + + [type_name[type_name.rfind('.')+1:]]) + if full_type_name in nested_types: + nested_desc = nested_types[full_type_name] + elif full_type_name in enum_types: + enum_desc = enum_types[full_type_name] + # Else type_name references a non-local type, which isn't implemented + field = FieldDescriptor( + field_proto.name, full_name, field_proto.number - 1, + field_proto.number, field_proto.type, + FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type), + field_proto.label, None, nested_desc, enum_desc, None, False, None, + options=_OptionsOrNone(field_proto), has_default_value=False, + json_name=json_name, create_key=_internal_create_key) + fields.append(field) + + desc_name = '.'.join(full_message_name) + return Descriptor(desc_proto.name, desc_name, None, None, fields, + list(nested_types.values()), list(enum_types.values()), [], + options=_OptionsOrNone(desc_proto), + create_key=_internal_create_key) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor_database.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor_database.py new file mode 100644 index 00000000..073eddc7 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor_database.py @@ -0,0 +1,177 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
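+
+# Illustrative usage sketch (not upstream documentation; `file_desc_proto`
+# is assumed to be a parsed descriptor_pb2.FileDescriptorProto that defines
+# my.package.MyMessage):
+#
+#   db = DescriptorDatabase()
+#   db.Add(file_desc_proto)
+#   db.FindFileContainingSymbol('my.package.MyMessage')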
+ +"""Provides a container for DescriptorProtos.""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +import warnings + + +class Error(Exception): + pass + + +class DescriptorDatabaseConflictingDefinitionError(Error): + """Raised when a proto is added with the same name & different descriptor.""" + + +class DescriptorDatabase(object): + """A container accepting FileDescriptorProtos and maps DescriptorProtos.""" + + def __init__(self): + self._file_desc_protos_by_file = {} + self._file_desc_protos_by_symbol = {} + + def Add(self, file_desc_proto): + """Adds the FileDescriptorProto and its types to this database. + + Args: + file_desc_proto: The FileDescriptorProto to add. + Raises: + DescriptorDatabaseConflictingDefinitionError: if an attempt is made to + add a proto with the same name but different definition than an + existing proto in the database. + """ + proto_name = file_desc_proto.name + if proto_name not in self._file_desc_protos_by_file: + self._file_desc_protos_by_file[proto_name] = file_desc_proto + elif self._file_desc_protos_by_file[proto_name] != file_desc_proto: + raise DescriptorDatabaseConflictingDefinitionError( + '%s already added, but with different descriptor.' % proto_name) + else: + return + + # Add all the top-level descriptors to the index. + package = file_desc_proto.package + for message in file_desc_proto.message_type: + for name in _ExtractSymbols(message, package): + self._AddSymbol(name, file_desc_proto) + for enum in file_desc_proto.enum_type: + self._AddSymbol(('.'.join((package, enum.name))), file_desc_proto) + for enum_value in enum.value: + self._file_desc_protos_by_symbol[ + '.'.join((package, enum_value.name))] = file_desc_proto + for extension in file_desc_proto.extension: + self._AddSymbol(('.'.join((package, extension.name))), file_desc_proto) + for service in file_desc_proto.service: + self._AddSymbol(('.'.join((package, service.name))), file_desc_proto) + + def FindFileByName(self, name): + """Finds the file descriptor proto by file name. + + Typically the file name is a relative path ending to a .proto file. The + proto with the given name will have to have been added to this database + using the Add method or else an error will be raised. + + Args: + name: The file name to find. + + Returns: + The file descriptor proto matching the name. + + Raises: + KeyError if no file by the given name was added. + """ + + return self._file_desc_protos_by_file[name] + + def FindFileContainingSymbol(self, symbol): + """Finds the file descriptor proto containing the specified symbol. + + The symbol should be a fully qualified name including the file descriptor's + package and any containing messages. Some examples: + + 'some.package.name.Message' + 'some.package.name.Message.NestedEnum' + 'some.package.name.Message.some_field' + + The file descriptor proto containing the specified symbol must be added to + this database using the Add method or else an error will be raised. + + Args: + symbol: The fully qualified symbol name. + + Returns: + The file descriptor proto containing the symbol. + + Raises: + KeyError if no file contains the specified symbol. + """ + try: + return self._file_desc_protos_by_symbol[symbol] + except KeyError: + # Fields, enum values, and nested extensions are not in + # _file_desc_protos_by_symbol. Try to find the top level + # descriptor. Non-existent nested symbol under a valid top level + # descriptor can also be found. The behavior is the same with + # protobuf C++. 
+      top_level, _, _ = symbol.rpartition('.')
+      try:
+        return self._file_desc_protos_by_symbol[top_level]
+      except KeyError:
+        # Re-raise with the original symbol for better diagnostics.
+        raise KeyError(symbol)
+
+  def FindFileContainingExtension(self, extendee_name, extension_number):
+    # TODO(jieluo): implement this API.
+    return None
+
+  def FindAllExtensionNumbers(self, extendee_name):
+    # TODO(jieluo): implement this API.
+    return []
+
+  def _AddSymbol(self, name, file_desc_proto):
+    if name in self._file_desc_protos_by_symbol:
+      warn_msg = ('Conflicting symbol registration for file "' +
+                  file_desc_proto.name + '": ' + name +
+                  ' is already defined in file "' +
+                  self._file_desc_protos_by_symbol[name].name + '"')
+      warnings.warn(warn_msg, RuntimeWarning)
+    self._file_desc_protos_by_symbol[name] = file_desc_proto
+
+
+def _ExtractSymbols(desc_proto, package):
+  """Pulls out all the symbols from a descriptor proto.
+
+  Args:
+    desc_proto: The proto to extract symbols from.
+    package: The package containing the descriptor type.
+
+  Yields:
+    The fully qualified names found in the descriptor.
+  """
+  message_name = package + '.' + desc_proto.name if package else desc_proto.name
+  yield message_name
+  for nested_type in desc_proto.nested_type:
+    for symbol in _ExtractSymbols(nested_type, message_name):
+      yield symbol
+  for enum_type in desc_proto.enum_type:
+    yield '.'.join((message_name, enum_type.name))
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor_pb2.py
new file mode 100644
index 00000000..f5703864
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor_pb2.py
@@ -0,0 +1,1925 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/protobuf/descriptor.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR = _descriptor.FileDescriptor( + name='google/protobuf/descriptor.proto', + package='google.protobuf', + syntax='proto2', + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\x12\x0e\n\x06syntax\x18\x0c \x01(\t\"\xa9\x05\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x12\x46\n\x0ereserved_range\x18\t \x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRange\x12\x15\n\rreserved_name\x18\n \x03(\t\x1a\x65\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\x12\x37\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptions\x1a+\n\rReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"g\n\x15\x45xtensionRangeOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12\x11\n\tjson_name\x18\n \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\x12\x17\n\x0fproto3_optional\x18\x11 
\x01(\x08\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"T\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptions\"\xa4\x02\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\x12N\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRange\x12\x15\n\rreserved_name\x18\x05 \x03(\t\x1a/\n\x11\x45numReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\xc1\x01\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\x12\x1f\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lse\"\xa5\x06\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12)\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01\x12%\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12#\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04true\x12\x19\n\x11objc_class_prefix\x18$ \x01(\t\x12\x18\n\x10\x63sharp_namespace\x18% \x01(\t\x12\x14\n\x0cswift_prefix\x18\' \x01(\t\x12\x18\n\x10php_class_prefix\x18( \x01(\t\x12\x15\n\rphp_namespace\x18) \x01(\t\x12\x1e\n\x16php_metadata_namespace\x18, \x01(\t\x12\x14\n\x0cruby_package\x18- \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 
\x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\x84\x02\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x11\n\tmap_entry\x18\x07 \x01(\x08\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\xbe\x03\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12?\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMAL\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"^\n\x0cOneofOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x93\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xad\x02\n\rMethodOptions\x12\x19\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lse\x12_\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWN\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xd5\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x86\x01\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\t\x12!\n\x19leading_detached_comments\x18\x06 \x03(\t\"\xa7\x01\n\x11GeneratedCodeInfo\x12\x41\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.Annotation\x1aO\n\nAnnotation\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection' + ) +else: + DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\x12\x0e\n\x06syntax\x18\x0c \x01(\t\"\xa9\x05\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x12\x46\n\x0ereserved_range\x18\t 
\x03(\x0b\x32..google.protobuf.DescriptorProto.ReservedRange\x12\x15\n\rreserved_name\x18\n \x03(\t\x1a\x65\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\x12\x37\n\x07options\x18\x03 \x01(\x0b\x32&.google.protobuf.ExtensionRangeOptions\x1a+\n\rReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"g\n\x15\x45xtensionRangeOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd5\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12\x11\n\tjson_name\x18\n \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\x12\x17\n\x0fproto3_optional\x18\x11 \x01(\x08\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"T\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x07options\x18\x02 \x01(\x0b\x32\x1d.google.protobuf.OneofOptions\"\xa4\x02\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\x12N\n\x0ereserved_range\x18\x04 \x03(\x0b\x32\x36.google.protobuf.EnumDescriptorProto.EnumReservedRange\x12\x15\n\rreserved_name\x18\x05 \x03(\t\x1a/\n\x11\x45numReservedRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\xc1\x01\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\x12\x1f\n\x10\x63lient_streaming\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x10server_streaming\x18\x06 \x01(\x08:\x05\x66\x61lse\"\xa5\x06\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12)\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08\x42\x02\x18\x01\x12%\n\x16java_string_check_utf8\x18\x1b 
\x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12#\n\x14php_generic_services\x18* \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x10\x63\x63_enable_arenas\x18\x1f \x01(\x08:\x04true\x12\x19\n\x11objc_class_prefix\x18$ \x01(\t\x12\x18\n\x10\x63sharp_namespace\x18% \x01(\t\x12\x14\n\x0cswift_prefix\x18\' \x01(\t\x12\x18\n\x10php_class_prefix\x18( \x01(\t\x12\x15\n\rphp_namespace\x18) \x01(\t\x12\x1e\n\x16php_metadata_namespace\x18, \x01(\t\x12\x14\n\x0cruby_package\x18- \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08&\x10\'\"\x84\x02\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x11\n\tmap_entry\x18\x07 \x01(\x08\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\tJ\x04\x08\t\x10\n\"\xbe\x03\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12?\n\x06jstype\x18\x06 \x01(\x0e\x32$.google.protobuf.FieldOptions.JSType:\tJS_NORMAL\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0funverified_lazy\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02\"5\n\x06JSType\x12\r\n\tJS_NORMAL\x10\x00\x12\r\n\tJS_STRING\x10\x01\x12\r\n\tJS_NUMBER\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x04\x10\x05\"^\n\x0cOneofOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x93\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02J\x04\x08\x05\x10\x06\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xad\x02\n\rMethodOptions\x12\x19\n\ndeprecated\x18! 
\x01(\x08:\x05\x66\x61lse\x12_\n\x11idempotency_level\x18\" \x01(\x0e\x32/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWN\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"P\n\x10IdempotencyLevel\x12\x17\n\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n\nIDEMPOTENT\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xd5\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x86\x01\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\t\x12!\n\x19leading_detached_comments\x18\x06 \x03(\t\"\xa7\x01\n\x11GeneratedCodeInfo\x12\x41\n\nannotation\x18\x01 \x03(\x0b\x32-.google.protobuf.GeneratedCodeInfo.Annotation\x1aO\n\nAnnotation\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\x42~\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection') + +if _descriptor._USE_C_DESCRIPTORS == False: + _FIELDDESCRIPTORPROTO_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='google.protobuf.FieldDescriptorProto.Type', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TYPE_DOUBLE', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FLOAT', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT64', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT64', index=3, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_INT32', index=4, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED64', index=5, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_FIXED32', index=6, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BOOL', index=7, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_STRING', index=8, number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + 
_descriptor.EnumValueDescriptor( + name='TYPE_GROUP', index=9, number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_MESSAGE', index=10, number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_BYTES', index=11, number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_UINT32', index=12, number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_ENUM', index=13, number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED32', index=14, number=15, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SFIXED64', index=15, number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT32', index=16, number=17, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TYPE_SINT64', index=17, number=18, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_TYPE) + + _FIELDDESCRIPTORPROTO_LABEL = _descriptor.EnumDescriptor( + name='Label', + full_name='google.protobuf.FieldDescriptorProto.Label', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='LABEL_OPTIONAL', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REQUIRED', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LABEL_REPEATED', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_LABEL) + + _FILEOPTIONS_OPTIMIZEMODE = _descriptor.EnumDescriptor( + name='OptimizeMode', + full_name='google.protobuf.FileOptions.OptimizeMode', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='SPEED', index=0, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CODE_SIZE', index=1, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LITE_RUNTIME', index=2, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FILEOPTIONS_OPTIMIZEMODE) + + _FIELDOPTIONS_CTYPE = _descriptor.EnumDescriptor( + name='CType', + full_name='google.protobuf.FieldOptions.CType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + 
name='STRING', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CORD', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='STRING_PIECE', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_CTYPE) + + _FIELDOPTIONS_JSTYPE = _descriptor.EnumDescriptor( + name='JSType', + full_name='google.protobuf.FieldOptions.JSType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='JS_NORMAL', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_STRING', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='JS_NUMBER', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_JSTYPE) + + _METHODOPTIONS_IDEMPOTENCYLEVEL = _descriptor.EnumDescriptor( + name='IdempotencyLevel', + full_name='google.protobuf.MethodOptions.IdempotencyLevel', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='IDEMPOTENCY_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='NO_SIDE_EFFECTS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='IDEMPOTENT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + ) + _sym_db.RegisterEnumDescriptor(_METHODOPTIONS_IDEMPOTENCYLEVEL) + + + _FILEDESCRIPTORSET = _descriptor.Descriptor( + name='FileDescriptorSet', + full_name='google.protobuf.FileDescriptorSet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _FILEDESCRIPTORPROTO = _descriptor.Descriptor( + name='FileDescriptorProto', + full_name='google.protobuf.FileDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='public_dependency', full_name='google.protobuf.FileDescriptorProto.public_dependency', index=3, + number=10, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak_dependency', full_name='google.protobuf.FileDescriptorProto.weak_dependency', index=4, + number=11, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=5, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=6, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='service', full_name='google.protobuf.FileDescriptorProto.service', index=7, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=8, + number=7, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FileDescriptorProto.options', index=9, + number=8, type=11, 
cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_code_info', full_name='google.protobuf.FileDescriptorProto.source_code_info', index=10, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='syntax', full_name='google.protobuf.FileDescriptorProto.syntax', index=11, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _DESCRIPTORPROTO_EXTENSIONRANGE = _descriptor.Descriptor( + name='ExtensionRange', + full_name='google.protobuf.DescriptorProto.ExtensionRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.ExtensionRange.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO_RESERVEDRANGE = _descriptor.Descriptor( + name='ReservedRange', + full_name='google.protobuf.DescriptorProto.ReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.DescriptorProto.ReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.DescriptorProto.ReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _DESCRIPTORPROTO = _descriptor.Descriptor( + name='DescriptorProto', + full_name='google.protobuf.DescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.DescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='field', full_name='google.protobuf.DescriptorProto.field', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_decl', full_name='google.protobuf.DescriptorProto.oneof_decl', index=6, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.DescriptorProto.options', index=7, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.DescriptorProto.reserved_range', index=8, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.DescriptorProto.reserved_name', index=9, + number=10, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, _DESCRIPTORPROTO_RESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _EXTENSIONRANGEOPTIONS = _descriptor.Descriptor( + name='ExtensionRangeOptions', + full_name='google.protobuf.ExtensionRangeOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ExtensionRangeOptions.uninterpreted_option', index=0, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDDESCRIPTORPROTO = _descriptor.Descriptor( + name='FieldDescriptorProto', + full_name='google.protobuf.FieldDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='label', 
full_name='google.protobuf.FieldDescriptorProto.label', index=2, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='oneof_index', full_name='google.protobuf.FieldDescriptorProto.oneof_index', index=7, + number=9, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='json_name', full_name='google.protobuf.FieldDescriptorProto.json_name', index=8, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=9, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='proto3_optional', full_name='google.protobuf.FieldDescriptorProto.proto3_optional', index=10, + number=17, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FIELDDESCRIPTORPROTO_TYPE, + _FIELDDESCRIPTORPROTO_LABEL, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ONEOFDESCRIPTORPROTO = _descriptor.Descriptor( + name='OneofDescriptorProto', + full_name='google.protobuf.OneofDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.OneofDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.OneofDescriptorProto.options', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE = _descriptor.Descriptor( + name='EnumReservedRange', + full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.EnumDescriptorProto.EnumReservedRange.end', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _ENUMDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumDescriptorProto', + full_name='google.protobuf.EnumDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', 
full_name='google.protobuf.EnumDescriptorProto.value', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_range', full_name='google.protobuf.EnumDescriptorProto.reserved_range', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reserved_name', full_name='google.protobuf.EnumDescriptorProto.reserved_name', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _ENUMVALUEDESCRIPTORPROTO = _descriptor.Descriptor( + name='EnumValueDescriptorProto', + full_name='google.protobuf.EnumValueDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _SERVICEDESCRIPTORPROTO = _descriptor.Descriptor( + name='ServiceDescriptorProto', + 
full_name='google.protobuf.ServiceDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _METHODDESCRIPTORPROTO = _descriptor.Descriptor( + name='MethodDescriptorProto', + full_name='google.protobuf.MethodDescriptorProto', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='client_streaming', 
full_name='google.protobuf.MethodDescriptorProto.client_streaming', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='server_streaming', full_name='google.protobuf.MethodDescriptorProto.server_streaming', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _FILEOPTIONS = _descriptor.Descriptor( + name='FileOptions', + full_name='google.protobuf.FileOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generate_equals_and_hash', full_name='google.protobuf.FileOptions.java_generate_equals_and_hash', index=3, + number=20, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_string_check_utf8', full_name='google.protobuf.FileOptions.java_string_check_utf8', index=4, + number=27, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=5, + number=9, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=1, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='go_package', full_name='google.protobuf.FileOptions.go_package', index=6, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=7, + number=16, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=8, + number=17, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=9, + number=18, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_generic_services', full_name='google.protobuf.FileOptions.php_generic_services', index=10, + number=42, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FileOptions.deprecated', index=11, + number=23, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cc_enable_arenas', full_name='google.protobuf.FileOptions.cc_enable_arenas', index=12, + number=31, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='objc_class_prefix', full_name='google.protobuf.FileOptions.objc_class_prefix', index=13, + number=36, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='csharp_namespace', full_name='google.protobuf.FileOptions.csharp_namespace', index=14, 
+ number=37, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='swift_prefix', full_name='google.protobuf.FileOptions.swift_prefix', index=15, + number=39, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_class_prefix', full_name='google.protobuf.FileOptions.php_class_prefix', index=16, + number=40, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_namespace', full_name='google.protobuf.FileOptions.php_namespace', index=17, + number=41, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='php_metadata_namespace', full_name='google.protobuf.FileOptions.php_metadata_namespace', index=18, + number=44, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ruby_package', full_name='google.protobuf.FileOptions.ruby_package', index=19, + number=45, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=20, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FILEOPTIONS_OPTIMIZEMODE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _MESSAGEOPTIONS = _descriptor.Descriptor( + name='MessageOptions', + full_name='google.protobuf.MessageOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MessageOptions.deprecated', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='map_entry', full_name='google.protobuf.MessageOptions.map_entry', index=3, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=4, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _FIELDOPTIONS = _descriptor.Descriptor( + name='FieldOptions', + full_name='google.protobuf.FieldOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='packed', full_name='google.protobuf.FieldOptions.packed', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='jstype', full_name='google.protobuf.FieldOptions.jstype', index=2, + number=6, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='lazy', full_name='google.protobuf.FieldOptions.lazy', index=3, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, 
default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unverified_lazy', full_name='google.protobuf.FieldOptions.unverified_lazy', index=4, + number=15, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=5, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='weak', full_name='google.protobuf.FieldOptions.weak', index=6, + number=10, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=7, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FIELDOPTIONS_CTYPE, + _FIELDOPTIONS_JSTYPE, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ONEOFOPTIONS = _descriptor.Descriptor( + name='OneofOptions', + full_name='google.protobuf.OneofOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.OneofOptions.uninterpreted_option', index=0, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMOPTIONS = _descriptor.Descriptor( + name='EnumOptions', + full_name='google.protobuf.EnumOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='allow_alias', full_name='google.protobuf.EnumOptions.allow_alias', index=0, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( 
+ name='deprecated', full_name='google.protobuf.EnumOptions.deprecated', index=1, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=2, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _ENUMVALUEOPTIONS = _descriptor.Descriptor( + name='EnumValueOptions', + full_name='google.protobuf.EnumValueOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.EnumValueOptions.deprecated', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=1, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _SERVICEOPTIONS = _descriptor.Descriptor( + name='ServiceOptions', + full_name='google.protobuf.ServiceOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.ServiceOptions.deprecated', index=0, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=1, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _METHODOPTIONS = _descriptor.Descriptor( + name='MethodOptions', + 
full_name='google.protobuf.MethodOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='deprecated', full_name='google.protobuf.MethodOptions.deprecated', index=0, + number=33, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='idempotency_level', full_name='google.protobuf.MethodOptions.idempotency_level', index=1, + number=34, type=14, cpp_type=8, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=2, + number=999, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _METHODOPTIONS_IDEMPOTENCYLEVEL, + ], + serialized_options=None, + is_extendable=True, + syntax='proto2', + extension_ranges=[(1000, 536870912), ], + oneofs=[ + ], + ) + + + _UNINTERPRETEDOPTION_NAMEPART = _descriptor.Descriptor( + name='NamePart', + full_name='google.protobuf.UninterpretedOption.NamePart', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0, + number=1, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1, + number=2, type=8, cpp_type=7, label=2, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _UNINTERPRETEDOPTION = _descriptor.Descriptor( + name='UninterpretedOption', + full_name='google.protobuf.UninterpretedOption', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.protobuf.UninterpretedOption.name', index=0, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + 
_descriptor.FieldDescriptor( + name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3, + number=5, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4, + number=6, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5, + number=7, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='aggregate_value', full_name='google.protobuf.UninterpretedOption.aggregate_value', index=6, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _SOURCECODEINFO_LOCATION = _descriptor.Descriptor( + name='Location', + full_name='google.protobuf.SourceCodeInfo.Location', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.SourceCodeInfo.Location.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='span', full_name='google.protobuf.SourceCodeInfo.Location.span', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_comments', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='trailing_comments', full_name='google.protobuf.SourceCodeInfo.Location.trailing_comments', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='leading_detached_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_detached_comments', index=4, + number=6, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _SOURCECODEINFO = _descriptor.Descriptor( + name='SourceCodeInfo', + full_name='google.protobuf.SourceCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='location', full_name='google.protobuf.SourceCodeInfo.location', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SOURCECODEINFO_LOCATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + + _GENERATEDCODEINFO_ANNOTATION = _descriptor.Descriptor( + name='Annotation', + full_name='google.protobuf.GeneratedCodeInfo.Annotation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='path', full_name='google.protobuf.GeneratedCodeInfo.Annotation.path', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='source_file', full_name='google.protobuf.GeneratedCodeInfo.Annotation.source_file', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='begin', full_name='google.protobuf.GeneratedCodeInfo.Annotation.begin', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='end', full_name='google.protobuf.GeneratedCodeInfo.Annotation.end', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _GENERATEDCODEINFO = _descriptor.Descriptor( + name='GeneratedCodeInfo', + full_name='google.protobuf.GeneratedCodeInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='annotation', full_name='google.protobuf.GeneratedCodeInfo.annotation', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_GENERATEDCODEINFO_ANNOTATION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + ) + + _FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS + _FILEDESCRIPTORPROTO.fields_by_name['source_code_info'].message_type = _SOURCECODEINFO + _DESCRIPTORPROTO_EXTENSIONRANGE.fields_by_name['options'].message_type = _EXTENSIONRANGEOPTIONS + _DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO_RESERVEDRANGE.containing_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE + _DESCRIPTORPROTO.fields_by_name['oneof_decl'].message_type = _ONEOFDESCRIPTORPROTO + _DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS + _DESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _DESCRIPTORPROTO_RESERVEDRANGE + _EXTENSIONRANGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = 
_FIELDDESCRIPTORPROTO_LABEL + _FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE + _FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS + _FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO + _FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO + _ONEOFDESCRIPTORPROTO.fields_by_name['options'].message_type = _ONEOFOPTIONS + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE.containing_type = _ENUMDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO + _ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS + _ENUMDESCRIPTORPROTO.fields_by_name['reserved_range'].message_type = _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE + _ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS + _SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO + _SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS + _METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS + _FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE + _FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS + _MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE + _FIELDOPTIONS.fields_by_name['jstype'].enum_type = _FIELDOPTIONS_JSTYPE + _FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS + _FIELDOPTIONS_JSTYPE.containing_type = _FIELDOPTIONS + _ONEOFOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS.fields_by_name['idempotency_level'].enum_type = _METHODOPTIONS_IDEMPOTENCYLEVEL + _METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION + _METHODOPTIONS_IDEMPOTENCYLEVEL.containing_type = _METHODOPTIONS + _UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION + _UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART + _SOURCECODEINFO_LOCATION.containing_type = _SOURCECODEINFO + _SOURCECODEINFO.fields_by_name['location'].message_type = _SOURCECODEINFO_LOCATION + _GENERATEDCODEINFO_ANNOTATION.containing_type = _GENERATEDCODEINFO + _GENERATEDCODEINFO.fields_by_name['annotation'].message_type = _GENERATEDCODEINFO_ANNOTATION + DESCRIPTOR.message_types_by_name['FileDescriptorSet'] = _FILEDESCRIPTORSET + DESCRIPTOR.message_types_by_name['FileDescriptorProto'] = _FILEDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['DescriptorProto'] = _DESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['ExtensionRangeOptions'] = _EXTENSIONRANGEOPTIONS + DESCRIPTOR.message_types_by_name['FieldDescriptorProto'] = _FIELDDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['OneofDescriptorProto'] = _ONEOFDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['EnumDescriptorProto'] = _ENUMDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['EnumValueDescriptorProto'] = _ENUMVALUEDESCRIPTORPROTO + 
DESCRIPTOR.message_types_by_name['ServiceDescriptorProto'] = _SERVICEDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['MethodDescriptorProto'] = _METHODDESCRIPTORPROTO + DESCRIPTOR.message_types_by_name['FileOptions'] = _FILEOPTIONS + DESCRIPTOR.message_types_by_name['MessageOptions'] = _MESSAGEOPTIONS + DESCRIPTOR.message_types_by_name['FieldOptions'] = _FIELDOPTIONS + DESCRIPTOR.message_types_by_name['OneofOptions'] = _ONEOFOPTIONS + DESCRIPTOR.message_types_by_name['EnumOptions'] = _ENUMOPTIONS + DESCRIPTOR.message_types_by_name['EnumValueOptions'] = _ENUMVALUEOPTIONS + DESCRIPTOR.message_types_by_name['ServiceOptions'] = _SERVICEOPTIONS + DESCRIPTOR.message_types_by_name['MethodOptions'] = _METHODOPTIONS + DESCRIPTOR.message_types_by_name['UninterpretedOption'] = _UNINTERPRETEDOPTION + DESCRIPTOR.message_types_by_name['SourceCodeInfo'] = _SOURCECODEINFO + DESCRIPTOR.message_types_by_name['GeneratedCodeInfo'] = _GENERATEDCODEINFO + _sym_db.RegisterFileDescriptor(DESCRIPTOR) + +else: + _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.descriptor_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _FILEDESCRIPTORSET._serialized_start=53 + _FILEDESCRIPTORSET._serialized_end=124 + _FILEDESCRIPTORPROTO._serialized_start=127 + _FILEDESCRIPTORPROTO._serialized_end=602 + _DESCRIPTORPROTO._serialized_start=605 + _DESCRIPTORPROTO._serialized_end=1286 + _DESCRIPTORPROTO_EXTENSIONRANGE._serialized_start=1140 + _DESCRIPTORPROTO_EXTENSIONRANGE._serialized_end=1241 + _DESCRIPTORPROTO_RESERVEDRANGE._serialized_start=1243 + _DESCRIPTORPROTO_RESERVEDRANGE._serialized_end=1286 + _EXTENSIONRANGEOPTIONS._serialized_start=1288 + _EXTENSIONRANGEOPTIONS._serialized_end=1391 + _FIELDDESCRIPTORPROTO._serialized_start=1394 + _FIELDDESCRIPTORPROTO._serialized_end=2119 + _FIELDDESCRIPTORPROTO_TYPE._serialized_start=1740 + _FIELDDESCRIPTORPROTO_TYPE._serialized_end=2050 + _FIELDDESCRIPTORPROTO_LABEL._serialized_start=2052 + _FIELDDESCRIPTORPROTO_LABEL._serialized_end=2119 + _ONEOFDESCRIPTORPROTO._serialized_start=2121 + _ONEOFDESCRIPTORPROTO._serialized_end=2205 + _ENUMDESCRIPTORPROTO._serialized_start=2208 + _ENUMDESCRIPTORPROTO._serialized_end=2500 + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE._serialized_start=2453 + _ENUMDESCRIPTORPROTO_ENUMRESERVEDRANGE._serialized_end=2500 + _ENUMVALUEDESCRIPTORPROTO._serialized_start=2502 + _ENUMVALUEDESCRIPTORPROTO._serialized_end=2610 + _SERVICEDESCRIPTORPROTO._serialized_start=2613 + _SERVICEDESCRIPTORPROTO._serialized_end=2757 + _METHODDESCRIPTORPROTO._serialized_start=2760 + _METHODDESCRIPTORPROTO._serialized_end=2953 + _FILEOPTIONS._serialized_start=2956 + _FILEOPTIONS._serialized_end=3761 + _FILEOPTIONS_OPTIMIZEMODE._serialized_start=3686 + _FILEOPTIONS_OPTIMIZEMODE._serialized_end=3744 + _MESSAGEOPTIONS._serialized_start=3764 + _MESSAGEOPTIONS._serialized_end=4024 + _FIELDOPTIONS._serialized_start=4027 + _FIELDOPTIONS._serialized_end=4473 + _FIELDOPTIONS_CTYPE._serialized_start=4354 + _FIELDOPTIONS_CTYPE._serialized_end=4401 + _FIELDOPTIONS_JSTYPE._serialized_start=4403 + _FIELDOPTIONS_JSTYPE._serialized_end=4456 + _ONEOFOPTIONS._serialized_start=4475 + _ONEOFOPTIONS._serialized_end=4569 + _ENUMOPTIONS._serialized_start=4572 + _ENUMOPTIONS._serialized_end=4719 + _ENUMVALUEOPTIONS._serialized_start=4721 + _ENUMVALUEOPTIONS._serialized_end=4846 + _SERVICEOPTIONS._serialized_start=4848 + _SERVICEOPTIONS._serialized_end=4971 + 
_METHODOPTIONS._serialized_start=4974
+  _METHODOPTIONS._serialized_end=5275
+  _METHODOPTIONS_IDEMPOTENCYLEVEL._serialized_start=5184
+  _METHODOPTIONS_IDEMPOTENCYLEVEL._serialized_end=5264
+  _UNINTERPRETEDOPTION._serialized_start=5278
+  _UNINTERPRETEDOPTION._serialized_end=5564
+  _UNINTERPRETEDOPTION_NAMEPART._serialized_start=5513
+  _UNINTERPRETEDOPTION_NAMEPART._serialized_end=5564
+  _SOURCECODEINFO._serialized_start=5567
+  _SOURCECODEINFO._serialized_end=5780
+  _SOURCECODEINFO_LOCATION._serialized_start=5646
+  _SOURCECODEINFO_LOCATION._serialized_end=5780
+  _GENERATEDCODEINFO._serialized_start=5783
+  _GENERATEDCODEINFO._serialized_end=5950
+  _GENERATEDCODEINFO_ANNOTATION._serialized_start=5871
+  _GENERATEDCODEINFO_ANNOTATION._serialized_end=5950
+# @@protoc_insertion_point(module_scope)
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor_pool.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor_pool.py
new file mode 100644
index 00000000..911372a8
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/descriptor_pool.py
@@ -0,0 +1,1295 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# https://developers.google.com/protocol-buffers/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Provides DescriptorPool to use as a container for proto2 descriptors.
+
+The DescriptorPool is used in conjunction with a DescriptorDatabase to maintain
+a collection of protocol buffer descriptors for use when dynamically creating
+message types at runtime.
+
+For most applications protocol buffers should be used via modules generated by
+the protocol buffer compiler tool. This should only be used when the type of
+protocol buffers used in an application or library cannot be predetermined.
+
+Below is a straightforward example of how to use this class::
+
+  pool = DescriptorPool()
+  file_descriptor_protos = [ ...
]
+  for file_descriptor_proto in file_descriptor_protos:
+    pool.Add(file_descriptor_proto)
+  my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType')
+
+The message descriptor can be used in conjunction with the message_factory
+module in order to create a protocol buffer class that can be encoded and
+decoded.
+
+If you want to get a Python class for the specified proto, use the
+helper functions inside google.protobuf.message_factory
+directly instead of this class.
+"""
+
+__author__ = 'matthewtoia@google.com (Matt Toia)'
+
+import collections
+import warnings
+
+from google.protobuf import descriptor
+from google.protobuf import descriptor_database
+from google.protobuf import text_encoding
+
+
+_USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS  # pylint: disable=protected-access
+
+
+def _Deprecated(func):
+  """Mark functions as deprecated."""
+
+  def NewFunc(*args, **kwargs):
+    warnings.warn(
+        'Call to deprecated function %s(). Note: Adding unlinked descriptors '
+        'to descriptor_pool is wrong. Use Add() or AddSerializedFile() '
+        'instead.' % func.__name__,
+        category=DeprecationWarning)
+    return func(*args, **kwargs)
+  NewFunc.__name__ = func.__name__
+  NewFunc.__doc__ = func.__doc__
+  NewFunc.__dict__.update(func.__dict__)
+  return NewFunc
+
+
+def _NormalizeFullyQualifiedName(name):
+  """Remove leading period from fully-qualified type name.
+
+  Due to b/13860351 in descriptor_database.py, types in the root namespace are
+  generated with a leading period. This function removes that prefix.
+
+  Args:
+    name (str): The fully-qualified symbol name.
+
+  Returns:
+    str: The normalized fully-qualified symbol name.
+  """
+  return name.lstrip('.')
+
+
+def _OptionsOrNone(descriptor_proto):
+  """Returns the value of the field `options`, or None if it is not set."""
+  if descriptor_proto.HasField('options'):
+    return descriptor_proto.options
+  else:
+    return None
+
+
+def _IsMessageSetExtension(field):
+  return (field.is_extension and
+          field.containing_type.has_options and
+          field.containing_type.GetOptions().message_set_wire_format and
+          field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
+          field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL)
+
+
+class DescriptorPool(object):
+  """A collection of protobufs dynamically constructed by descriptor protos."""
+
+  if _USE_C_DESCRIPTORS:
+
+    def __new__(cls, descriptor_db=None):
+      # pylint: disable=protected-access
+      return descriptor._message.DescriptorPool(descriptor_db)
+
+  def __init__(self, descriptor_db=None):
+    """Initializes a Pool of protocol buffers.
+
+    The descriptor_db argument to the constructor is provided to allow
+    specialized file descriptor proto lookup code to be triggered on demand. An
+    example would be an implementation which will read and compile a file
+    specified in a call to FindFileByName() and not require the call to Add()
+    at all. Results from this database will be cached internally here as well.
+
+    Args:
+      descriptor_db: A secondary source of file descriptors.
+    """
+
+    self._internal_db = descriptor_database.DescriptorDatabase()
+    self._descriptor_db = descriptor_db
+    self._descriptors = {}
+    self._enum_descriptors = {}
+    self._service_descriptors = {}
+    self._file_descriptors = {}
+    self._toplevel_extensions = {}
+    # TODO(jieluo): Remove _file_desc_by_toplevel_extension after
+    # maybe year 2020 for compatibility issue (with 3.4.1 only).
+    self._file_desc_by_toplevel_extension = {}
+    self._top_enum_values = {}
+    # We store extensions in two two-level mappings: The first key is the
+    # descriptor of the message being extended, the second key is the extension
+    # full name or its tag number.
+    self._extensions_by_name = collections.defaultdict(dict)
+    self._extensions_by_number = collections.defaultdict(dict)
+
+  def _CheckConflictRegister(self, desc, desc_name, file_name):
+    """Check if the descriptor name conflicts with another of the same name.
+
+    Args:
+      desc: Descriptor of a message, enum, service, extension or enum value.
+      desc_name (str): The full name of desc.
+      file_name (str): The file name of the descriptor.
+    """
+    for register, descriptor_type in [
+        (self._descriptors, descriptor.Descriptor),
+        (self._enum_descriptors, descriptor.EnumDescriptor),
+        (self._service_descriptors, descriptor.ServiceDescriptor),
+        (self._toplevel_extensions, descriptor.FieldDescriptor),
+        (self._top_enum_values, descriptor.EnumValueDescriptor)]:
+      if desc_name in register:
+        old_desc = register[desc_name]
+        if isinstance(old_desc, descriptor.EnumValueDescriptor):
+          old_file = old_desc.type.file.name
+        else:
+          old_file = old_desc.file.name
+
+        if not isinstance(desc, descriptor_type) or (
+            old_file != file_name):
+          error_msg = ('Conflicting register for file "' + file_name +
+                       '": ' + desc_name +
+                       ' is already defined in file "' +
+                       old_file + '". Please fix the conflict by adding a '
+                       'package name to the proto file, or use a different '
+                       'name for the duplicate.')
+          if isinstance(desc, descriptor.EnumValueDescriptor):
+            error_msg += ('\nNote: enum values appear as '
+                          'siblings of the enum type instead of '
+                          'children of it.')
+
+          raise TypeError(error_msg)
+
+    return
+
+  def Add(self, file_desc_proto):
+    """Adds the FileDescriptorProto and its types to this pool.
+
+    Args:
+      file_desc_proto (FileDescriptorProto): The file descriptor to add.
+    """
+
+    self._internal_db.Add(file_desc_proto)
+
+  def AddSerializedFile(self, serialized_file_desc_proto):
+    """Adds the FileDescriptorProto and its types to this pool.
+
+    Args:
+      serialized_file_desc_proto (bytes): A bytes string, serialization of the
+        :class:`FileDescriptorProto` to add.
+
+    Returns:
+      FileDescriptor: Descriptor for the added file.
+    """
+
+    # pylint: disable=g-import-not-at-top
+    from google.protobuf import descriptor_pb2
+    file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
+        serialized_file_desc_proto)
+    file_desc = self._ConvertFileProtoToFileDescriptor(file_desc_proto)
+    file_desc.serialized_pb = serialized_file_desc_proto
+    return file_desc
+
+  # Adding a Descriptor to the descriptor pool is deprecated. Please use Add()
+  # or AddSerializedFile() to add a FileDescriptorProto instead.
+  @_Deprecated
+  def AddDescriptor(self, desc):
+    self._AddDescriptor(desc)
+
+  # Never call this method. It is for internal usage only.
+  def _AddDescriptor(self, desc):
+    """Adds a Descriptor to the pool, non-recursively.
+
+    If the Descriptor contains nested messages or enums, the caller must
+    explicitly register them. This method also registers the FileDescriptor
+    associated with the message.
+
+    Args:
+      desc: A Descriptor.
+    """
+    if not isinstance(desc, descriptor.Descriptor):
+      raise TypeError('Expected instance of descriptor.Descriptor.')
+
+    self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
+
+    self._descriptors[desc.full_name] = desc
+    self._AddFileDescriptor(desc.file)
+
+  # Adding an EnumDescriptor to the descriptor pool is deprecated.
Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddEnumDescriptor(self, enum_desc): + self._AddEnumDescriptor(enum_desc) + + # Never call this method. It is for internal usage only. + def _AddEnumDescriptor(self, enum_desc): + """Adds an EnumDescriptor to the pool. + + This method also registers the FileDescriptor associated with the enum. + + Args: + enum_desc: An EnumDescriptor. + """ + + if not isinstance(enum_desc, descriptor.EnumDescriptor): + raise TypeError('Expected instance of descriptor.EnumDescriptor.') + + file_name = enum_desc.file.name + self._CheckConflictRegister(enum_desc, enum_desc.full_name, file_name) + self._enum_descriptors[enum_desc.full_name] = enum_desc + + # Top enum values need to be indexed. + # Count the number of dots to see whether the enum is toplevel or nested + # in a message. We cannot use enum_desc.containing_type at this stage. + if enum_desc.file.package: + top_level = (enum_desc.full_name.count('.') + - enum_desc.file.package.count('.') == 1) + else: + top_level = enum_desc.full_name.count('.') == 0 + if top_level: + file_name = enum_desc.file.name + package = enum_desc.file.package + for enum_value in enum_desc.values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, enum_value.name))) + self._CheckConflictRegister(enum_value, full_name, file_name) + self._top_enum_values[full_name] = enum_value + self._AddFileDescriptor(enum_desc.file) + + # Add ServiceDescriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddServiceDescriptor(self, service_desc): + self._AddServiceDescriptor(service_desc) + + # Never call this method. It is for internal usage only. + def _AddServiceDescriptor(self, service_desc): + """Adds a ServiceDescriptor to the pool. + + Args: + service_desc: A ServiceDescriptor. + """ + + if not isinstance(service_desc, descriptor.ServiceDescriptor): + raise TypeError('Expected instance of descriptor.ServiceDescriptor.') + + self._CheckConflictRegister(service_desc, service_desc.full_name, + service_desc.file.name) + self._service_descriptors[service_desc.full_name] = service_desc + + # Add ExtensionDescriptor to descriptor pool is dreprecated. Please use Add() + # or AddSerializedFile() to add a FileDescriptorProto instead. + @_Deprecated + def AddExtensionDescriptor(self, extension): + self._AddExtensionDescriptor(extension) + + # Never call this method. It is for internal usage only. + def _AddExtensionDescriptor(self, extension): + """Adds a FieldDescriptor describing an extension to the pool. + + Args: + extension: A FieldDescriptor. + + Raises: + AssertionError: when another extension with the same number extends the + same message. + TypeError: when the specified extension is not a + descriptor.FieldDescriptor. + """ + if not (isinstance(extension, descriptor.FieldDescriptor) and + extension.is_extension): + raise TypeError('Expected an extension descriptor.') + + if extension.extension_scope is None: + self._toplevel_extensions[extension.full_name] = extension + + try: + existing_desc = self._extensions_by_number[ + extension.containing_type][extension.number] + except KeyError: + pass + else: + if extension is not existing_desc: + raise AssertionError( + 'Extensions "%s" and "%s" both try to extend message type "%s" ' + 'with field number %d.' 
% + (extension.full_name, existing_desc.full_name, + extension.containing_type.full_name, extension.number)) + + self._extensions_by_number[extension.containing_type][ + extension.number] = extension + self._extensions_by_name[extension.containing_type][ + extension.full_name] = extension + + # Also register MessageSet extensions with the type name. + if _IsMessageSetExtension(extension): + self._extensions_by_name[extension.containing_type][ + extension.message_type.full_name] = extension + + @_Deprecated + def AddFileDescriptor(self, file_desc): + self._InternalAddFileDescriptor(file_desc) + + # Never call this method. It is for internal usage only. + def _InternalAddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + self._AddFileDescriptor(file_desc) + # TODO(jieluo): This is a temporary solution for FieldDescriptor.file. + # FieldDescriptor.file is added in code gen. Remove this solution after + # maybe 2020 for compatibility reason (with 3.4.1 only). + for extension in file_desc.extensions_by_name.values(): + self._file_desc_by_toplevel_extension[ + extension.full_name] = file_desc + + def _AddFileDescriptor(self, file_desc): + """Adds a FileDescriptor to the pool, non-recursively. + + If the FileDescriptor contains messages or enums, the caller must explicitly + register them. + + Args: + file_desc: A FileDescriptor. + """ + + if not isinstance(file_desc, descriptor.FileDescriptor): + raise TypeError('Expected instance of descriptor.FileDescriptor.') + self._file_descriptors[file_desc.name] = file_desc + + def FindFileByName(self, file_name): + """Gets a FileDescriptor by file name. + + Args: + file_name (str): The path to the file to get a descriptor for. + + Returns: + FileDescriptor: The descriptor for the named file. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + + try: + return self._file_descriptors[file_name] + except KeyError: + pass + + try: + file_proto = self._internal_db.FindFileByName(file_name) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileByName(file_name) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file named %s' % file_name) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def FindFileContainingSymbol(self, symbol): + """Gets the FileDescriptor for the file containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. + """ + + symbol = _NormalizeFullyQualifiedName(symbol) + try: + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + pass + + try: + # Try fallback database. Build and find again if possible. + self._FindFileContainingSymbolInDb(symbol) + return self._InternalFindFileContainingSymbol(symbol) + except KeyError: + raise KeyError('Cannot find a file containing %s' % symbol) + + def _InternalFindFileContainingSymbol(self, symbol): + """Gets the already built FileDescriptor containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: Descriptor for the file that contains the specified + symbol. + + Raises: + KeyError: if the file cannot be found in the pool. 
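+
+    The lookup order is: messages, enums, services, top-level enum values
+    and top-level extensions; fields, enum values and nested extensions
+    inside a message are tried last.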
+ """ + try: + return self._descriptors[symbol].file + except KeyError: + pass + + try: + return self._enum_descriptors[symbol].file + except KeyError: + pass + + try: + return self._service_descriptors[symbol].file + except KeyError: + pass + + try: + return self._top_enum_values[symbol].type.file + except KeyError: + pass + + try: + return self._file_desc_by_toplevel_extension[symbol] + except KeyError: + pass + + # Try fields, enum values and nested extensions inside a message. + top_name, _, sub_name = symbol.rpartition('.') + try: + message = self.FindMessageTypeByName(top_name) + assert (sub_name in message.extensions_by_name or + sub_name in message.fields_by_name or + sub_name in message.enum_values_by_name) + return message.file + except (KeyError, AssertionError): + raise KeyError('Cannot find a file containing %s' % symbol) + + def FindMessageTypeByName(self, full_name): + """Loads the named descriptor from the pool. + + Args: + full_name (str): The full name of the descriptor to load. + + Returns: + Descriptor: The descriptor for the named type. + + Raises: + KeyError: if the message cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._descriptors[full_name] + + def FindEnumTypeByName(self, full_name): + """Loads the named enum descriptor from the pool. + + Args: + full_name (str): The full name of the enum descriptor to load. + + Returns: + EnumDescriptor: The enum descriptor for the named type. + + Raises: + KeyError: if the enum cannot be found in the pool. + """ + + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._enum_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._enum_descriptors[full_name] + + def FindFieldByName(self, full_name): + """Loads the named field descriptor from the pool. + + Args: + full_name (str): The full name of the field descriptor to load. + + Returns: + FieldDescriptor: The field descriptor for the named field. + + Raises: + KeyError: if the field cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, field_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.fields_by_name[field_name] + + def FindOneofByName(self, full_name): + """Loads the named oneof descriptor from the pool. + + Args: + full_name (str): The full name of the oneof descriptor to load. + + Returns: + OneofDescriptor: The oneof descriptor for the named oneof. + + Raises: + KeyError: if the oneof cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + message_name, _, oneof_name = full_name.rpartition('.') + message_descriptor = self.FindMessageTypeByName(message_name) + return message_descriptor.oneofs_by_name[oneof_name] + + def FindExtensionByName(self, full_name): + """Loads the named extension descriptor from the pool. + + Args: + full_name (str): The full name of the extension descriptor to load. + + Returns: + FieldDescriptor: The field descriptor for the named extension. + + Raises: + KeyError: if the extension cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + try: + # The proto compiler does not give any link between the FileDescriptor + # and top-level extensions unless the FileDescriptorProto is added to + # the DescriptorDatabase, but this can impact memory usage. 
+ # So we registered these extensions by name explicitly. + return self._toplevel_extensions[full_name] + except KeyError: + pass + message_name, _, extension_name = full_name.rpartition('.') + try: + # Most extensions are nested inside a message. + scope = self.FindMessageTypeByName(message_name) + except KeyError: + # Some extensions are defined at file scope. + scope = self._FindFileContainingSymbolInDb(full_name) + return scope.extensions_by_name[extension_name] + + def FindExtensionByNumber(self, message_descriptor, number): + """Gets the extension of the specified message with the specified number. + + Extensions have to be registered to this pool by calling :func:`Add` or + :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): descriptor of the extended message. + number (int): Number of the extension field. + + Returns: + FieldDescriptor: The descriptor for the extension. + + Raises: + KeyError: when no extension with the given number is known for the + specified message. + """ + try: + return self._extensions_by_number[message_descriptor][number] + except KeyError: + self._TryLoadExtensionFromDB(message_descriptor, number) + return self._extensions_by_number[message_descriptor][number] + + def FindAllExtensions(self, message_descriptor): + """Gets all the known extensions of a given message. + + Extensions have to be registered to this pool by build related + :func:`Add` or :func:`AddExtensionDescriptor`. + + Args: + message_descriptor (Descriptor): Descriptor of the extended message. + + Returns: + list[FieldDescriptor]: Field descriptors describing the extensions. + """ + # Fallback to descriptor db if FindAllExtensionNumbers is provided. + if self._descriptor_db and hasattr( + self._descriptor_db, 'FindAllExtensionNumbers'): + full_name = message_descriptor.full_name + all_numbers = self._descriptor_db.FindAllExtensionNumbers(full_name) + for number in all_numbers: + if number in self._extensions_by_number[message_descriptor]: + continue + self._TryLoadExtensionFromDB(message_descriptor, number) + + return list(self._extensions_by_number[message_descriptor].values()) + + def _TryLoadExtensionFromDB(self, message_descriptor, number): + """Try to Load extensions from descriptor db. + + Args: + message_descriptor: descriptor of the extended message. + number: the extension number that needs to be loaded. + """ + if not self._descriptor_db: + return + # Only supported when FindFileContainingExtension is provided. + if not hasattr( + self._descriptor_db, 'FindFileContainingExtension'): + return + + full_name = message_descriptor.full_name + file_proto = self._descriptor_db.FindFileContainingExtension( + full_name, number) + + if file_proto is None: + return + + try: + self._ConvertFileProtoToFileDescriptor(file_proto) + except: + warn_msg = ('Unable to load proto file %s for extension number %d.' % + (file_proto.name, number)) + warnings.warn(warn_msg, RuntimeWarning) + + def FindServiceByName(self, full_name): + """Loads the named service descriptor from the pool. + + Args: + full_name (str): The full name of the service descriptor to load. + + Returns: + ServiceDescriptor: The service descriptor for the named service. + + Raises: + KeyError: if the service cannot be found in the pool. 
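+
+    For example, using hypothetical names in the style of the module
+    docstring:
+
+      service_desc = pool.FindServiceByName('some.package.SomeService')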
+ """ + full_name = _NormalizeFullyQualifiedName(full_name) + if full_name not in self._service_descriptors: + self._FindFileContainingSymbolInDb(full_name) + return self._service_descriptors[full_name] + + def FindMethodByName(self, full_name): + """Loads the named service method descriptor from the pool. + + Args: + full_name (str): The full name of the method descriptor to load. + + Returns: + MethodDescriptor: The method descriptor for the service method. + + Raises: + KeyError: if the method cannot be found in the pool. + """ + full_name = _NormalizeFullyQualifiedName(full_name) + service_name, _, method_name = full_name.rpartition('.') + service_descriptor = self.FindServiceByName(service_name) + return service_descriptor.methods_by_name[method_name] + + def _FindFileContainingSymbolInDb(self, symbol): + """Finds the file in descriptor DB containing the specified symbol. + + Args: + symbol (str): The name of the symbol to search for. + + Returns: + FileDescriptor: The file that contains the specified symbol. + + Raises: + KeyError: if the file cannot be found in the descriptor database. + """ + try: + file_proto = self._internal_db.FindFileContainingSymbol(symbol) + except KeyError as error: + if self._descriptor_db: + file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) + else: + raise error + if not file_proto: + raise KeyError('Cannot find a file containing %s' % symbol) + return self._ConvertFileProtoToFileDescriptor(file_proto) + + def _ConvertFileProtoToFileDescriptor(self, file_proto): + """Creates a FileDescriptor from a proto or returns a cached copy. + + This method also has the side effect of loading all the symbols found in + the file into the appropriate dictionaries in the pool. + + Args: + file_proto: The proto to convert. + + Returns: + A FileDescriptor matching the passed in proto. + """ + if file_proto.name not in self._file_descriptors: + built_deps = list(self._GetDeps(file_proto.dependency)) + direct_deps = [self.FindFileByName(n) for n in file_proto.dependency] + public_deps = [direct_deps[i] for i in file_proto.public_dependency] + + file_descriptor = descriptor.FileDescriptor( + pool=self, + name=file_proto.name, + package=file_proto.package, + syntax=file_proto.syntax, + options=_OptionsOrNone(file_proto), + serialized_pb=file_proto.SerializeToString(), + dependencies=direct_deps, + public_dependencies=public_deps, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + scope = {} + + # This loop extracts all the message and enum types from all the + # dependencies of the file_proto. This is necessary to create the + # scope of available message types when defining the passed in + # file proto. 
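+      # Scope keys are fully-qualified names prefixed with a dot (see
+      # _PrefixWithDot), so later lookups via _GetTypeFromScope can resolve
+      # relative type names unambiguously.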
+ for dependency in built_deps: + scope.update(self._ExtractSymbols( + dependency.message_types_by_name.values())) + scope.update((_PrefixWithDot(enum.full_name), enum) + for enum in dependency.enum_types_by_name.values()) + + for message_type in file_proto.message_type: + message_desc = self._ConvertMessageDescriptor( + message_type, file_proto.package, file_descriptor, scope, + file_proto.syntax) + file_descriptor.message_types_by_name[message_desc.name] = ( + message_desc) + + for enum_type in file_proto.enum_type: + file_descriptor.enum_types_by_name[enum_type.name] = ( + self._ConvertEnumDescriptor(enum_type, file_proto.package, + file_descriptor, None, scope, True)) + + for index, extension_proto in enumerate(file_proto.extension): + extension_desc = self._MakeFieldDescriptor( + extension_proto, file_proto.package, index, file_descriptor, + is_extension=True) + extension_desc.containing_type = self._GetTypeFromScope( + file_descriptor.package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, + file_descriptor.package, scope) + file_descriptor.extensions_by_name[extension_desc.name] = ( + extension_desc) + self._file_desc_by_toplevel_extension[extension_desc.full_name] = ( + file_descriptor) + + for desc_proto in file_proto.message_type: + self._SetAllFieldTypes(file_proto.package, desc_proto, scope) + + if file_proto.package: + desc_proto_prefix = _PrefixWithDot(file_proto.package) + else: + desc_proto_prefix = '' + + for desc_proto in file_proto.message_type: + desc = self._GetTypeFromScope( + desc_proto_prefix, desc_proto.name, scope) + file_descriptor.message_types_by_name[desc_proto.name] = desc + + for index, service_proto in enumerate(file_proto.service): + file_descriptor.services_by_name[service_proto.name] = ( + self._MakeServiceDescriptor(service_proto, index, scope, + file_proto.package, file_descriptor)) + + self._file_descriptors[file_proto.name] = file_descriptor + + # Add extensions to the pool + file_desc = self._file_descriptors[file_proto.name] + for extension in file_desc.extensions_by_name.values(): + self._AddExtensionDescriptor(extension) + for message_type in file_desc.message_types_by_name.values(): + for extension in message_type.extensions: + self._AddExtensionDescriptor(extension) + + return file_desc + + def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, + scope=None, syntax=None): + """Adds the proto to the pool in the specified package. + + Args: + desc_proto: The descriptor_pb2.DescriptorProto protobuf message. + package: The package the proto should be located in. + file_desc: The file containing this message. + scope: Dict mapping short and full symbols to message and enum types. + syntax: string indicating syntax of the file ("proto2" or "proto3") + + Returns: + The added descriptor. 
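+
+    Side effects:
+      Registers the new descriptor in the pool and in the given scope under
+      its dot-prefixed full name.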
+ """ + + if package: + desc_name = '.'.join((package, desc_proto.name)) + else: + desc_name = desc_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + if scope is None: + scope = {} + + nested = [ + self._ConvertMessageDescriptor( + nested, desc_name, file_desc, scope, syntax) + for nested in desc_proto.nested_type] + enums = [ + self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, + scope, False) + for enum in desc_proto.enum_type] + fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc) + for index, field in enumerate(desc_proto.field)] + extensions = [ + self._MakeFieldDescriptor(extension, desc_name, index, file_desc, + is_extension=True) + for index, extension in enumerate(desc_proto.extension)] + oneofs = [ + # pylint: disable=g-complex-comprehension + descriptor.OneofDescriptor( + desc.name, + '.'.join((desc_name, desc.name)), + index, + None, + [], + _OptionsOrNone(desc), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + for index, desc in enumerate(desc_proto.oneof_decl) + ] + extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range] + if extension_ranges: + is_extendable = True + else: + is_extendable = False + desc = descriptor.Descriptor( + name=desc_proto.name, + full_name=desc_name, + filename=file_name, + containing_type=None, + fields=fields, + oneofs=oneofs, + nested_types=nested, + enum_types=enums, + extensions=extensions, + options=_OptionsOrNone(desc_proto), + is_extendable=is_extendable, + extension_ranges=extension_ranges, + file=file_desc, + serialized_start=None, + serialized_end=None, + syntax=syntax, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + for nested in desc.nested_types: + nested.containing_type = desc + for enum in desc.enum_types: + enum.containing_type = desc + for field_index, field_desc in enumerate(desc_proto.field): + if field_desc.HasField('oneof_index'): + oneof_index = field_desc.oneof_index + oneofs[oneof_index].fields.append(fields[field_index]) + fields[field_index].containing_oneof = oneofs[oneof_index] + + scope[_PrefixWithDot(desc_name)] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._descriptors[desc_name] = desc + return desc + + def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None, + containing_type=None, scope=None, top_level=False): + """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. + + Args: + enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message. + package: Optional package name for the new message EnumDescriptor. + file_desc: The file containing the enum descriptor. + containing_type: The type containing this enum. + scope: Scope containing available types. + top_level: If True, the enum is a top level symbol. If False, the enum + is defined inside a message. 
+ + Returns: + The added descriptor + """ + + if package: + enum_name = '.'.join((package, enum_proto.name)) + else: + enum_name = enum_proto.name + + if file_desc is None: + file_name = None + else: + file_name = file_desc.name + + values = [self._MakeEnumValueDescriptor(value, index) + for index, value in enumerate(enum_proto.value)] + desc = descriptor.EnumDescriptor(name=enum_proto.name, + full_name=enum_name, + filename=file_name, + file=file_desc, + values=values, + containing_type=containing_type, + options=_OptionsOrNone(enum_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + scope['.%s' % enum_name] = desc + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._enum_descriptors[enum_name] = desc + + # Add top level enum values. + if top_level: + for value in values: + full_name = _NormalizeFullyQualifiedName( + '.'.join((package, value.name))) + self._CheckConflictRegister(value, full_name, file_name) + self._top_enum_values[full_name] = value + + return desc + + def _MakeFieldDescriptor(self, field_proto, message_name, index, + file_desc, is_extension=False): + """Creates a field descriptor from a FieldDescriptorProto. + + For message and enum type fields, this method will do a look up + in the pool for the appropriate descriptor for that type. If it + is unavailable, it will fall back to the _source function to + create it. If this type is still unavailable, construction will + fail. + + Args: + field_proto: The proto describing the field. + message_name: The name of the containing message. + index: Index of the field + file_desc: The file containing the field descriptor. + is_extension: Indication that this field is for an extension. + + Returns: + An initialized FieldDescriptor object + """ + + if message_name: + full_name = '.'.join((message_name, field_proto.name)) + else: + full_name = field_proto.name + + if field_proto.json_name: + json_name = field_proto.json_name + else: + json_name = None + + return descriptor.FieldDescriptor( + name=field_proto.name, + full_name=full_name, + index=index, + number=field_proto.number, + type=field_proto.type, + cpp_type=None, + message_type=None, + enum_type=None, + containing_type=None, + label=field_proto.label, + has_default_value=False, + default_value=None, + is_extension=is_extension, + extension_scope=None, + options=_OptionsOrNone(field_proto), + json_name=json_name, + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _SetAllFieldTypes(self, package, desc_proto, scope): + """Sets all the descriptor's fields's types. + + This method also sets the containing types on any extensions. + + Args: + package: The current package of desc_proto. + desc_proto: The message descriptor to update. + scope: Enclosing scope of available types. 
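+
+    The method recurses into nested message types, so a single call on a
+    top-level message proto resolves field types for the whole subtree.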
+ """ + + package = _PrefixWithDot(package) + + main_desc = self._GetTypeFromScope(package, desc_proto.name, scope) + + if package == '.': + nested_package = _PrefixWithDot(desc_proto.name) + else: + nested_package = '.'.join([package, desc_proto.name]) + + for field_proto, field_desc in zip(desc_proto.field, main_desc.fields): + self._SetFieldType(field_proto, field_desc, nested_package, scope) + + for extension_proto, extension_desc in ( + zip(desc_proto.extension, main_desc.extensions)): + extension_desc.containing_type = self._GetTypeFromScope( + nested_package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, nested_package, scope) + + for nested_type in desc_proto.nested_type: + self._SetAllFieldTypes(nested_package, nested_type, scope) + + def _SetFieldType(self, field_proto, field_desc, package, scope): + """Sets the field's type, cpp_type, message_type and enum_type. + + Args: + field_proto: Data about the field in proto format. + field_desc: The descriptor to modify. + package: The package the field's container is in. + scope: Enclosing scope of available types. + """ + if field_proto.type_name: + desc = self._GetTypeFromScope(package, field_proto.type_name, scope) + else: + desc = None + + if not field_proto.HasField('type'): + if isinstance(desc, descriptor.Descriptor): + field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE + else: + field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM + + field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( + field_proto.type) + + if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE + or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): + field_desc.message_type = desc + + if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.enum_type = desc + + if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: + field_desc.has_default_value = False + field_desc.default_value = [] + elif field_proto.HasField('default_value'): + field_desc.has_default_value = True + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = float(field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = field_proto.default_value + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = field_proto.default_value.lower() == 'true' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values_by_name[ + field_proto.default_value].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = text_encoding.CUnescape( + field_proto.default_value) + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + else: + # All other types are of the "int" type. 
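+        # (int32/int64, uint32/uint64, sint32/sint64, fixed32/fixed64 and
+        # sfixed32/sfixed64 all parse their textual default with int().)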
+ field_desc.default_value = int(field_proto.default_value) + else: + field_desc.has_default_value = False + if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = 0.0 + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: + field_desc.default_value = u'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = False + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: + field_desc.default_value = field_desc.enum_type.values[0].number + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: + field_desc.default_value = b'' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: + field_desc.default_value = None + elif field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP: + field_desc.default_value = None + else: + # All other types are of the "int" type. + field_desc.default_value = 0 + + field_desc.type = field_proto.type + + def _MakeEnumValueDescriptor(self, value_proto, index): + """Creates a enum value descriptor object from a enum value proto. + + Args: + value_proto: The proto describing the enum value. + index: The index of the enum value. + + Returns: + An initialized EnumValueDescriptor object. + """ + + return descriptor.EnumValueDescriptor( + name=value_proto.name, + index=index, + number=value_proto.number, + options=_OptionsOrNone(value_proto), + type=None, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _MakeServiceDescriptor(self, service_proto, service_index, scope, + package, file_desc): + """Make a protobuf ServiceDescriptor given a ServiceDescriptorProto. + + Args: + service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message. + service_index: The index of the service in the File. + scope: Dict mapping short and full symbols to message and enum types. + package: Optional package name for the new message EnumDescriptor. + file_desc: The file containing the service descriptor. + + Returns: + The added descriptor. + """ + + if package: + service_name = '.'.join((package, service_proto.name)) + else: + service_name = service_proto.name + + methods = [self._MakeMethodDescriptor(method_proto, service_name, package, + scope, index) + for index, method_proto in enumerate(service_proto.method)] + desc = descriptor.ServiceDescriptor( + name=service_proto.name, + full_name=service_name, + index=service_index, + methods=methods, + options=_OptionsOrNone(service_proto), + file=file_desc, + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + self._CheckConflictRegister(desc, desc.full_name, desc.file.name) + self._service_descriptors[service_name] = desc + return desc + + def _MakeMethodDescriptor(self, method_proto, service_name, package, scope, + index): + """Creates a method descriptor from a MethodDescriptorProto. + + Args: + method_proto: The proto describing the method. + service_name: The name of the containing service. + package: Optional package name to look up for types. + scope: Scope containing available types. + index: Index of the method in the service. + + Returns: + An initialized MethodDescriptor object. 
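+
+    The method's input and output types are resolved against the given
+    scope via _GetTypeFromScope before the descriptor is constructed.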
+ """ + full_name = '.'.join((service_name, method_proto.name)) + input_type = self._GetTypeFromScope( + package, method_proto.input_type, scope) + output_type = self._GetTypeFromScope( + package, method_proto.output_type, scope) + return descriptor.MethodDescriptor( + name=method_proto.name, + full_name=full_name, + index=index, + containing_service=None, + input_type=input_type, + output_type=output_type, + client_streaming=method_proto.client_streaming, + server_streaming=method_proto.server_streaming, + options=_OptionsOrNone(method_proto), + # pylint: disable=protected-access + create_key=descriptor._internal_create_key) + + def _ExtractSymbols(self, descriptors): + """Pulls out all the symbols from descriptor protos. + + Args: + descriptors: The messages to extract descriptors from. + Yields: + A two element tuple of the type name and descriptor object. + """ + + for desc in descriptors: + yield (_PrefixWithDot(desc.full_name), desc) + for symbol in self._ExtractSymbols(desc.nested_types): + yield symbol + for enum in desc.enum_types: + yield (_PrefixWithDot(enum.full_name), enum) + + def _GetDeps(self, dependencies, visited=None): + """Recursively finds dependencies for file protos. + + Args: + dependencies: The names of the files being depended on. + visited: The names of files already found. + + Yields: + Each direct and indirect dependency. + """ + + visited = visited or set() + for dependency in dependencies: + if dependency not in visited: + visited.add(dependency) + dep_desc = self.FindFileByName(dependency) + yield dep_desc + public_files = [d.name for d in dep_desc.public_dependencies] + yield from self._GetDeps(public_files, visited) + + def _GetTypeFromScope(self, package, type_name, scope): + """Finds a given type name in the current scope. + + Args: + package: The package the proto should be located in. + type_name: The name of the type to be found in the scope. + scope: Dict mapping short and full symbols to message and enum types. + + Returns: + The descriptor for the requested type. + """ + if type_name not in scope: + components = _PrefixWithDot(package).split('.') + while components: + possible_match = '.'.join(components + [type_name]) + if possible_match in scope: + type_name = possible_match + break + else: + components.pop(-1) + return scope[type_name] + + +def _PrefixWithDot(name): + return name if name.startswith('.') else '.%s' % name + + +if _USE_C_DESCRIPTORS: + # TODO(amauryfa): This pool could be constructed from Python code, when we + # support a flag like 'use_cpp_generated_pool=True'. + # pylint: disable=protected-access + _DEFAULT = descriptor._message.default_pool +else: + _DEFAULT = DescriptorPool() + + +def Default(): + return _DEFAULT diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/duration_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/duration_pb2.py new file mode 100644 index 00000000..a8ecc07b --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/duration_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/duration.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x83\x01\n\x13\x63om.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.duration_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\rDurationProtoP\001Z1google.golang.org/protobuf/types/known/durationpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _DURATION._serialized_start=51 + _DURATION._serialized_end=93 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/empty_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/empty_pb2.py new file mode 100644 index 00000000..0b4d554d --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/empty_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/empty.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05\x45mptyB}\n\x13\x63om.google.protobufB\nEmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.empty_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\nEmptyProtoP\001Z.google.golang.org/protobuf/types/known/emptypb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _EMPTY._serialized_start=48 + _EMPTY._serialized_end=55 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/field_mask_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/field_mask_pb2.py new file mode 100644 index 00000000..80a4e96e --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/field_mask_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/field_mask.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"\x1a\n\tFieldMask\x12\r\n\x05paths\x18\x01 \x03(\tB\x85\x01\n\x13\x63om.google.protobufB\x0e\x46ieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.field_mask_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016FieldMaskProtoP\001Z2google.golang.org/protobuf/types/known/fieldmaskpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _FIELDMASK._serialized_start=53 + _FIELDMASK._serialized_end=79 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/_api_implementation.cpython-39-darwin.so b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/_api_implementation.cpython-39-darwin.so new file mode 100755 index 00000000..a8923e88 Binary files /dev/null and b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/_api_implementation.cpython-39-darwin.so differ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/api_implementation.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/api_implementation.py new file mode 100644 index 00000000..7fef2376 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/api_implementation.py @@ -0,0 +1,112 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Determine which implementation of the protobuf API is used in this process. +""" + +import os +import sys +import warnings + +try: + # pylint: disable=g-import-not-at-top + from google.protobuf.internal import _api_implementation + # The compile-time constants in the _api_implementation module can be used to + # switch to a certain implementation of the Python API at build time. + _api_version = _api_implementation.api_version +except ImportError: + _api_version = -1 # Unspecified by compiler flags. + +if _api_version == 1: + raise ValueError('api_version=1 is no longer supported.') + + +_default_implementation_type = ('cpp' if _api_version > 0 else 'python') + + +# This environment variable can be used to switch to a certain implementation +# of the Python API, overriding the compile-time constants in the +# _api_implementation module. Right now only 'python' and 'cpp' are valid +# values. Any other value will be ignored. +_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', + _default_implementation_type) + +if _implementation_type != 'python': + _implementation_type = 'cpp' + +if 'PyPy' in sys.version and _implementation_type == 'cpp': + warnings.warn('PyPy does not work yet with cpp protocol buffers. ' + 'Falling back to the python implementation.') + _implementation_type = 'python' + + +# Detect if serialization should be deterministic by default +try: + # The presence of this module in a build allows the proto implementation to + # be upgraded merely via build deps. + # + # NOTE: Merely importing this automatically enables deterministic proto + # serialization for C++ code, but we still need to export it as a boolean so + # that we can do the same for `_implementation_type == 'python'`. + # + # NOTE2: It is possible for C++ code to enable deterministic serialization by + # default _without_ affecting Python code, if the C++ implementation is not in + # use by this module. That is intended behavior, so we don't actually expose + # this boolean outside of this module. + # + # pylint: disable=g-import-not-at-top,unused-import + from google.protobuf import enable_deterministic_proto_serialization + _python_deterministic_proto_serialization = True +except ImportError: + _python_deterministic_proto_serialization = False + + +# Usage of this function is discouraged. Clients shouldn't care which +# implementation of the API is in use. Note that there is no guarantee +# that differences between APIs will be maintained. +# Please don't use this function if possible. +def Type(): + return _implementation_type + + +def _SetType(implementation_type): + """Never use! 
Only for protobuf benchmark.""" + global _implementation_type + _implementation_type = implementation_type + + +# See comment on 'Type' above. +def Version(): + return 2 + + +# For internal use only +def IsPythonDefaultSerializationDeterministic(): + return _python_deterministic_proto_serialization diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/builder.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/builder.py new file mode 100644 index 00000000..64353ee4 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/builder.py @@ -0,0 +1,130 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Builds descriptors, message classes and services for generated _pb2.py. + +This file is only called in python generated _pb2.py files. It builds +descriptors, message classes and services that users can directly use +in generated code. +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +_sym_db = _symbol_database.Default() + + +def BuildMessageAndEnumDescriptors(file_des, module): + """Builds message and enum descriptors. 
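+
+  Each descriptor is exported into the module under the generated-code
+  naming convention: an underscore followed by the upper-cased type path,
+  so a message 'Foo' lands in _FOO and its nested type 'Bar' in _FOO_BAR.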
+ + Args: + file_des: FileDescriptor of the .proto file + module: Generated _pb2 module + """ + + def BuildNestedDescriptors(msg_des, prefix): + for (name, nested_msg) in msg_des.nested_types_by_name.items(): + module_name = prefix + name.upper() + module[module_name] = nested_msg + BuildNestedDescriptors(nested_msg, module_name + '_') + for enum_des in msg_des.enum_types: + module[prefix + enum_des.name.upper()] = enum_des + + for (name, msg_des) in file_des.message_types_by_name.items(): + module_name = '_' + name.upper() + module[module_name] = msg_des + BuildNestedDescriptors(msg_des, module_name + '_') + + +def BuildTopDescriptorsAndMessages(file_des, module_name, module): + """Builds top level descriptors and message classes. + + Args: + file_des: FileDescriptor of the .proto file + module_name: str, the name of generated _pb2 module + module: Generated _pb2 module + """ + + def BuildMessage(msg_des): + create_dict = {} + for (name, nested_msg) in msg_des.nested_types_by_name.items(): + create_dict[name] = BuildMessage(nested_msg) + create_dict['DESCRIPTOR'] = msg_des + create_dict['__module__'] = module_name + message_class = _reflection.GeneratedProtocolMessageType( + msg_des.name, (_message.Message,), create_dict) + _sym_db.RegisterMessage(message_class) + return message_class + + # top level enums + for (name, enum_des) in file_des.enum_types_by_name.items(): + module['_' + name.upper()] = enum_des + module[name] = enum_type_wrapper.EnumTypeWrapper(enum_des) + for enum_value in enum_des.values: + module[enum_value.name] = enum_value.number + + # top level extensions + for (name, extension_des) in file_des.extensions_by_name.items(): + module[name.upper() + '_FIELD_NUMBER'] = extension_des.number + module[name] = extension_des + + # services + for (name, service) in file_des.services_by_name.items(): + module['_' + name.upper()] = service + + # Build messages. + for (name, msg_des) in file_des.message_types_by_name.items(): + module[name] = BuildMessage(msg_des) + + +def BuildServices(file_des, module_name, module): + """Builds services classes and services stub class. + + Args: + file_des: FileDescriptor of the .proto file + module_name: str, the name of generated _pb2 module + module: Generated _pb2 module + """ + # pylint: disable=g-import-not-at-top + from google.protobuf import service as _service + from google.protobuf import service_reflection + # pylint: enable=g-import-not-at-top + for (name, service) in file_des.services_by_name.items(): + module[name] = service_reflection.GeneratedServiceType( + name, (_service.Service,), + dict(DESCRIPTOR=service, __module__=module_name)) + stub_name = name + '_Stub' + module[stub_name] = service_reflection.GeneratedServiceStubType( + stub_name, (module[name],), + dict(DESCRIPTOR=service, __module__=module_name)) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/containers.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/containers.py new file mode 100644 index 00000000..29fbb53d --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/containers.py @@ -0,0 +1,710 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. 
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains container classes to represent different protocol buffer types. + +This file defines container classes which represent categories of protocol +buffer field types which need extra maintenance. Currently these categories +are: + +- Repeated scalar fields - These are all repeated fields which aren't + composite (e.g. they are of simple types like int32, string, etc). +- Repeated composite fields - Repeated fields which are composite. This + includes groups and nested messages. +""" + +import collections.abc +import copy +import pickle +from typing import ( + Any, + Iterable, + Iterator, + List, + MutableMapping, + MutableSequence, + NoReturn, + Optional, + Sequence, + TypeVar, + Union, + overload, +) + + +_T = TypeVar('_T') +_K = TypeVar('_K') +_V = TypeVar('_V') + + +class BaseContainer(Sequence[_T]): + """Base container class.""" + + # Minimizes memory usage and disallows assignment to other attributes. + __slots__ = ['_message_listener', '_values'] + + def __init__(self, message_listener: Any) -> None: + """ + Args: + message_listener: A MessageListener implementation. + The RepeatedScalarFieldContainer will call this object's + Modified() method when it is modified. + """ + self._message_listener = message_listener + self._values = [] + + @overload + def __getitem__(self, key: int) -> _T: + ... + + @overload + def __getitem__(self, key: slice) -> List[_T]: + ... + + def __getitem__(self, key): + """Retrieves item by the specified key.""" + return self._values[key] + + def __len__(self) -> int: + """Returns the number of elements in the container.""" + return len(self._values) + + def __ne__(self, other: Any) -> bool: + """Checks if another instance isn't equal to this one.""" + # The concrete classes should define __eq__. + return not self == other + + __hash__ = None + + def __repr__(self) -> str: + return repr(self._values) + + def sort(self, *args, **kwargs) -> None: + # Continue to support the old sort_function keyword argument. 
+ # This is expected to be a rare occurrence, so use LBYL to avoid + # the overhead of actually catching KeyError. + if 'sort_function' in kwargs: + kwargs['cmp'] = kwargs.pop('sort_function') + self._values.sort(*args, **kwargs) + + def reverse(self) -> None: + self._values.reverse() + + +# TODO(slebedev): Remove this. BaseContainer does *not* conform to +# MutableSequence, only its subclasses do. +collections.abc.MutableSequence.register(BaseContainer) + + +class RepeatedScalarFieldContainer(BaseContainer[_T], MutableSequence[_T]): + """Simple, type-checked, list-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. + __slots__ = ['_type_checker'] + + def __init__( + self, + message_listener: Any, + type_checker: Any, + ) -> None: + """Args: + + message_listener: A MessageListener implementation. The + RepeatedScalarFieldContainer will call this object's Modified() method + when it is modified. + type_checker: A type_checkers.ValueChecker instance to run on elements + inserted into this container. + """ + super().__init__(message_listener) + self._type_checker = type_checker + + def append(self, value: _T) -> None: + """Appends an item to the list. Similar to list.append().""" + self._values.append(self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position. Similar to list.insert().""" + self._values.insert(key, self._type_checker.CheckValue(value)) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given iterable. Similar to list.extend().""" + if elem_seq is None: + return + try: + elem_seq_iter = iter(elem_seq) + except TypeError: + if not elem_seq: + # silently ignore falsy inputs :-/. + # TODO(ptucker): Deprecate this behavior. b/18413862 + return + raise + + new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter] + if new_values: + self._values.extend(new_values) + self._message_listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedScalarFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one. We do not check the types of the individual fields. + """ + self._values.extend(other) + self._message_listener.Modified() + + def remove(self, elem: _T): + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... 
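+
+  # The two __setitem__ overloads above exist only for static type
+  # checkers; the implementation below handles both int and slice keys.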
+
+  def __setitem__(self, key, value) -> None:
+    """Sets the item on the specified position."""
+    if isinstance(key, slice):
+      if key.step is not None:
+        raise ValueError('Extended slices not supported')
+      self._values[key] = map(self._type_checker.CheckValue, value)
+      self._message_listener.Modified()
+    else:
+      self._values[key] = self._type_checker.CheckValue(value)
+      self._message_listener.Modified()
+
+  def __delitem__(self, key: Union[int, slice]) -> None:
+    """Deletes the item at the specified position."""
+    del self._values[key]
+    self._message_listener.Modified()
+
+  def __eq__(self, other: Any) -> bool:
+    """Compares the current instance with another one."""
+    if self is other:
+      return True
+    # Special case for the same type which should be common and fast.
+    if isinstance(other, self.__class__):
+      return other._values == self._values
+    # We are presumably comparing against some other sequence type.
+    return other == self._values
+
+  def __deepcopy__(
+      self,
+      unused_memo: Any = None,
+  ) -> 'RepeatedScalarFieldContainer[_T]':
+    clone = RepeatedScalarFieldContainer(
+        copy.deepcopy(self._message_listener), self._type_checker)
+    clone.MergeFrom(self)
+    return clone
+
+  def __reduce__(self, **kwargs) -> NoReturn:
+    raise pickle.PickleError(
+        "Can't pickle repeated scalar fields, convert to list first")
+
+
+# TODO(slebedev): Constrain T to be a subtype of Message.
+class RepeatedCompositeFieldContainer(BaseContainer[_T], MutableSequence[_T]):
+  """Simple, list-like container for holding repeated composite fields."""
+
+  # Disallows assignment to other attributes.
+  __slots__ = ['_message_descriptor']
+
+  def __init__(self, message_listener: Any, message_descriptor: Any) -> None:
+    """
+    Note that we pass in a descriptor instead of the generated class directly,
+    since at the time we construct a _RepeatedCompositeFieldContainer we
+    haven't yet necessarily initialized the type that will be contained in the
+    container.
+
+    Args:
+      message_listener: A MessageListener implementation.
+        The RepeatedCompositeFieldContainer will call this object's
+        Modified() method when it is modified.
+      message_descriptor: A Descriptor instance describing the protocol type
+        that should be present in this container. We'll use the
+        _concrete_class field of this descriptor when the client calls add().
+    """
+    super().__init__(message_listener)
+    self._message_descriptor = message_descriptor
+
+  def add(self, **kwargs: Any) -> _T:
+    """Adds a new element at the end of the list and returns it. Keyword
+    arguments may be used to initialize the element.
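+
+    A sketch of typical use (hypothetical field names; assumes `msg` has a
+    repeated message field `items` whose element type has an int32 field
+    `id`):
+
+      element = msg.items.add(id=5)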
+ """ + new_element = self._message_descriptor._concrete_class(**kwargs) + new_element._SetListener(self._message_listener) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + return new_element + + def append(self, value: _T) -> None: + """Appends one element by copying the message.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.append(new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def insert(self, key: int, value: _T) -> None: + """Inserts the item at the specified position by copying.""" + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + new_element.CopyFrom(value) + self._values.insert(key, new_element) + if not self._message_listener.dirty: + self._message_listener.Modified() + + def extend(self, elem_seq: Iterable[_T]) -> None: + """Extends by appending the given sequence of elements of the same type + + as this one, copying each individual message. + """ + message_class = self._message_descriptor._concrete_class + listener = self._message_listener + values = self._values + for message in elem_seq: + new_element = message_class() + new_element._SetListener(listener) + new_element.MergeFrom(message) + values.append(new_element) + listener.Modified() + + def MergeFrom( + self, + other: Union['RepeatedCompositeFieldContainer[_T]', Iterable[_T]], + ) -> None: + """Appends the contents of another repeated field of the same type to this + one, copying each individual message. + """ + self.extend(other) + + def remove(self, elem: _T) -> None: + """Removes an item from the list. Similar to list.remove().""" + self._values.remove(elem) + self._message_listener.Modified() + + def pop(self, key: Optional[int] = -1) -> _T: + """Removes and returns an item at a given index. Similar to list.pop().""" + value = self._values[key] + self.__delitem__(key) + return value + + @overload + def __setitem__(self, key: int, value: _T) -> None: + ... + + @overload + def __setitem__(self, key: slice, value: Iterable[_T]) -> None: + ... + + def __setitem__(self, key, value): + # This method is implemented to make RepeatedCompositeFieldContainer + # structurally compatible with typing.MutableSequence. It is + # otherwise unsupported and will always raise an error. + raise TypeError( + f'{self.__class__.__name__} object does not support item assignment') + + def __delitem__(self, key: Union[int, slice]) -> None: + """Deletes the item at the specified position.""" + del self._values[key] + self._message_listener.Modified() + + def __eq__(self, other: Any) -> bool: + """Compares the current instance with another one.""" + if self is other: + return True + if not isinstance(other, self.__class__): + raise TypeError('Can only compare repeated composite fields against ' + 'other repeated composite fields.') + return self._values == other._values + + +class ScalarMap(MutableMapping[_K, _V]): + """Simple, type-checked, dict-like container for holding repeated scalars.""" + + # Disallows assignment to other attributes. + __slots__ = ['_key_checker', '_value_checker', '_values', '_message_listener', + '_entry_descriptor'] + + def __init__( + self, + message_listener: Any, + key_checker: Any, + value_checker: Any, + entry_descriptor: Any, + ) -> None: + """ + Args: + message_listener: A MessageListener implementation. 
+        The ScalarMap will call this object's Modified() method when it
+        is modified.
+      key_checker: A type_checkers.ValueChecker instance to run on keys
+        inserted into this container.
+      value_checker: A type_checkers.ValueChecker instance to run on values
+        inserted into this container.
+      entry_descriptor: The MessageDescriptor of a map entry: key and value.
+    """
+    self._message_listener = message_listener
+    self._key_checker = key_checker
+    self._value_checker = value_checker
+    self._entry_descriptor = entry_descriptor
+    self._values = {}
+
+  def __getitem__(self, key: _K) -> _V:
+    try:
+      return self._values[key]
+    except KeyError:
+      key = self._key_checker.CheckValue(key)
+      val = self._value_checker.DefaultValue()
+      self._values[key] = val
+      return val
+
+  def __contains__(self, item: _K) -> bool:
+    # We check the key's type to match the strong-typing flavor of the API.
+    # Also this makes it easier to match the behavior of the C++ implementation.
+    self._key_checker.CheckValue(item)
+    return item in self._values
+
+  @overload
+  def get(self, key: _K) -> Optional[_V]:
+    ...
+
+  @overload
+  def get(self, key: _K, default: _T) -> Union[_V, _T]:
+    ...
+
+  # We need to override this explicitly, because our defaultdict-like behavior
+  # will make the default implementation (from our base class) always insert
+  # the key.
+  def get(self, key, default=None):
+    if key in self:
+      return self[key]
+    else:
+      return default
+
+  def __setitem__(self, key: _K, value: _V) -> None:
+    checked_key = self._key_checker.CheckValue(key)
+    checked_value = self._value_checker.CheckValue(value)
+    self._values[checked_key] = checked_value
+    self._message_listener.Modified()
+
+  def __delitem__(self, key: _K) -> None:
+    del self._values[key]
+    self._message_listener.Modified()
+
+  def __len__(self) -> int:
+    return len(self._values)
+
+  def __iter__(self) -> Iterator[_K]:
+    return iter(self._values)
+
+  def __repr__(self) -> str:
+    return repr(self._values)
+
+  def MergeFrom(self, other: 'ScalarMap[_K, _V]') -> None:
+    self._values.update(other._values)
+    self._message_listener.Modified()
+
+  def InvalidateIterators(self) -> None:
+    # It appears that the only way to reliably invalidate iterators to
+    # self._values is to ensure that its size changes.
+    original = self._values
+    self._values = original.copy()
+    original[None] = None
+
+  # This is defined in the abstract base, but we can do it much more cheaply.
+  def clear(self) -> None:
+    self._values.clear()
+    self._message_listener.Modified()
+
+  def GetEntryClass(self) -> Any:
+    return self._entry_descriptor._concrete_class
+
+
+class MessageMap(MutableMapping[_K, _V]):
+  """Simple, type-checked, dict-like container for holding submessage values."""
+
+  # Disallows assignment to other attributes.
+  __slots__ = ['_key_checker', '_values', '_message_listener',
+               '_message_descriptor', '_entry_descriptor']
+
+  def __init__(
+      self,
+      message_listener: Any,
+      message_descriptor: Any,
+      key_checker: Any,
+      entry_descriptor: Any,
+  ) -> None:
+    """
+    Args:
+      message_listener: A MessageListener implementation.
+        The MessageMap will call this object's Modified() method when it
+        is modified.
+      message_descriptor: The MessageDescriptor of the submessage values.
+      key_checker: A type_checkers.ValueChecker instance to run on keys
+        inserted into this container.
+      entry_descriptor: The MessageDescriptor of a map entry: key and value.
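+
+    Note the defaultdict-like behavior implemented by __getitem__ below:
+    reading a missing key creates, stores, and returns a new submessage, so
+    (with hypothetical names) msg.my_map['k'].field = 1 never raises KeyError.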
+ """ + self._message_listener = message_listener + self._message_descriptor = message_descriptor + self._key_checker = key_checker + self._entry_descriptor = entry_descriptor + self._values = {} + + def __getitem__(self, key: _K) -> _V: + key = self._key_checker.CheckValue(key) + try: + return self._values[key] + except KeyError: + new_element = self._message_descriptor._concrete_class() + new_element._SetListener(self._message_listener) + self._values[key] = new_element + self._message_listener.Modified() + return new_element + + def get_or_create(self, key: _K) -> _V: + """get_or_create() is an alias for getitem (ie. map[key]). + + Args: + key: The key to get or create in the map. + + This is useful in cases where you want to be explicit that the call is + mutating the map. This can avoid lint errors for statements like this + that otherwise would appear to be pointless statements: + + msg.my_map[key] + """ + return self[key] + + @overload + def get(self, key: _K) -> Optional[_V]: + ... + + @overload + def get(self, key: _K, default: _T) -> Union[_V, _T]: + ... + + # We need to override this explicitly, because our defaultdict-like behavior + # will make the default implementation (from our base class) always insert + # the key. + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def __contains__(self, item: _K) -> bool: + item = self._key_checker.CheckValue(item) + return item in self._values + + def __setitem__(self, key: _K, value: _V) -> NoReturn: + raise ValueError('May not set values directly, call my_map[key].foo = 5') + + def __delitem__(self, key: _K) -> None: + key = self._key_checker.CheckValue(key) + del self._values[key] + self._message_listener.Modified() + + def __len__(self) -> int: + return len(self._values) + + def __iter__(self) -> Iterator[_K]: + return iter(self._values) + + def __repr__(self) -> str: + return repr(self._values) + + def MergeFrom(self, other: 'MessageMap[_K, _V]') -> None: + # pylint: disable=protected-access + for key in other._values: + # According to documentation: "When parsing from the wire or when merging, + # if there are duplicate map keys the last key seen is used". + if key in self: + del self[key] + self[key].CopyFrom(other[key]) + # self._message_listener.Modified() not required here, because + # mutations to submessages already propagate. + + def InvalidateIterators(self) -> None: + # It appears that the only way to reliably invalidate iterators to + # self._values is to ensure that its size changes. + original = self._values + self._values = original.copy() + original[None] = None + + # This is defined in the abstract base, but we can do it much more cheaply. + def clear(self) -> None: + self._values.clear() + self._message_listener.Modified() + + def GetEntryClass(self) -> Any: + return self._entry_descriptor._concrete_class + + +class _UnknownField: + """A parsed unknown field.""" + + # Disallows assignment to other attributes. 
+  __slots__ = ['_field_number', '_wire_type', '_data']
+
+  def __init__(self, field_number, wire_type, data):
+    self._field_number = field_number
+    self._wire_type = wire_type
+    self._data = data
+    return
+
+  def __lt__(self, other):
+    # pylint: disable=protected-access
+    return self._field_number < other._field_number
+
+  def __eq__(self, other):
+    if self is other:
+      return True
+    # pylint: disable=protected-access
+    return (self._field_number == other._field_number and
+            self._wire_type == other._wire_type and
+            self._data == other._data)
+
+
+class UnknownFieldRef:  # pylint: disable=missing-class-docstring
+
+  def __init__(self, parent, index):
+    self._parent = parent
+    self._index = index
+
+  def _check_valid(self):
+    if not self._parent:
+      raise ValueError('UnknownField does not exist. '
+                       'The parent message might be cleared.')
+    if self._index >= len(self._parent):
+      raise ValueError('UnknownField does not exist. '
+                       'The parent message might be cleared.')
+
+  @property
+  def field_number(self):
+    self._check_valid()
+    # pylint: disable=protected-access
+    return self._parent._internal_get(self._index)._field_number
+
+  @property
+  def wire_type(self):
+    self._check_valid()
+    # pylint: disable=protected-access
+    return self._parent._internal_get(self._index)._wire_type
+
+  @property
+  def data(self):
+    self._check_valid()
+    # pylint: disable=protected-access
+    return self._parent._internal_get(self._index)._data
+
+
+class UnknownFieldSet:
+  """UnknownField container"""
+
+  # Disallows assignment to other attributes.
+  __slots__ = ['_values']
+
+  def __init__(self):
+    self._values = []
+
+  def __getitem__(self, index):
+    if self._values is None:
+      raise ValueError('UnknownFields does not exist. '
+                       'The parent message might be cleared.')
+    size = len(self._values)
+    if index < 0:
+      index += size
+    if index < 0 or index >= size:
+      raise IndexError('index %d out of range' % index)
+
+    return UnknownFieldRef(self, index)
+
+  def _internal_get(self, index):
+    return self._values[index]
+
+  def __len__(self):
+    if self._values is None:
+      raise ValueError('UnknownFields does not exist. '
+                       'The parent message might be cleared.')
+    return len(self._values)
+
+  def _add(self, field_number, wire_type, data):
+    unknown_field = _UnknownField(field_number, wire_type, data)
+    self._values.append(unknown_field)
+    return unknown_field
+
+  def __iter__(self):
+    for i in range(len(self)):
+      yield UnknownFieldRef(self, i)
+
+  def _extend(self, other):
+    if other is None:
+      return
+    # pylint: disable=protected-access
+    self._values.extend(other._values)
+
+  def __eq__(self, other):
+    if self is other:
+      return True
+    # Sort unknown fields because their order shouldn't
+    # affect equality test.
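+    # (_UnknownField.__lt__ above compares only field numbers, so the
+    # sorted() calls below order both sides by field number before the
+    # element-wise comparison.)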
+ values = list(self._values) + if other is None: + return not values + values.sort() + # pylint: disable=protected-access + other_values = sorted(other._values) + return values == other_values + + def _clear(self): + for value in self._values: + # pylint: disable=protected-access + if isinstance(value._data, UnknownFieldSet): + value._data._clear() # pylint: disable=protected-access + self._values = None diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/decoder.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/decoder.py new file mode 100644 index 00000000..bc1b7b78 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/decoder.py @@ -0,0 +1,1029 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Code for decoding protocol buffer primitives. + +This code is very similar to encoder.py -- read the docs for that module first. + +A "decoder" is a function with the signature: + Decode(buffer, pos, end, message, field_dict) +The arguments are: + buffer: The string containing the encoded message. + pos: The current position in the string. + end: The position in the string where the current message ends. May be + less than len(buffer) if we're reading a sub-message. + message: The message object into which we're parsing. + field_dict: message._fields (avoids a hashtable lookup). +The decoder reads the field and stores it into field_dict, returning the new +buffer position. A decoder for a repeated field may proactively decode all of +the elements of that field, if they appear consecutively. + +Note that decoders may throw any of the following: + IndexError: Indicates a truncated message. + struct.error: Unpacking of a fixed-width field failed. + message.DecodeError: Other errors. + +Decoders are expected to raise an exception if they are called with pos > end. 
+This allows callers to be lax about bounds checking: it's fine to read past
+"end" as long as you are sure that someone else will notice and throw an
+exception later on.
+
+Something up the call stack is expected to catch IndexError and struct.error
+and convert them to message.DecodeError.
+
+Decoders are constructed using decoder constructors with the signature:
+  MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
+The arguments are:
+  field_number: The field number of the field we want to decode.
+  is_repeated: Is the field a repeated field? (bool)
+  is_packed: Is the field a packed field? (bool)
+  key: The key to use when looking up the field within field_dict.
+    (This is actually the FieldDescriptor but nothing in this
+    file should depend on that.)
+  new_default: A function which takes a message object as a parameter and
+    returns a new instance of the default value for this field.
+    (This is called for repeated fields and sub-messages, when an
+    instance does not already exist.)
+
+As with encoders, we define a decoder constructor for every type of field.
+Then, for every field of every message class we construct an actual decoder.
+That decoder goes into a dict indexed by tag, so when we decode a message
+we repeatedly read a tag, look up the corresponding decoder, and invoke it.
+"""
+
+__author__ = 'kenton@google.com (Kenton Varda)'
+
+import math
+import struct
+
+from google.protobuf.internal import containers
+from google.protobuf.internal import encoder
+from google.protobuf.internal import wire_format
+from google.protobuf import message
+
+
+# This is not for optimization, but rather to avoid conflicts with local
+# variables named "message".
+_DecodeError = message.DecodeError
+
+
+def _VarintDecoder(mask, result_type):
+  """Return a decoder for a basic varint value (does not include tag).
+
+  Decoded values will be bitwise-anded with the given mask before being
+  returned, e.g. to limit them to 32 bits. The returned decoder does not
+  take the usual "end" parameter -- the caller is expected to do bounds checking
+  after the fact (often the caller can defer such checking until later). The
+  decoder returns a (value, new_pos) pair.
+  """
+
+  def DecodeVarint(buffer, pos):
+    result = 0
+    shift = 0
+    while 1:
+      b = buffer[pos]
+      result |= ((b & 0x7f) << shift)
+      pos += 1
+      if not (b & 0x80):
+        result &= mask
+        result = result_type(result)
+        return (result, pos)
+      shift += 7
+      if shift >= 64:
+        raise _DecodeError('Too many bytes when decoding varint.')
+  return DecodeVarint
+
+
+def _SignedVarintDecoder(bits, result_type):
+  """Like _VarintDecoder() but decodes signed values."""
+
+  signbit = 1 << (bits - 1)
+  mask = (1 << bits) - 1
+
+  def DecodeVarint(buffer, pos):
+    result = 0
+    shift = 0
+    while 1:
+      b = buffer[pos]
+      result |= ((b & 0x7f) << shift)
+      pos += 1
+      if not (b & 0x80):
+        result &= mask
+        result = (result ^ signbit) - signbit
+        result = result_type(result)
+        return (result, pos)
+      shift += 7
+      if shift >= 64:
+        raise _DecodeError('Too many bytes when decoding varint.')
+  return DecodeVarint
+
+# All 32-bit and 64-bit values are represented as int.
+_DecodeVarint = _VarintDecoder((1 << 64) - 1, int)
+_DecodeSignedVarint = _SignedVarintDecoder(64, int)
+
+# Use these versions for values which must be limited to 32 bits.
+_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int)
+_DecodeSignedVarint32 = _SignedVarintDecoder(32, int)
+
+
+def ReadTag(buffer, pos):
+  """Read a tag from the memoryview, and return a (tag_bytes, new_pos) tuple.
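+
+  As a concrete sketch: for a message whose field 1 holds the varint 150
+  (the classic b'\x08\x96\x01' encoding), the tag byte is 0x08
+  (field_number 1 << 3 | wire_type 0), so ReadTag(memoryview(b'\x08\x96\x01'), 0)
+  returns (b'\x08', 1).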
+ + We return the raw bytes of the tag rather than decoding them. The raw + bytes can then be used to look up the proper decoder. This effectively allows + us to trade some work that would be done in pure-python (decoding a varint) + for work that is done in C (searching for a byte string in a hash table). + In a low-level language it would be much cheaper to decode the varint and + use that, but not in Python. + + Args: + buffer: memoryview object of the encoded bytes + pos: int of the current position to start from + + Returns: + Tuple[bytes, int] of the tag data and new position. + """ + start = pos + while buffer[pos] & 0x80: + pos += 1 + pos += 1 + + tag_bytes = buffer[start:pos].tobytes() + return tag_bytes, pos + + +# -------------------------------------------------------------------- + + +def _SimpleDecoder(wire_type, decode_value): + """Return a constructor for a decoder for fields of a particular type. + + Args: + wire_type: The field's wire type. + decode_value: A function which decodes an individual value, e.g. + _DecodeVarint() + """ + + def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default, + clear_if_default=False): + if is_packed: + local_DecodeVarint = _DecodeVarint + def DecodePackedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + (endpoint, pos) = local_DecodeVarint(buffer, pos) + endpoint += pos + if endpoint > end: + raise _DecodeError('Truncated message.') + while pos < endpoint: + (element, pos) = decode_value(buffer, pos) + value.append(element) + if pos > endpoint: + del value[-1] # Discard corrupt value. + raise _DecodeError('Packed element was truncated.') + return pos + return DecodePackedField + elif is_repeated: + tag_bytes = encoder.TagBytes(field_number, wire_type) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = decode_value(buffer, pos) + value.append(element) + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (new_value, pos) = decode_value(buffer, pos) + if pos > end: + raise _DecodeError('Truncated message.') + if clear_if_default and not new_value: + field_dict.pop(key, None) + else: + field_dict[key] = new_value + return pos + return DecodeField + + return SpecificDecoder + + +def _ModifiedDecoder(wire_type, decode_value, modify_value): + """Like SimpleDecoder but additionally invokes modify_value on every value + before storing it. Usually modify_value is ZigZagDecode. + """ + + # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but + # not enough to make a significant difference. + + def InnerDecode(buffer, pos): + (result, new_pos) = decode_value(buffer, pos) + return (modify_value(result), new_pos) + return _SimpleDecoder(wire_type, InnerDecode) + + +def _StructPackDecoder(wire_type, format): + """Return a constructor for a decoder for a fixed-width field. + + Args: + wire_type: The field's wire type. + format: The format string to pass to struct.unpack(). 
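+
+  For example, passing '<I' produces the fixed32 decoder used below: each
+  value is struct.calcsize('<I') == 4 bytes, unpacked as a little-endian
+  unsigned integer.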
+  """
+
+  value_size = struct.calcsize(format)
+  local_unpack = struct.unpack
+
+  # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
+  # not enough to make a significant difference.
+
+  # Note that we expect someone up-stack to catch struct.error and convert
+  # it to _DecodeError -- this way we don't have to set up exception-
+  # handling blocks every time we parse one value.
+
+  def InnerDecode(buffer, pos):
+    new_pos = pos + value_size
+    result = local_unpack(format, buffer[pos:new_pos])[0]
+    return (result, new_pos)
+  return _SimpleDecoder(wire_type, InnerDecode)
+
+
+def _FloatDecoder():
+  """Returns a decoder for a float field.
+
+  This code works around a bug in struct.unpack for non-finite 32-bit
+  floating-point values.
+  """
+
+  local_unpack = struct.unpack
+
+  def InnerDecode(buffer, pos):
+    """Decode serialized float to a float and new position.
+
+    Args:
+      buffer: memoryview of the serialized bytes
+      pos: int, position in the memory view to start at.
+
+    Returns:
+      Tuple[float, int] of the deserialized float value and new position
+      in the serialized data.
+    """
+    # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
+    # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
+    new_pos = pos + 4
+    float_bytes = buffer[pos:new_pos].tobytes()
+
+    # If this value has all its exponent bits set, then it's non-finite.
+    # In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
+    # To avoid that, we parse it specially.
+    if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
+      # If at least one significand bit is set...
+      if float_bytes[0:3] != b'\x00\x00\x80':
+        return (math.nan, new_pos)
+      # If sign bit is set...
+      if float_bytes[3:4] == b'\xFF':
+        return (-math.inf, new_pos)
+      return (math.inf, new_pos)
+
+    # Note that we expect someone up-stack to catch struct.error and convert
+    # it to _DecodeError -- this way we don't have to set up exception-
+    # handling blocks every time we parse one value.
+    result = local_unpack('<f', float_bytes)[0]
+    return (result, new_pos)
+  return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
+
+
+def _DoubleDecoder():
+  """Returns a decoder for a double field.
+
+  This code works around a bug in struct.unpack for not-a-number.
+  """
+
+  local_unpack = struct.unpack
+
+  def InnerDecode(buffer, pos):
+    """Decode serialized double to a double and new position.
+
+    Args:
+      buffer: memoryview of the serialized bytes.
+      pos: int, position in the memory view to start at.
+
+    Returns:
+      Tuple[float, int] of the deserialized double value and new position
+      in the serialized data.
+    """
+    # We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
+    # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
+    new_pos = pos + 8
+    double_bytes = buffer[pos:new_pos].tobytes()
+
+    # If this value has all its exponent bits set and at least one significand
+    # bit set, it's not a number. In Python 2.4, struct.unpack will treat it
+    # as inf or -inf. To avoid that, we treat it specially.
+    if ((double_bytes[7:8] in b'\x7F\xFF')
+        and (double_bytes[6:7] >= b'\xF0')
+        and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
+      return (math.nan, new_pos)
+
+    # Note that we expect someone up-stack to catch struct.error and convert
+    # it to _DecodeError -- this way we don't have to set up exception-
+    # handling blocks every time we parse one value.
+    result = local_unpack('<d', double_bytes)[0]
+    return (result, new_pos)
+  return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
+
+
+def EnumDecoder(field_number, is_repeated, is_packed, key, new_default,
+                clear_if_default=False):
+  """Returns a decoder for an enum field."""
+  enum_type = key.enum_type
+  if is_packed:
+    local_DecodeVarint = _DecodeVarint
+    def DecodePackedField(buffer, pos, end, message, field_dict):
+      """Decode serialized packed enum to its value and a new position.
+
+      Args:
+        buffer: memoryview of the serialized bytes.
+        pos: int, position in the memory view to start at.
+        end: int, end position of serialized data
+        message: Message object to store unknown fields in
+        field_dict: Map[Descriptor, Any] to store decoded values in.
+
+      Returns:
+        int, new position in serialized data.
+      """
+      value = field_dict.get(key)
+      if value is None:
+        value = field_dict.setdefault(key, new_default(message))
+      (endpoint, pos) = local_DecodeVarint(buffer, pos)
+      endpoint += pos
+      if endpoint > end:
+        raise _DecodeError('Truncated message.')
+      while pos < endpoint:
+        value_start_pos = pos
+        (element, pos) = _DecodeSignedVarint32(buffer, pos)
+        # pylint: disable=protected-access
+        if element in enum_type.values_by_number:
+          value.append(element)
+        else:
+          if not message._unknown_fields:
+            message._unknown_fields = []
+          tag_bytes = encoder.TagBytes(field_number,
+                                       wire_format.WIRETYPE_VARINT)
+
+          message._unknown_fields.append(
+              (tag_bytes, buffer[value_start_pos:pos].tobytes()))
+          if message._unknown_field_set is None:
+            message._unknown_field_set = containers.UnknownFieldSet()
+          message._unknown_field_set._add(
+              field_number, wire_format.WIRETYPE_VARINT, element)
+        # pylint: enable=protected-access
+      if pos > endpoint:
+        if element in enum_type.values_by_number:
+          del value[-1]   # Discard corrupt value.
+ else: + del message._unknown_fields[-1] + # pylint: disable=protected-access + del message._unknown_field_set._values[-1] + # pylint: enable=protected-access + raise _DecodeError('Packed element was truncated.') + return pos + return DecodePackedField + elif is_repeated: + tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + """Decode serialized repeated enum to its value and a new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + (element, new_pos) = _DecodeSignedVarint32(buffer, pos) + # pylint: disable=protected-access + if element in enum_type.values_by_number: + value.append(element) + else: + if not message._unknown_fields: + message._unknown_fields = [] + message._unknown_fields.append( + (tag_bytes, buffer[pos:new_pos].tobytes())) + if message._unknown_field_set is None: + message._unknown_field_set = containers.UnknownFieldSet() + message._unknown_field_set._add( + field_number, wire_format.WIRETYPE_VARINT, element) + # pylint: enable=protected-access + # Predict that the next tag is another copy of the same repeated + # field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos >= end: + # Prediction failed. Return. + if new_pos > end: + raise _DecodeError('Truncated message.') + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + """Decode serialized repeated enum to its value and a new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. 
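+
+      Note: an enum value that is not in enum_type.values_by_number is not
+      dropped; the body below stores it in the message's unknown-field set
+      instead.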
+      """
+      value_start_pos = pos
+      (enum_value, pos) = _DecodeSignedVarint32(buffer, pos)
+      if pos > end:
+        raise _DecodeError('Truncated message.')
+      if clear_if_default and not enum_value:
+        field_dict.pop(key, None)
+        return pos
+      # pylint: disable=protected-access
+      if enum_value in enum_type.values_by_number:
+        field_dict[key] = enum_value
+      else:
+        if not message._unknown_fields:
+          message._unknown_fields = []
+        tag_bytes = encoder.TagBytes(field_number,
+                                     wire_format.WIRETYPE_VARINT)
+        message._unknown_fields.append(
+            (tag_bytes, buffer[value_start_pos:pos].tobytes()))
+        if message._unknown_field_set is None:
+          message._unknown_field_set = containers.UnknownFieldSet()
+        message._unknown_field_set._add(
+            field_number, wire_format.WIRETYPE_VARINT, enum_value)
+      # pylint: enable=protected-access
+      return pos
+    return DecodeField
+
+
+# --------------------------------------------------------------------
+
+
+Int32Decoder = _SimpleDecoder(
+    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)
+
+Int64Decoder = _SimpleDecoder(
+    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)
+
+UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
+UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)
+
+SInt32Decoder = _ModifiedDecoder(
+    wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
+SInt64Decoder = _ModifiedDecoder(
+    wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)
+
+# Note that Python conveniently guarantees that when using the '<' prefix on
+# formats, they will also have the same size across all platforms (as opposed
+# to without the prefix, where their sizes depend on the C compiler's basic
+# type sizes).
+Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
+Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
+SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
+SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
+FloatDecoder = _FloatDecoder()
+DoubleDecoder = _DoubleDecoder()
+
+BoolDecoder = _ModifiedDecoder(
+    wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
+
+
+def StringDecoder(field_number, is_repeated, is_packed, key, new_default,
+                  clear_if_default=False):
+  """Returns a decoder for a string field."""
+
+  local_DecodeVarint = _DecodeVarint
+
+  def _ConvertToUnicode(memview):
+    """Convert byte to unicode."""
+    byte_str = memview.tobytes()
+    try:
+      value = str(byte_str, 'utf-8')
+    except UnicodeDecodeError as e:
+      # add more information to the error message and re-raise it.
+      e.reason = '%s in field: %s' % (e, key.full_name)
+      raise
+
+    return value
+
+  assert not is_packed
+  if is_repeated:
+    tag_bytes = encoder.TagBytes(field_number,
+                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
+    tag_len = len(tag_bytes)
+    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
+      value = field_dict.get(key)
+      if value is None:
+        value = field_dict.setdefault(key, new_default(message))
+      while 1:
+        (size, pos) = local_DecodeVarint(buffer, pos)
+        new_pos = pos + size
+        if new_pos > end:
+          raise _DecodeError('Truncated string.')
+        value.append(_ConvertToUnicode(buffer[pos:new_pos]))
+        # Predict that the next tag is another copy of the same repeated field.
+        pos = new_pos + tag_len
+        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
+          # Prediction failed. Return.
+          return new_pos
+    return DecodeRepeatedField
+  else:
+    def DecodeField(buffer, pos, end, message, field_dict):
+      (size, pos) = local_DecodeVarint(buffer, pos)
+      new_pos = pos + size
+      if new_pos > end:
+        raise _DecodeError('Truncated string.')
+      if clear_if_default and not size:
+        field_dict.pop(key, None)
+      else:
+        field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
+      return new_pos
+    return DecodeField
+
+
+def BytesDecoder(field_number, is_repeated, is_packed, key, new_default,
+                 clear_if_default=False):
+  """Returns a decoder for a bytes field."""
+
+  local_DecodeVarint = _DecodeVarint
+
+  assert not is_packed
+  if is_repeated:
+    tag_bytes = encoder.TagBytes(field_number,
+                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
+    tag_len = len(tag_bytes)
+    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
+      value = field_dict.get(key)
+      if value is None:
+        value = field_dict.setdefault(key, new_default(message))
+      while 1:
+        (size, pos) = local_DecodeVarint(buffer, pos)
+        new_pos = pos + size
+        if new_pos > end:
+          raise _DecodeError('Truncated string.')
+        value.append(buffer[pos:new_pos].tobytes())
+        # Predict that the next tag is another copy of the same repeated field.
+        pos = new_pos + tag_len
+        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
+          # Prediction failed. Return.
+ return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated string.') + if clear_if_default and not size: + field_dict.pop(key, None) + else: + field_dict[key] = buffer[pos:new_pos].tobytes() + return new_pos + return DecodeField + + +def GroupDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a group field.""" + + end_tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_END_GROUP) + end_tag_len = len(end_tag_bytes) + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_START_GROUP) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value.add()._InternalParse(buffer, pos, end) + # Read end tag. + new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read sub-message. + pos = value._InternalParse(buffer, pos, end) + # Read end tag. + new_pos = pos+end_tag_len + if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: + raise _DecodeError('Missing group end tag.') + return new_pos + return DecodeField + + +def MessageDecoder(field_number, is_repeated, is_packed, key, new_default): + """Returns a decoder for a message field.""" + + local_DecodeVarint = _DecodeVarint + + assert not is_packed + if is_repeated: + tag_bytes = encoder.TagBytes(field_number, + wire_format.WIRETYPE_LENGTH_DELIMITED) + tag_len = len(tag_bytes) + def DecodeRepeatedField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + while 1: + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. + if value.add()._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it + # encountered an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + # Predict that the next tag is another copy of the same repeated field. + pos = new_pos + tag_len + if buffer[new_pos:pos] != tag_bytes or new_pos == end: + # Prediction failed. Return. + return new_pos + return DecodeRepeatedField + else: + def DecodeField(buffer, pos, end, message, field_dict): + value = field_dict.get(key) + if value is None: + value = field_dict.setdefault(key, new_default(message)) + # Read length. + (size, pos) = local_DecodeVarint(buffer, pos) + new_pos = pos + size + if new_pos > end: + raise _DecodeError('Truncated message.') + # Read sub-message. 
+ if value._InternalParse(buffer, pos, new_pos) != new_pos: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. + raise _DecodeError('Unexpected end-group tag.') + return new_pos + return DecodeField + + +# -------------------------------------------------------------------- + +MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP) + +def MessageSetItemDecoder(descriptor): + """Returns a decoder for a MessageSet item. + + The parameter is the message Descriptor. + + The message set message looks like this: + message MessageSet { + repeated group Item = 1 { + required int32 type_id = 2; + required string message = 3; + } + } + """ + + type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) + message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) + item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) + + local_ReadTag = ReadTag + local_DecodeVarint = _DecodeVarint + local_SkipField = SkipField + + def DecodeItem(buffer, pos, end, message, field_dict): + """Decode serialized message set to its value and new position. + + Args: + buffer: memoryview of the serialized bytes. + pos: int, position in the memory view to start at. + end: int, end position of serialized data + message: Message object to store unknown fields in + field_dict: Map[Descriptor, Any] to store decoded values in. + + Returns: + int, new position in serialized data. + """ + message_set_item_start = pos + type_id = -1 + message_start = -1 + message_end = -1 + + # Technically, type_id and message can appear in any order, so we need + # a little loop here. + while 1: + (tag_bytes, pos) = local_ReadTag(buffer, pos) + if tag_bytes == type_id_tag_bytes: + (type_id, pos) = local_DecodeVarint(buffer, pos) + elif tag_bytes == message_tag_bytes: + (size, message_start) = local_DecodeVarint(buffer, pos) + pos = message_end = message_start + size + elif tag_bytes == item_end_tag_bytes: + break + else: + pos = SkipField(buffer, pos, end, tag_bytes) + if pos == -1: + raise _DecodeError('Missing group end tag.') + + if pos > end: + raise _DecodeError('Truncated message.') + + if type_id == -1: + raise _DecodeError('MessageSet item missing type_id.') + if message_start == -1: + raise _DecodeError('MessageSet item missing message.') + + extension = message.Extensions._FindExtensionByNumber(type_id) + # pylint: disable=protected-access + if extension is not None: + value = field_dict.get(extension) + if value is None: + message_type = extension.message_type + if not hasattr(message_type, '_concrete_class'): + # pylint: disable=protected-access + message._FACTORY.GetPrototype(message_type) + value = field_dict.setdefault( + extension, message_type._concrete_class()) + if value._InternalParse(buffer, message_start,message_end) != message_end: + # The only reason _InternalParse would return early is if it encountered + # an end-group tag. 
+        raise _DecodeError('Unexpected end-group tag.')
+    else:
+      if not message._unknown_fields:
+        message._unknown_fields = []
+      message._unknown_fields.append(
+          (MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos].tobytes()))
+      if message._unknown_field_set is None:
+        message._unknown_field_set = containers.UnknownFieldSet()
+      message._unknown_field_set._add(
+          type_id,
+          wire_format.WIRETYPE_LENGTH_DELIMITED,
+          buffer[message_start:message_end].tobytes())
+    # pylint: enable=protected-access
+
+    return pos
+
+  return DecodeItem
+
+# --------------------------------------------------------------------
+
+def MapDecoder(field_descriptor, new_default, is_message_map):
+  """Returns a decoder for a map field."""
+
+  key = field_descriptor
+  tag_bytes = encoder.TagBytes(field_descriptor.number,
+                               wire_format.WIRETYPE_LENGTH_DELIMITED)
+  tag_len = len(tag_bytes)
+  local_DecodeVarint = _DecodeVarint
+  # Can't read _concrete_class yet; might not be initialized.
+  message_type = field_descriptor.message_type
+
+  def DecodeMap(buffer, pos, end, message, field_dict):
+    submsg = message_type._concrete_class()
+    value = field_dict.get(key)
+    if value is None:
+      value = field_dict.setdefault(key, new_default(message))
+    while 1:
+      # Read length.
+      (size, pos) = local_DecodeVarint(buffer, pos)
+      new_pos = pos + size
+      if new_pos > end:
+        raise _DecodeError('Truncated message.')
+      # Read sub-message.
+      submsg.Clear()
+      if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
+        # The only reason _InternalParse would return early is if it
+        # encountered an end-group tag.
+        raise _DecodeError('Unexpected end-group tag.')
+
+      if is_message_map:
+        value[submsg.key].CopyFrom(submsg.value)
+      else:
+        value[submsg.key] = submsg.value
+
+      # Predict that the next tag is another copy of the same repeated field.
+      pos = new_pos + tag_len
+      if buffer[new_pos:pos] != tag_bytes or new_pos == end:
+        # Prediction failed. Return.
+        return new_pos
+
+  return DecodeMap
+
+# --------------------------------------------------------------------
+# Optimization is not as heavy here because calls to SkipField() are rare,
+# except for handling end-group tags.
+
+def _SkipVarint(buffer, pos, end):
+  """Skip a varint value. Returns the new position."""
+  # Previously ord(buffer[pos]) raised IndexError when pos is out of range.
+  # With this code, ord(b'') raises TypeError. Both are handled in
+  # python_message.py to generate a 'Truncated message' error.
+  while ord(buffer[pos:pos+1].tobytes()) & 0x80:
+    pos += 1
+  pos += 1
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+def _SkipFixed64(buffer, pos, end):
+  """Skip a fixed64 value. Returns the new position."""
+
+  pos += 8
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+
+def _DecodeFixed64(buffer, pos):
+  """Decode a fixed64."""
+  new_pos = pos + 8
+  return (struct.unpack('<Q', buffer[pos:new_pos])[0], new_pos)
+
+
+def _SkipLengthDelimited(buffer, pos, end):
+  """Skip a length-delimited value. Returns the new position."""
+  (size, pos) = _DecodeVarint(buffer, pos)
+  pos += size
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+
+def _SkipGroup(buffer, pos, end):
+  """Skip sub-group. Returns the new position."""
+
+  while 1:
+    (tag_bytes, pos) = ReadTag(buffer, pos)
+    new_pos = SkipField(buffer, pos, end, tag_bytes)
+    if new_pos == -1:
+      return pos
+    pos = new_pos
+
+
+def _DecodeUnknownFieldSet(buffer, pos, end_pos=None):
+  """Decode UnknownFieldSet. Returns the UnknownFieldSet and new position."""
+
+  unknown_field_set = containers.UnknownFieldSet()
+  while end_pos is None or pos < end_pos:
+    (tag_bytes, pos) = ReadTag(buffer, pos)
+    (tag, _) = _DecodeVarint(tag_bytes, 0)
+    field_number, wire_type = wire_format.UnpackTag(tag)
+    if wire_type == wire_format.WIRETYPE_END_GROUP:
+      break
+    (data, pos) = _DecodeUnknownField(buffer, pos, wire_type)
+    # pylint: disable=protected-access
+    unknown_field_set._add(field_number, wire_type, data)
+
+  return (unknown_field_set, pos)
+
+
+def _DecodeUnknownField(buffer, pos, wire_type):
+  """Decode a unknown field. Returns the UnknownField and new position."""
+
+  if wire_type == wire_format.WIRETYPE_VARINT:
+    (data, pos) = _DecodeVarint(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_FIXED64:
+    (data, pos) = _DecodeFixed64(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_FIXED32:
+    (data, pos) = _DecodeFixed32(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
+    (size, pos) = _DecodeVarint(buffer, pos)
+    data = buffer[pos:pos+size].tobytes()
+    pos += size
+  elif wire_type == wire_format.WIRETYPE_START_GROUP:
+    (data, pos) = _DecodeUnknownFieldSet(buffer, pos)
+  elif wire_type == wire_format.WIRETYPE_END_GROUP:
+    return (0, -1)
+  else:
+    raise _DecodeError('Wrong wire type in tag.')
+
+  return (data, pos)
+
+
+def _EndGroup(buffer, pos, end):
+  """Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
+
+  return -1
+
+
+def _SkipFixed32(buffer, pos, end):
+  """Skip a fixed32 value. Returns the new position."""
+
+  pos += 4
+  if pos > end:
+    raise _DecodeError('Truncated message.')
+  return pos
+
+
+def _DecodeFixed32(buffer, pos):
+  """Decode a fixed32."""
+
+  new_pos = pos + 4
+  return (struct.unpack('<I', buffer[pos:new_pos])[0], new_pos)
+
+
+def _RaiseInvalidWireType(buffer, pos, end):
+  """Skip function for unknown wire types. Raises an exception."""
+
+  raise _DecodeError('Tag had invalid wire type.')
+
+def _FieldSkipper():
+  """Constructs the SkipField function."""
+
+  WIRETYPE_TO_SKIPPER = [
+      _SkipVarint,
+      _SkipFixed64,
+      _SkipLengthDelimited,
+      _SkipGroup,
+      _EndGroup,
+      _SkipFixed32,
+      _RaiseInvalidWireType,
+      _RaiseInvalidWireType,
+      ]
+
+  wiretype_mask = wire_format.TAG_TYPE_MASK
+
+  def SkipField(buffer, pos, end, tag_bytes):
+    """Skips a field with the specified tag.
+
+    |pos| should point to the byte immediately after the tag.
+
+    Returns:
+        The new position (after the tag value), or -1 if the tag is an
+        end-group tag (in which case the calling loop should break).
+    """
+    # The wire type is always in the first byte since varints are little-endian.
+    wire_type = ord(tag_bytes[0:1]) & wiretype_mask
+    return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
+
+  return SkipField
+
+SkipField = _FieldSkipper()
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/encoder.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/encoder.py
new file mode 100644
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/encoder.py
+"""Code for encoding protocol message primitives.
+
+Contains the logic for encoding every logical protocol field type
+into one of the 5 physical wire types.
+"""
+
+__author__ = 'kenton@google.com (Kenton Varda)'
+
+import struct
+
+from google.protobuf.internal import wire_format
+
+
+# This will overflow and thus become IEEE-754 "infinity". We would use
+# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
+_POS_INF = 1e10000
+_NEG_INF = -_POS_INF
+_NAN = _POS_INF * 0
+
+
+def _VarintEncoder():
+  """Return an encoder for a basic varint value (does not include tag)."""
+
+  local_int2byte = struct.Struct('>B').pack
+
+  def EncodeVarint(write, value, unused_deterministic=None):
+    bits = value & 0x7f
+    value >>= 7
+    while value:
+      write(local_int2byte(0x80|bits))
+      bits = value & 0x7f
+      value >>= 7
+    return write(local_int2byte(bits))
+
+  return EncodeVarint
+
+
+def _SignedVarintEncoder():
+  """Return an encoder for a basic signed varint value (does not include
+  tag)."""
+
+  local_int2byte = struct.Struct('>B').pack
+
+  def EncodeSignedVarint(write, value, unused_deterministic=None):
+    if value < 0:
+      value += (1 << 64)
+    bits = value & 0x7f
+    value >>= 7
+    while value:
+      write(local_int2byte(0x80|bits))
+      bits = value & 0x7f
+      value >>= 7
+    return write(local_int2byte(bits))
+
+  return EncodeSignedVarint
+
+
+_EncodeVarint = _VarintEncoder()
+_EncodeSignedVarint = _SignedVarintEncoder()
+
+
+def _VarintBytes(value):
+  """Encode the given integer as a varint and return the bytes. This is only
+  called at startup time so it doesn't need to be fast."""
+
+  pieces = []
+  _EncodeVarint(pieces.append, value, True)
+  return b"".join(pieces)
+
+
+def TagBytes(field_number, wire_type):
+  """Encode the given tag and return the bytes. Only called at startup."""
+
+  return bytes(_VarintBytes(wire_format.PackTag(field_number, wire_type)))
+
+# --------------------------------------------------------------------
+# As with sizers (see above), we have a number of common encoder
+# implementations.
+
+
+def _SimpleEncoder(wire_type, encode_value, compute_value_size):
+  """Return a constructor for an encoder for fields of a particular type.
+
+  Args:
+    wire_type: The field's wire type, for encoding tags.
+    encode_value: A function which encodes an individual value, e.g.
+      _EncodeVarint().
+ compute_value_size: A function which computes the size of an individual + value, e.g. _VarintSize(). + """ + + def SpecificEncoder(field_number, is_repeated, is_packed): + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + size = 0 + for element in value: + size += compute_value_size(element) + local_EncodeVarint(write, size, deterministic) + for element in value: + encode_value(write, element, deterministic) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag_bytes) + encode_value(write, element, deterministic) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, deterministic): + write(tag_bytes) + return encode_value(write, value, deterministic) + return EncodeField + + return SpecificEncoder + + +def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value): + """Like SimpleEncoder but additionally invokes modify_value on every value + before passing it to encode_value. Usually modify_value is ZigZagEncode.""" + + def SpecificEncoder(field_number, is_repeated, is_packed): + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + size = 0 + for element in value: + size += compute_value_size(modify_value(element)) + local_EncodeVarint(write, size, deterministic) + for element in value: + encode_value(write, modify_value(element), deterministic) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, deterministic): + for element in value: + write(tag_bytes) + encode_value(write, modify_value(element), deterministic) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, deterministic): + write(tag_bytes) + return encode_value(write, modify_value(value), deterministic) + return EncodeField + + return SpecificEncoder + + +def _StructPackEncoder(wire_type, format): + """Return a constructor for an encoder for a fixed-width field. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). 
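+
+  For example, '<I' yields the fixed32 encoder used below: each value is
+  written as exactly struct.calcsize('<I') == 4 little-endian bytes after
+  its tag.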
+ """ + + value_size = struct.calcsize(format) + + def SpecificEncoder(field_number, is_repeated, is_packed): + local_struct_pack = struct.pack + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value) * value_size, deterministic) + for element in value: + write(local_struct_pack(format, element)) + return EncodePackedField + elif is_repeated: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeRepeatedField(write, value, unused_deterministic=None): + for element in value: + write(tag_bytes) + write(local_struct_pack(format, element)) + return EncodeRepeatedField + else: + tag_bytes = TagBytes(field_number, wire_type) + def EncodeField(write, value, unused_deterministic=None): + write(tag_bytes) + return write(local_struct_pack(format, value)) + return EncodeField + + return SpecificEncoder + + +def _FloatingPointEncoder(wire_type, format): + """Return a constructor for an encoder for float fields. + + This is like StructPackEncoder, but catches errors that may be due to + passing non-finite floating-point values to struct.pack, and makes a + second attempt to encode those values. + + Args: + wire_type: The field's wire type, for encoding tags. + format: The format string to pass to struct.pack(). + """ + + value_size = struct.calcsize(format) + if value_size == 4: + def EncodeNonFiniteOrRaise(write, value): + # Remember that the serialized form uses little-endian byte order. + if value == _POS_INF: + write(b'\x00\x00\x80\x7F') + elif value == _NEG_INF: + write(b'\x00\x00\x80\xFF') + elif value != value: # NaN + write(b'\x00\x00\xC0\x7F') + else: + raise + elif value_size == 8: + def EncodeNonFiniteOrRaise(write, value): + if value == _POS_INF: + write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') + elif value == _NEG_INF: + write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') + elif value != value: # NaN + write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') + else: + raise + else: + raise ValueError('Can\'t encode floating-point values that are ' + '%d bytes long (only 4 or 8)' % value_size) + + def SpecificEncoder(field_number, is_repeated, is_packed): + local_struct_pack = struct.pack + if is_packed: + tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) + local_EncodeVarint = _EncodeVarint + def EncodePackedField(write, value, deterministic): + write(tag_bytes) + local_EncodeVarint(write, len(value) * value_size, deterministic) + for element in value: + # This try/except block is going to be faster than any code that + # we could write to check whether element is finite. 
+          try:
+            write(local_struct_pack(format, element))
+          except SystemError:
+            EncodeNonFiniteOrRaise(write, element)
+      return EncodePackedField
+    elif is_repeated:
+      tag_bytes = TagBytes(field_number, wire_type)
+      def EncodeRepeatedField(write, value, unused_deterministic=None):
+        for element in value:
+          write(tag_bytes)
+          try:
+            write(local_struct_pack(format, element))
+          except SystemError:
+            EncodeNonFiniteOrRaise(write, element)
+      return EncodeRepeatedField
+    else:
+      tag_bytes = TagBytes(field_number, wire_type)
+      def EncodeField(write, value, unused_deterministic=None):
+        write(tag_bytes)
+        try:
+          write(local_struct_pack(format, value))
+        except SystemError:
+          EncodeNonFiniteOrRaise(write, value)
+      return EncodeField
+
+  return SpecificEncoder
+
+
+# ====================================================================
+# Here we declare an encoder constructor for each field type. These work
+# very similarly to sizer constructors, described earlier.
+
+
+Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
+    wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
+
+UInt32Encoder = UInt64Encoder = _SimpleEncoder(
+    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
+
+SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
+    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
+    wire_format.ZigZagEncode)
+
+# Note that Python conveniently guarantees that when using the '<' prefix on
+# formats, they will also have the same size across all platforms (as opposed
+# to without the prefix, where their sizes depend on the C compiler's basic
+# type sizes).
+Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
+Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
+SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
+SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
+FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
+DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/enum_type_wrapper.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/enum_type_wrapper.py
new file mode 100644
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/enum_type_wrapper.py
+"""A simple wrapper around enum types to expose utility functions.
+
+Instances are created as properties with the same name as the enum they wrap
+on proto classes. For usage, see:
+  reflection_test.py
+"""
+
+__author__ = 'rabsatt@google.com (Kevin Rabsatt)'
+
+
+class EnumTypeWrapper(object):
+  """A utility for finding the names of enum values."""
+
+  DESCRIPTOR = None
+  ValueType = int
+
+  def __init__(self, enum_type):
+    """Inits EnumTypeWrapper with an EnumDescriptor."""
+    self._enum_type = enum_type
+    self.DESCRIPTOR = enum_type  # pylint: disable=invalid-name
+
+  def Name(self, number):  # pylint: disable=invalid-name
+    """Returns a string containing the name of an enum value."""
+    try:
+      return self._enum_type.values_by_number[number].name
+    except KeyError:
+      pass  # fall out to break exception chaining
+
+    if not isinstance(number, int):
+      raise TypeError(
+          'Enum value for {} must be an int, but got {} {!r}.'.format(
+              self._enum_type.name, type(number), number))
+    else:
+      # repr here to handle the odd case when you pass in a boolean.
+      raise ValueError('Enum {} has no name defined for value {!r}'.format(
+          self._enum_type.name, number))
+
+  def Value(self, name):  # pylint: disable=invalid-name
+    """Returns the value corresponding to the given enum name."""
+    try:
+      return self._enum_type.values_by_name[name].number
+    except KeyError:
+      pass  # fall out to break exception chaining
+    raise ValueError('Enum {} has no value defined for name {!r}'.format(
+        self._enum_type.name, name))
+
+  def keys(self):
+    """Return a list of the string names in the enum.
+
+    Returns:
+      A list of strs, in the order they were defined in the .proto file.
+    """
+
+    return [value_descriptor.name
+            for value_descriptor in self._enum_type.values]
+
+  def values(self):
+    """Return a list of the integer values in the enum.
+
+    Returns:
+      A list of ints, in the order they were defined in the .proto file.
+    """
+
+    return [value_descriptor.number
+            for value_descriptor in self._enum_type.values]
+
+  def items(self):
+    """Return a list of the (name, value) pairs of the enum.
+
+    Returns:
+      A list of (str, int) pairs, in the order they were defined
+      in the .proto file.
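+
+    For a hypothetical enum with values UNKNOWN = 0 and RED = 1, this
+    returns [('UNKNOWN', 0), ('RED', 1)].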
+ """ + return [(value_descriptor.name, value_descriptor.number) + for value_descriptor in self._enum_type.values] + + def __getattr__(self, name): + """Returns the value corresponding to the given enum name.""" + try: + return super( + EnumTypeWrapper, + self).__getattribute__('_enum_type').values_by_name[name].number + except KeyError: + pass # fall out to break exception chaining + raise AttributeError('Enum {} has no value defined for name {!r}'.format( + self._enum_type.name, name)) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/extension_dict.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/extension_dict.py new file mode 100644 index 00000000..b346cf28 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/extension_dict.py @@ -0,0 +1,213 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains _ExtensionDict class to represent extensions. +""" + +from google.protobuf.internal import type_checkers +from google.protobuf.descriptor import FieldDescriptor + + +def _VerifyExtensionHandle(message, extension_handle): + """Verify that the given extension handle is valid.""" + + if not isinstance(extension_handle, FieldDescriptor): + raise KeyError('HasExtension() expects an extension handle, got: %s' % + extension_handle) + + if not extension_handle.is_extension: + raise KeyError('"%s" is not an extension.' % extension_handle.full_name) + + if not extension_handle.containing_type: + raise KeyError('"%s" is missing a containing_type.' + % extension_handle.full_name) + + if extension_handle.containing_type is not message.DESCRIPTOR: + raise KeyError('Extension "%s" extends message type "%s", but this ' + 'message is of type "%s".' 
% + (extension_handle.full_name, + extension_handle.containing_type.full_name, + message.DESCRIPTOR.full_name)) + + +# TODO(robinson): Unify error handling of "unknown extension" crap. +# TODO(robinson): Support iteritems()-style iteration over all +# extensions with the "has" bits turned on? +class _ExtensionDict(object): + + """Dict-like container for Extension fields on proto instances. + + Note that in all cases we expect extension handles to be + FieldDescriptors. + """ + + def __init__(self, extended_message): + """ + Args: + extended_message: Message instance for which we are the Extensions dict. + """ + self._extended_message = extended_message + + def __getitem__(self, extension_handle): + """Returns the current value of the given extension handle.""" + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + result = self._extended_message._fields.get(extension_handle) + if result is not None: + return result + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + result = extension_handle._default_constructor(self._extended_message) + elif extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + message_type = extension_handle.message_type + if not hasattr(message_type, '_concrete_class'): + # pylint: disable=protected-access + self._extended_message._FACTORY.GetPrototype(message_type) + assert getattr(extension_handle.message_type, '_concrete_class', None), ( + 'Uninitialized concrete class found for field %r (message type %r)' + % (extension_handle.full_name, + extension_handle.message_type.full_name)) + result = extension_handle.message_type._concrete_class() + try: + result._SetListener(self._extended_message._listener_for_children) + except ReferenceError: + pass + else: + # Singular scalar -- just return the default without inserting into the + # dict. + return extension_handle.default_value + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + result = self._extended_message._fields.setdefault( + extension_handle, result) + + return result + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + + my_fields = self._extended_message.ListFields() + other_fields = other._extended_message.ListFields() + + # Get rid of non-extension fields. + my_fields = [field for field in my_fields if field.is_extension] + other_fields = [field for field in other_fields if field.is_extension] + + return my_fields == other_fields + + def __ne__(self, other): + return not self == other + + def __len__(self): + fields = self._extended_message.ListFields() + # Get rid of non-extension fields. + extension_fields = [field for field in fields if field[0].is_extension] + return len(extension_fields) + + def __hash__(self): + raise TypeError('unhashable object') + + # Note that this is only meaningful for non-repeated, scalar extension + # fields. Note also that we may have to call _Modified() when we do + # successfully set a field this way, to set any necessary "has" bits in the + # ancestors of the extended message. + def __setitem__(self, extension_handle, value): + """If extension_handle specifies a non-repeated, scalar extension + field, sets the value of that field. 
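+
+    For example (extension name illustrative):
+
+      msg.Extensions[my_pb2.my_int32_extension] = 5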
+ """ + + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if (extension_handle.label == FieldDescriptor.LABEL_REPEATED or + extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE): + raise TypeError( + 'Cannot assign to extension "%s" because it is a repeated or ' + 'composite type.' % extension_handle.full_name) + + # It's slightly wasteful to lookup the type checker each time, + # but we expect this to be a vanishingly uncommon case anyway. + type_checker = type_checkers.GetTypeChecker(extension_handle) + # pylint: disable=protected-access + self._extended_message._fields[extension_handle] = ( + type_checker.CheckValue(value)) + self._extended_message._Modified() + + def __delitem__(self, extension_handle): + self._extended_message.ClearExtension(extension_handle) + + def _FindExtensionByName(self, name): + """Tries to find a known extension with the specified name. + + Args: + name: Extension full name. + + Returns: + Extension field descriptor. + """ + return self._extended_message._extensions_by_name.get(name, None) + + def _FindExtensionByNumber(self, number): + """Tries to find a known extension with the field number. + + Args: + number: Extension field number. + + Returns: + Extension field descriptor. + """ + return self._extended_message._extensions_by_number.get(number, None) + + def __iter__(self): + # Return a generator over the populated extension fields + return (f[0] for f in self._extended_message.ListFields() + if f[0].is_extension) + + def __contains__(self, extension_handle): + _VerifyExtensionHandle(self._extended_message, extension_handle) + + if extension_handle not in self._extended_message._fields: + return False + + if extension_handle.label == FieldDescriptor.LABEL_REPEATED: + return bool(self._extended_message._fields.get(extension_handle)) + + if extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + value = self._extended_message._fields.get(extension_handle) + # pylint: disable=protected-access + return value is not None and value._is_present_in_parent + + return True diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/message_listener.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/message_listener.py new file mode 100644 index 00000000..0fc255a7 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/message_listener.py @@ -0,0 +1,78 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Defines a listener interface for observing certain +state transitions on Message objects. + +Also defines a null implementation of this interface. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + + +class MessageListener(object): + + """Listens for modifications made to a message. Meant to be registered via + Message._SetListener(). + + Attributes: + dirty: If True, then calling Modified() would be a no-op. This can be + used to avoid these calls entirely in the common case. + """ + + def Modified(self): + """Called every time the message is modified in such a way that the parent + message may need to be updated. This currently means either: + (a) The message was modified for the first time, so the parent message + should henceforth mark the message as present. + (b) The message's cached byte size became dirty -- i.e. the message was + modified for the first time after a previous call to ByteSize(). + Therefore the parent should also mark its byte size as dirty. + Note that (a) implies (b), since new objects start out with a client cached + size (zero). However, we document (a) explicitly because it is important. + + Modified() will *only* be called in response to one of these two events -- + not every time the sub-message is modified. + + Note that if the listener's |dirty| attribute is true, then calling + Modified at the moment would be a no-op, so it can be skipped. Performance- + sensitive callers should check this attribute directly before calling since + it will be true most of the time. + """ + + raise NotImplementedError + + +class NullMessageListener(object): + + """No-op MessageListener implementation.""" + + def Modified(self): + pass diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/python_message.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/python_message.py new file mode 100644 index 00000000..2921d5cb --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/python_message.py @@ -0,0 +1,1539 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This code is meant to work on Python 2.4 and above only. +# +# TODO(robinson): Helpers for verbose, common checks like seeing if a +# descriptor's cpp_type is CPPTYPE_MESSAGE. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. +(A class is to a metaclass what an instance is to a class.) + +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. +""" + +__author__ = 'robinson@google.com (Will Robinson)' + +from io import BytesIO +import struct +import sys +import weakref + +# We use "as" to avoid name collisions with variables. +from google.protobuf.internal import api_implementation +from google.protobuf.internal import containers +from google.protobuf.internal import decoder +from google.protobuf.internal import encoder +from google.protobuf.internal import enum_type_wrapper +from google.protobuf.internal import extension_dict +from google.protobuf.internal import message_listener as message_listener_mod +from google.protobuf.internal import type_checkers +from google.protobuf.internal import well_known_types +from google.protobuf.internal import wire_format +from google.protobuf import descriptor as descriptor_mod +from google.protobuf import message as message_mod +from google.protobuf import text_format + +_FieldDescriptor = descriptor_mod.FieldDescriptor +_AnyFullTypeName = 'google.protobuf.Any' +_ExtensionDict = extension_dict._ExtensionDict + +class GeneratedProtocolMessageType(type): + + """Metaclass for protocol message classes created at runtime from Descriptors. + + We add implementations for all methods described in the Message class. We + also create properties to allow getting/setting all fields in the protocol + message. Finally, we create slots to prevent users from accidentally + "setting" nonexistent fields in the protocol message, which then wouldn't get + serialized / deserialized properly. + + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) 
+    factory = symbol_database.Default()
+    factory.pool.AddDescriptor(mydescriptor)
+    MyProtoClass = factory.GetPrototype(mydescriptor)
+    myproto_instance = MyProtoClass()
+    myproto_instance.foo_field = 23
+    ...
+  """
+
+  # Must be consistent with the protocol-compiler code in
+  # proto2/compiler/internal/generator.*.
+  _DESCRIPTOR_KEY = 'DESCRIPTOR'
+
+  def __new__(cls, name, bases, dictionary):
+    """Custom allocation for runtime-generated class types.
+
+    We override __new__ because this is apparently the only place
+    where we can meaningfully set __slots__ on the class we're creating(?).
+    (The interplay between metaclasses and slots is not very well-documented).
+
+    Args:
+      name: Name of the class (ignored, but required by the
+        metaclass protocol).
+      bases: Base classes of the class we're constructing.
+        (Should be message.Message.) We ignore this field, but
+        it's required by the metaclass protocol.
+      dictionary: The class dictionary of the class we're
+        constructing. dictionary[_DESCRIPTOR_KEY] must contain
+        a Descriptor object describing this protocol message
+        type.
+
+    Returns:
+      Newly-allocated class.
+
+    Raises:
+      RuntimeError: Generated code only works with the python cpp extension.
+    """
+    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
+
+    if isinstance(descriptor, str):
+      raise RuntimeError('The generated code only works with the python cpp '
+                         'extension, but it is using the pure python runtime.')
+
+    # If a concrete class already exists for this descriptor, don't try to
+    # create another. Doing so will break any messages that already exist with
+    # the existing class.
+    #
+    # The C++ implementation appears to have its own internal `PyMessageFactory`
+    # to achieve similar results.
+    #
+    # This most commonly happens in `text_format.py` when using descriptors from
+    # a custom pool; it calls symbol_database.Global().GetPrototype() on a
+    # descriptor which already has an existing concrete class.
+    new_class = getattr(descriptor, '_concrete_class', None)
+    if new_class:
+      return new_class
+
+    if descriptor.full_name in well_known_types.WKTBASES:
+      bases += (well_known_types.WKTBASES[descriptor.full_name],)
+    _AddClassAttributesForNestedExtensions(descriptor, dictionary)
+    _AddSlots(descriptor, dictionary)
+
+    superclass = super(GeneratedProtocolMessageType, cls)
+    new_class = superclass.__new__(cls, name, bases, dictionary)
+    return new_class
+
+  def __init__(cls, name, bases, dictionary):
+    """Here we perform the majority of our work on the class.
+    We add enum getters, an __init__ method, implementations
+    of all Message methods, and properties for all fields
+    in the protocol type.
+
+    Args:
+      name: Name of the class (ignored, but required by the
+        metaclass protocol).
+      bases: Base classes of the class we're constructing.
+        (Should be message.Message.) We ignore this field, but
+        it's required by the metaclass protocol.
+      dictionary: The class dictionary of the class we're
+        constructing. dictionary[_DESCRIPTOR_KEY] must contain
+        a Descriptor object describing this protocol message
+        type.
+    """
+    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
+
+    # If this is an _existing_ class looked up via `_concrete_class` in the
+    # __new__ method above, then we don't need to re-initialize anything.
+ existing_class = getattr(descriptor, '_concrete_class', None) + if existing_class: + assert existing_class is cls, ( + 'Duplicate `GeneratedProtocolMessageType` created for descriptor %r' + % (descriptor.full_name)) + return + + cls._decoders_by_tag = {} + if (descriptor.has_options and + descriptor.GetOptions().message_set_wire_format): + cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = ( + decoder.MessageSetItemDecoder(descriptor), None) + + # Attach stuff to each FieldDescriptor for quick lookup later on. + for field in descriptor.fields: + _AttachFieldHelpers(cls, field) + + descriptor._concrete_class = cls # pylint: disable=protected-access + _AddEnumValues(descriptor, cls) + _AddInitMethod(descriptor, cls) + _AddPropertiesForFields(descriptor, cls) + _AddPropertiesForExtensions(descriptor, cls) + _AddStaticMethods(cls) + _AddMessageMethods(descriptor, cls) + _AddPrivateHelperMethods(descriptor, cls) + + superclass = super(GeneratedProtocolMessageType, cls) + superclass.__init__(name, bases, dictionary) + + +# Stateless helpers for GeneratedProtocolMessageType below. +# Outside clients should not access these directly. +# +# I opted not to make any of these methods on the metaclass, to make it more +# clear that I'm not really using any state there and to keep clients from +# thinking that they have direct access to these construction helpers. + + +def _PropertyName(proto_field_name): + """Returns the name of the public property attribute which + clients can use to get and (in some cases) set the value + of a protocol message field. + + Args: + proto_field_name: The protocol message field name, exactly + as it appears (or would appear) in a .proto file. + """ + # TODO(robinson): Escape Python keywords (e.g., yield), and test this support. + # nnorwitz makes my day by writing: + # """ + # FYI. See the keyword module in the stdlib. This could be as simple as: + # + # if keyword.iskeyword(proto_field_name): + # return proto_field_name + "_" + # return proto_field_name + # """ + # Kenton says: The above is a BAD IDEA. People rely on being able to use + # getattr() and setattr() to reflectively manipulate field values. If we + # rename the properties, then every such user has to also make sure to apply + # the same transformation. Note that currently if you name a field "yield", + # you can still access it just fine using getattr/setattr -- it's not even + # that cumbersome to do so. + # TODO(kenton): Remove this method entirely if/when everyone agrees with my + # position. + return proto_field_name + + +def _AddSlots(message_descriptor, dictionary): + """Adds a __slots__ entry to dictionary, containing the names of all valid + attributes for this message type. + + Args: + message_descriptor: A Descriptor instance describing this message type. + dictionary: Class dictionary to which we'll add a '__slots__' entry. 
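+
+  With __slots__ in place, assigning to a misspelled field name (for
+  example `msg.fooo = 1`) raises AttributeError instead of silently
+  creating an attribute that would never be serialized.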
+ """ + dictionary['__slots__'] = ['_cached_byte_size', + '_cached_byte_size_dirty', + '_fields', + '_unknown_fields', + '_unknown_field_set', + '_is_present_in_parent', + '_listener', + '_listener_for_children', + '__weakref__', + '_oneofs'] + + +def _IsMessageSetExtension(field): + return (field.is_extension and + field.containing_type.has_options and + field.containing_type.GetOptions().message_set_wire_format and + field.type == _FieldDescriptor.TYPE_MESSAGE and + field.label == _FieldDescriptor.LABEL_OPTIONAL) + + +def _IsMapField(field): + return (field.type == _FieldDescriptor.TYPE_MESSAGE and + field.message_type.has_options and + field.message_type.GetOptions().map_entry) + + +def _IsMessageMapField(field): + value_type = field.message_type.fields_by_name['value'] + return value_type.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE + + +def _AttachFieldHelpers(cls, field_descriptor): + is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED) + is_packable = (is_repeated and + wire_format.IsTypePackable(field_descriptor.type)) + is_proto3 = field_descriptor.containing_type.syntax == 'proto3' + if not is_packable: + is_packed = False + elif field_descriptor.containing_type.syntax == 'proto2': + is_packed = (field_descriptor.has_options and + field_descriptor.GetOptions().packed) + else: + has_packed_false = (field_descriptor.has_options and + field_descriptor.GetOptions().HasField('packed') and + field_descriptor.GetOptions().packed == False) + is_packed = not has_packed_false + is_map_entry = _IsMapField(field_descriptor) + + if is_map_entry: + field_encoder = encoder.MapEncoder(field_descriptor) + sizer = encoder.MapSizer(field_descriptor, + _IsMessageMapField(field_descriptor)) + elif _IsMessageSetExtension(field_descriptor): + field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number) + sizer = encoder.MessageSetItemSizer(field_descriptor.number) + else: + field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type]( + field_descriptor.number, is_repeated, is_packed) + + field_descriptor._encoder = field_encoder + field_descriptor._sizer = sizer + field_descriptor._default_constructor = _DefaultValueConstructorForField( + field_descriptor) + + def AddDecoder(wiretype, is_packed): + tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype) + decode_type = field_descriptor.type + if (decode_type == _FieldDescriptor.TYPE_ENUM and + type_checkers.SupportsOpenEnums(field_descriptor)): + decode_type = _FieldDescriptor.TYPE_INT32 + + oneof_descriptor = None + clear_if_default = False + if field_descriptor.containing_oneof is not None: + oneof_descriptor = field_descriptor + elif (is_proto3 and not is_repeated and + field_descriptor.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE): + clear_if_default = True + + if is_map_entry: + is_message_map = _IsMessageMapField(field_descriptor) + + field_decoder = decoder.MapDecoder( + field_descriptor, _GetInitializeDefaultForMap(field_descriptor), + is_message_map) + elif decode_type == _FieldDescriptor.TYPE_STRING: + field_decoder = decoder.StringDecoder( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, field_descriptor._default_constructor, + clear_if_default) + elif field_descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + field_descriptor, 
field_descriptor._default_constructor) + else: + field_decoder = type_checkers.TYPE_TO_DECODER[decode_type]( + field_descriptor.number, is_repeated, is_packed, + # pylint: disable=protected-access + field_descriptor, field_descriptor._default_constructor, + clear_if_default) + + cls._decoders_by_tag[tag_bytes] = (field_decoder, oneof_descriptor) + + AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type], + False) + + if is_repeated and wire_format.IsTypePackable(field_descriptor.type): + # To support wire compatibility of adding packed = true, add a decoder for + # packed values regardless of the field's options. + AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True) + + +def _AddClassAttributesForNestedExtensions(descriptor, dictionary): + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + assert extension_name not in dictionary + dictionary[extension_name] = extension_field + + +def _AddEnumValues(descriptor, cls): + """Sets class-level attributes for all enum fields defined in this message. + + Also exporting a class-level object that can name enum values. + + Args: + descriptor: Descriptor object for this message type. + cls: Class we're constructing for this message type. + """ + for enum_type in descriptor.enum_types: + setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type)) + for enum_value in enum_type.values: + setattr(cls, enum_value.name, enum_value.number) + + +def _GetInitializeDefaultForMap(field): + if field.label != _FieldDescriptor.LABEL_REPEATED: + raise ValueError('map_entry set on non-repeated field %s' % ( + field.name)) + fields_by_name = field.message_type.fields_by_name + key_checker = type_checkers.GetTypeChecker(fields_by_name['key']) + + value_field = fields_by_name['value'] + if _IsMessageMapField(field): + def MakeMessageMapDefault(message): + return containers.MessageMap( + message._listener_for_children, value_field.message_type, key_checker, + field.message_type) + return MakeMessageMapDefault + else: + value_checker = type_checkers.GetTypeChecker(value_field) + def MakePrimitiveMapDefault(message): + return containers.ScalarMap( + message._listener_for_children, key_checker, value_checker, + field.message_type) + return MakePrimitiveMapDefault + +def _DefaultValueConstructorForField(field): + """Returns a function which returns a default value for a field. + + Args: + field: FieldDescriptor object for this field. + + The returned function has one argument: + message: Message instance containing this field, or a weakref proxy + of same. + + That function in turn returns a default value for this field. The default + value may refer back to |message| via a weak reference. + """ + + if _IsMapField(field): + return _GetInitializeDefaultForMap(field) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + if field.has_default_value and field.default_value != []: + raise ValueError('Repeated field default value not empty list: %s' % ( + field.default_value)) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + # We can't look at _concrete_class yet since it might not have + # been set. (Depends on order in which we initialize the classes). 
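+      # (Concretely: with two mutually recursive message types, the first
+      # class is wired up before the second class object exists, so the
+      # lookup is deferred until a container is actually constructed.)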
+ message_type = field.message_type + def MakeRepeatedMessageDefault(message): + return containers.RepeatedCompositeFieldContainer( + message._listener_for_children, field.message_type) + return MakeRepeatedMessageDefault + else: + type_checker = type_checkers.GetTypeChecker(field) + def MakeRepeatedScalarDefault(message): + return containers.RepeatedScalarFieldContainer( + message._listener_for_children, type_checker) + return MakeRepeatedScalarDefault + + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + # _concrete_class may not yet be initialized. + message_type = field.message_type + def MakeSubMessageDefault(message): + assert getattr(message_type, '_concrete_class', None), ( + 'Uninitialized concrete class found for field %r (message type %r)' + % (field.full_name, message_type.full_name)) + result = message_type._concrete_class() + result._SetListener( + _OneofListener(message, field) + if field.containing_oneof is not None + else message._listener_for_children) + return result + return MakeSubMessageDefault + + def MakeScalarDefault(message): + # TODO(protobuf-team): This may be broken since there may not be + # default_value. Combine with has_default_value somehow. + return field.default_value + return MakeScalarDefault + + +def _ReraiseTypeErrorWithFieldName(message_name, field_name): + """Re-raise the currently-handled TypeError with the field name added.""" + exc = sys.exc_info()[1] + if len(exc.args) == 1 and type(exc) is TypeError: + # simple TypeError; add field name to exception message + exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name)) + + # re-raise possibly-amended exception with original traceback: + raise exc.with_traceback(sys.exc_info()[2]) + + +def _AddInitMethod(message_descriptor, cls): + """Adds an __init__ method to cls.""" + + def _GetIntegerEnumValue(enum_type, value): + """Convert a string or integer enum value to an integer. + + If the value is a string, it is converted to the enum value in + enum_type with the same name. If the value is not a string, it's + returned as-is. (No conversion or bounds-checking is done.) + """ + if isinstance(value, str): + try: + return enum_type.values_by_name[value].number + except KeyError: + raise ValueError('Enum type %s: unknown label "%s"' % ( + enum_type.full_name, value)) + return value + + def init(self, **kwargs): + self._cached_byte_size = 0 + self._cached_byte_size_dirty = len(kwargs) > 0 + self._fields = {} + # Contains a mapping from oneof field descriptors to the descriptor + # of the currently set field in that oneof field. + self._oneofs = {} + + # _unknown_fields is () when empty for efficiency, and will be turned into + # a list if fields are added. + self._unknown_fields = () + # _unknown_field_set is None when empty for efficiency, and will be + # turned into UnknownFieldSet struct if fields are added. + self._unknown_field_set = None # pylint: disable=protected-access + self._is_present_in_parent = False + self._listener = message_listener_mod.NullMessageListener() + self._listener_for_children = _Listener(self) + for field_name, field_value in kwargs.items(): + field = _GetFieldByName(message_descriptor, field_name) + if field is None: + raise TypeError('%s() got an unexpected keyword argument "%s"' % + (message_descriptor.name, field_name)) + if field_value is None: + # field=None is the same as no field at all. 
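+        # e.g. MyMessage(foo=None) behaves exactly like MyMessage()
+        # (names illustrative).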
+ continue + if field.label == _FieldDescriptor.LABEL_REPEATED: + copy = field._default_constructor(self) + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite + if _IsMapField(field): + if _IsMessageMapField(field): + for key in field_value: + copy[key].MergeFrom(field_value[key]) + else: + copy.update(field_value) + else: + for val in field_value: + if isinstance(val, dict): + copy.add(**val) + else: + copy.add().MergeFrom(val) + else: # Scalar + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = [_GetIntegerEnumValue(field.enum_type, val) + for val in field_value] + copy.extend(field_value) + self._fields[field] = copy + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + copy = field._default_constructor(self) + new_val = field_value + if isinstance(field_value, dict): + new_val = field.message_type._concrete_class(**field_value) + try: + copy.MergeFrom(new_val) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + self._fields[field] = copy + else: + if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: + field_value = _GetIntegerEnumValue(field.enum_type, field_value) + try: + setattr(self, field_name, field_value) + except TypeError: + _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) + + init.__module__ = None + init.__doc__ = None + cls.__init__ = init + + +def _GetFieldByName(message_descriptor, field_name): + """Returns a field descriptor by field name. + + Args: + message_descriptor: A Descriptor describing all fields in message. + field_name: The name of the field to retrieve. + Returns: + The field descriptor associated with the field name. + """ + try: + return message_descriptor.fields_by_name[field_name] + except KeyError: + raise ValueError('Protocol message %s has no "%s" field.' % + (message_descriptor.name, field_name)) + + +def _AddPropertiesForFields(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + for field in descriptor.fields: + _AddPropertiesForField(field, cls) + + if descriptor.is_extendable: + # _ExtensionDict is just an adaptor with no state so we allocate a new one + # every time it is accessed. + cls.Extensions = property(lambda self: _ExtensionDict(self)) + + +def _AddPropertiesForField(field, cls): + """Adds a public property for a protocol message field. + Clients can use this property to get and (in the case + of non-repeated scalar fields) directly set the value + of a protocol message field. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # Catch it if we add other types that we should + # handle specially here. + assert _FieldDescriptor.MAX_CPPTYPE == 10 + + constant_name = field.name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, field.number) + + if field.label == _FieldDescriptor.LABEL_REPEATED: + _AddPropertiesForRepeatedField(field, cls) + elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + _AddPropertiesForNonRepeatedCompositeField(field, cls) + else: + _AddPropertiesForNonRepeatedScalarField(field, cls) + + +class _FieldProperty(property): + __slots__ = ('DESCRIPTOR',) + + def __init__(self, descriptor, getter, setter, doc): + property.__init__(self, getter, setter, doc=doc) + self.DESCRIPTOR = descriptor + + +def _AddPropertiesForRepeatedField(field, cls): + """Adds a public property for a "repeated" protocol message field. 
Clients + can use this property to get the value of the field, which will be either a + RepeatedScalarFieldContainer or RepeatedCompositeFieldContainer (see + below). + + Note that when clients add values to these containers, we perform + type-checking in the case of repeated scalar fields, and we also set any + necessary "has" bits as a side-effect. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + raise AttributeError('Assignment not allowed to repeated field ' + '"%s" in protocol message object.' % proto_field_name) + + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedScalarField(field, cls): + """Adds a public property for a nonrepeated, scalar protocol message field. + Clients can use this property to get and directly set the value of the field. + Note that when the client sets the value of a field by using this property, + all necessary "has" bits are set as a side-effect, and we also perform + type-checking. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + type_checker = type_checkers.GetTypeChecker(field) + default_value = field.default_value + is_proto3 = field.containing_type.syntax == 'proto3' + + def getter(self): + # TODO(protobuf-team): This may be broken since there may not be + # default_value. Combine with has_default_value somehow. + return self._fields.get(field, default_value) + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + clear_when_set_to_default = is_proto3 and not field.containing_oneof + + def field_setter(self, new_value): + # pylint: disable=protected-access + # Testing the value for truthiness captures all of the proto3 defaults + # (0, 0.0, enum 0, and False). + try: + new_value = type_checker.CheckValue(new_value) + except TypeError as e: + raise TypeError( + 'Cannot set %s to %.1024r: %s' % (field.full_name, new_value, e)) + if clear_when_set_to_default and not new_value: + self._fields.pop(field, None) + else: + self._fields[field] = new_value + # Check _cached_byte_size_dirty inline to improve performance, since scalar + # setters are called frequently. 
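+    # (Concretely: assigning scalar fields in a tight loop would otherwise
+    # pay for a _Modified() call on every assignment; once the dirty bit is
+    # set, subsequent assignments skip that call entirely.)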
+ if not self._cached_byte_size_dirty: + self._Modified() + + if field.containing_oneof: + def setter(self, new_value): + field_setter(self, new_value) + self._UpdateOneofState(field) + else: + setter = field_setter + + setter.__module__ = None + setter.__doc__ = 'Setter for %s.' % proto_field_name + + # Add a property to encapsulate the getter/setter. + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForNonRepeatedCompositeField(field, cls): + """Adds a public property for a nonrepeated, composite protocol message field. + A composite field is a "group" or "message" field. + + Clients can use this property to get the value of the field, but cannot + assign to the property directly. + + Args: + field: A FieldDescriptor for this field. + cls: The class we're constructing. + """ + # TODO(robinson): Remove duplication with similar method + # for non-repeated scalars. + proto_field_name = field.name + property_name = _PropertyName(proto_field_name) + + def getter(self): + field_value = self._fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + + # Atomically check if another thread has preempted us and, if not, swap + # in the new object we just created. If someone has preempted us, we + # take that object and discard ours. + # WARNING: We are relying on setdefault() being atomic. This is true + # in CPython but we haven't investigated others. This warning appears + # in several other locations in this file. + field_value = self._fields.setdefault(field, field_value) + return field_value + getter.__module__ = None + getter.__doc__ = 'Getter for %s.' % proto_field_name + + # We define a setter just so we can throw an exception with a more + # helpful error message. + def setter(self, new_value): + raise AttributeError('Assignment not allowed to composite field ' + '"%s" in protocol message object.' % proto_field_name) + + # Add a property to encapsulate the getter. + doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name + setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc)) + + +def _AddPropertiesForExtensions(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + extensions = descriptor.extensions_by_name + for extension_name, extension_field in extensions.items(): + constant_name = extension_name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, extension_field.number) + + # TODO(amauryfa): Migrate all users of these attributes to functions like + # pool.FindExtensionByNumber(descriptor). + if descriptor.file is not None: + # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. + pool = descriptor.file.pool + cls._extensions_by_number = pool._extensions_by_number[descriptor] + cls._extensions_by_name = pool._extensions_by_name[descriptor] + +def _AddStaticMethods(cls): + # TODO(robinson): This probably needs to be thread-safe(?) + def RegisterExtension(extension_handle): + extension_handle.containing_type = cls.DESCRIPTOR + # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. 
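+    # Typical generated-code usage (names illustrative):
+    #   my_pb2.MyMessage.RegisterExtension(my_pb2.my_extension)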
+    # pylint: disable=protected-access
+    cls.DESCRIPTOR.file.pool._AddExtensionDescriptor(extension_handle)
+    _AttachFieldHelpers(cls, extension_handle)
+  cls.RegisterExtension = staticmethod(RegisterExtension)
+
+  def FromString(s):
+    message = cls()
+    message.MergeFromString(s)
+    return message
+  cls.FromString = staticmethod(FromString)
+
+
+def _IsPresent(item):
+  """Given a (FieldDescriptor, value) tuple from _fields, return true if the
+  value should be included in the list returned by ListFields()."""
+
+  if item[0].label == _FieldDescriptor.LABEL_REPEATED:
+    return bool(item[1])
+  elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+    return item[1]._is_present_in_parent
+  else:
+    return True
+
+
+def _AddListFieldsMethod(message_descriptor, cls):
+  """Helper for _AddMessageMethods()."""
+
+  def ListFields(self):
+    all_fields = [item for item in self._fields.items() if _IsPresent(item)]
+    all_fields.sort(key = lambda item: item[0].number)
+    return all_fields
+
+  cls.ListFields = ListFields
+
+_PROTO3_ERROR_TEMPLATE = \
+  ('Protocol message %s has no non-repeated submessage field "%s" '
+   'nor marked as optional')
+_PROTO2_ERROR_TEMPLATE = 'Protocol message %s has no non-repeated field "%s"'
+
+def _AddHasFieldMethod(message_descriptor, cls):
+  """Helper for _AddMessageMethods()."""
+
+  is_proto3 = (message_descriptor.syntax == "proto3")
+  error_msg = _PROTO3_ERROR_TEMPLATE if is_proto3 else _PROTO2_ERROR_TEMPLATE
+
+  hassable_fields = {}
+  for field in message_descriptor.fields:
+    if field.label == _FieldDescriptor.LABEL_REPEATED:
+      continue
+    # For proto3, only submessages and fields inside a oneof have presence.
+    if (is_proto3 and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE and
+        not field.containing_oneof):
+      continue
+    hassable_fields[field.name] = field
+
+  # Has methods are supported for oneof descriptors.
+  for oneof in message_descriptor.oneofs:
+    hassable_fields[oneof.name] = oneof
+
+  def HasField(self, field_name):
+    try:
+      field = hassable_fields[field_name]
+    except KeyError:
+      raise ValueError(error_msg % (message_descriptor.full_name, field_name))
+
+    if isinstance(field, descriptor_mod.OneofDescriptor):
+      try:
+        return HasField(self, self._oneofs[field].name)
+      except KeyError:
+        return False
+    else:
+      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+        value = self._fields.get(field)
+        return value is not None and value._is_present_in_parent
+      else:
+        return field in self._fields
+
+  cls.HasField = HasField
+
+
+def _AddClearFieldMethod(message_descriptor, cls):
+  """Helper for _AddMessageMethods()."""
+  def ClearField(self, field_name):
+    try:
+      field = message_descriptor.fields_by_name[field_name]
+    except KeyError:
+      try:
+        field = message_descriptor.oneofs_by_name[field_name]
+        if field in self._oneofs:
+          field = self._oneofs[field]
+        else:
+          return
+      except KeyError:
+        raise ValueError('Protocol message %s has no "%s" field.' %
+                         (message_descriptor.name, field_name))
+
+    if field in self._fields:
+      # To match the C++ implementation, we need to invalidate iterators
+      # for map fields when ClearField() happens.
+      if hasattr(self._fields[field], 'InvalidateIterators'):
+        self._fields[field].InvalidateIterators()
+
+      # Note: If the field is a sub-message, its listener will still point
+      # at us. That's fine, because the worst that can happen is that it
+      # will call _Modified() and invalidate our byte size. Big deal.
+      del self._fields[field]
+
+      if self._oneofs.get(field.containing_oneof, None) is field:
+        del self._oneofs[field.containing_oneof]
+
+    # Always call _Modified() -- even if nothing was changed, this is
+    # a mutating method, and thus calling it should cause the field to become
+    # present in the parent message.
+    self._Modified()
+
+  cls.ClearField = ClearField
+
+
+def _AddClearExtensionMethod(cls):
+  """Helper for _AddMessageMethods()."""
+  def ClearExtension(self, extension_handle):
+    extension_dict._VerifyExtensionHandle(self, extension_handle)
+
+    # Similar to ClearField(), above.
+    if extension_handle in self._fields:
+      del self._fields[extension_handle]
+    self._Modified()
+  cls.ClearExtension = ClearExtension
+
+
+def _AddHasExtensionMethod(cls):
+  """Helper for _AddMessageMethods()."""
+  def HasExtension(self, extension_handle):
+    extension_dict._VerifyExtensionHandle(self, extension_handle)
+    if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
+      raise KeyError('"%s" is repeated.' % extension_handle.full_name)
+
+    if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
+      value = self._fields.get(extension_handle)
+      return value is not None and value._is_present_in_parent
+    else:
+      return extension_handle in self._fields
+  cls.HasExtension = HasExtension
+
+def _InternalUnpackAny(msg):
+  """Unpacks Any message and returns the unpacked message.
+
+  This internal method differs from the public Any Unpack method, which
+  takes the target message as an argument. _InternalUnpackAny does not have
+  the target message type and needs to look it up in the descriptor pool.
+
+  Args:
+    msg: An Any message to be unpacked.
+
+  Returns:
+    The unpacked message.
+  """
+  # TODO(amauryfa): Don't use the factory of generated messages.
+  # To make Any work with custom factories, use the message factory of the
+  # parent message.
+  # pylint: disable=g-import-not-at-top
+  from google.protobuf import symbol_database
+  factory = symbol_database.Default()
+
+  type_url = msg.type_url
+
+  if not type_url:
+    return None
+
+  # TODO(haberman): For now we just strip the hostname. Better logic will be
+  # required.
+  type_name = type_url.split('/')[-1]
+  descriptor = factory.pool.FindMessageTypeByName(type_name)
+
+  if descriptor is None:
+    return None
+
+  message_class = factory.GetPrototype(descriptor)
+  message = message_class()
+
+  message.ParseFromString(msg.value)
+  return message
+
+
+def _AddEqualsMethod(message_descriptor, cls):
+  """Helper for _AddMessageMethods()."""
+  def __eq__(self, other):
+    if (not isinstance(other, message_mod.Message) or
+        other.DESCRIPTOR != self.DESCRIPTOR):
+      return False
+
+    if self is other:
+      return True
+
+    if self.DESCRIPTOR.full_name == _AnyFullTypeName:
+      any_a = _InternalUnpackAny(self)
+      any_b = _InternalUnpackAny(other)
+      if any_a and any_b:
+        return any_a == any_b
+
+    if not self.ListFields() == other.ListFields():
+      return False
+
+    # TODO(jieluo): Fix UnknownFieldSet to consider MessageSet extensions,
+    # then use it for the comparison.
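+    # _unknown_fields holds raw (tag_bytes, value_bytes) pairs, so sorting
+    # both lists makes the comparison independent of the order in which
+    # unknown fields were parsed.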
+ unknown_fields = list(self._unknown_fields) + unknown_fields.sort() + other_unknown_fields = list(other._unknown_fields) + other_unknown_fields.sort() + return unknown_fields == other_unknown_fields + + cls.__eq__ = __eq__ + + +def _AddStrMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __str__(self): + return text_format.MessageToString(self) + cls.__str__ = __str__ + + +def _AddReprMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + def __repr__(self): + return text_format.MessageToString(self) + cls.__repr__ = __repr__ + + +def _AddUnicodeMethod(unused_message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def __unicode__(self): + return text_format.MessageToString(self, as_utf8=True).decode('utf-8') + cls.__unicode__ = __unicode__ + + +def _BytesForNonRepeatedElement(value, field_number, field_type): + """Returns the number of bytes needed to serialize a non-repeated element. + The returned byte count includes space for tag information and any + other additional space associated with serializing value. + + Args: + value: Value we're serializing. + field_number: Field number of this value. (Since the field number + is stored as part of a varint-encoded tag, this has an impact + on the total bytes required to serialize the value). + field_type: The type of the field. One of the TYPE_* constants + within FieldDescriptor. + """ + try: + fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type] + return fn(field_number, value) + except KeyError: + raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) + + +def _AddByteSizeMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def ByteSize(self): + if not self._cached_byte_size_dirty: + return self._cached_byte_size + + size = 0 + descriptor = self.DESCRIPTOR + if descriptor.GetOptions().map_entry: + # Fields of map entry should always be serialized. + size = descriptor.fields_by_name['key']._sizer(self.key) + size += descriptor.fields_by_name['value']._sizer(self.value) + else: + for field_descriptor, field_value in self.ListFields(): + size += field_descriptor._sizer(field_value) + for tag_bytes, value_bytes in self._unknown_fields: + size += len(tag_bytes) + len(value_bytes) + + self._cached_byte_size = size + self._cached_byte_size_dirty = False + self._listener_for_children.dirty = False + return size + + cls.ByteSize = ByteSize + + +def _AddSerializeToStringMethod(message_descriptor, cls): + """Helper for _AddMessageMethods().""" + + def SerializeToString(self, **kwargs): + # Check if the message has all of its required fields set. 
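+    # (Only proto2 `required` fields can be uninitialized here; proto3
+    # messages have no required fields and always pass this check.)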
+    if not self.IsInitialized():
+      raise message_mod.EncodeError(
+          'Message %s is missing required fields: %s' % (
+          self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))
+    return self.SerializePartialToString(**kwargs)
+  cls.SerializeToString = SerializeToString
+
+
+def _AddSerializePartialToStringMethod(message_descriptor, cls):
+  """Helper for _AddMessageMethods()."""
+
+  def SerializePartialToString(self, **kwargs):
+    out = BytesIO()
+    self._InternalSerialize(out.write, **kwargs)
+    return out.getvalue()
+  cls.SerializePartialToString = SerializePartialToString
+
+  def InternalSerialize(self, write_bytes, deterministic=None):
+    if deterministic is None:
+      deterministic = (
+          api_implementation.IsPythonDefaultSerializationDeterministic())
+    else:
+      deterministic = bool(deterministic)
+
+    descriptor = self.DESCRIPTOR
+    if descriptor.GetOptions().map_entry:
+      # Fields of map entry should always be serialized.
+      descriptor.fields_by_name['key']._encoder(
+          write_bytes, self.key, deterministic)
+      descriptor.fields_by_name['value']._encoder(
+          write_bytes, self.value, deterministic)
+    else:
+      for field_descriptor, field_value in self.ListFields():
+        field_descriptor._encoder(write_bytes, field_value, deterministic)
+      for tag_bytes, value_bytes in self._unknown_fields:
+        write_bytes(tag_bytes)
+        write_bytes(value_bytes)
+  cls._InternalSerialize = InternalSerialize
+
+
+def _AddMergeFromStringMethod(message_descriptor, cls):
+  """Helper for _AddMessageMethods()."""
+  def MergeFromString(self, serialized):
+    serialized = memoryview(serialized)
+    length = len(serialized)
+    try:
+      if self._InternalParse(serialized, 0, length) != length:
+        # The only reason _InternalParse would return early is if it
+        # encountered an end-group tag.
+        raise message_mod.DecodeError('Unexpected end-group tag.')
+    except (IndexError, TypeError):
+      # Now ord(buf[p:p+1]) == ord('') gets TypeError.
+      raise message_mod.DecodeError('Truncated message.')
+    except struct.error as e:
+      raise message_mod.DecodeError(e)
+    return length  # Return this for legacy reasons.
+  cls.MergeFromString = MergeFromString
+
+  local_ReadTag = decoder.ReadTag
+  local_SkipField = decoder.SkipField
+  decoders_by_tag = cls._decoders_by_tag
+
+  def InternalParse(self, buffer, pos, end):
+    """Parse serialized bytes into this message.
+
+    Args:
+      self: Message, instance of the proto message object.
+      buffer: memoryview of the serialized data.
+      pos: int, position to start in the serialized data.
+      end: int, end position of the serialized data.
+
+    Returns:
+      int, the position up to which parsing consumed bytes; equal to `end`
+      unless parsing stopped early (e.g. at an end-group tag).
+    """
+    # Guard against internal misuse, since this function is called internally
+    # quite extensively, and it's easy to accidentally pass bytes.
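+    # (For example, buffer[old_pos:new_pos].tobytes() below exists only on
+    # memoryview; with a bytes argument the failure would surface far from
+    # the actual mistake.)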
+ assert isinstance(buffer, memoryview) + self._Modified() + field_dict = self._fields + # pylint: disable=protected-access + unknown_field_set = self._unknown_field_set + while pos != end: + (tag_bytes, new_pos) = local_ReadTag(buffer, pos) + field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None)) + if field_decoder is None: + if not self._unknown_fields: # pylint: disable=protected-access + self._unknown_fields = [] # pylint: disable=protected-access + if unknown_field_set is None: + # pylint: disable=protected-access + self._unknown_field_set = containers.UnknownFieldSet() + # pylint: disable=protected-access + unknown_field_set = self._unknown_field_set + # pylint: disable=protected-access + (tag, _) = decoder._DecodeVarint(tag_bytes, 0) + field_number, wire_type = wire_format.UnpackTag(tag) + if field_number == 0: + raise message_mod.DecodeError('Field number 0 is illegal.') + # TODO(jieluo): remove old_pos. + old_pos = new_pos + (data, new_pos) = decoder._DecodeUnknownField( + buffer, new_pos, wire_type) # pylint: disable=protected-access + if new_pos == -1: + return pos + # pylint: disable=protected-access + unknown_field_set._add(field_number, wire_type, data) + # TODO(jieluo): remove _unknown_fields. + new_pos = local_SkipField(buffer, old_pos, end, tag_bytes) + if new_pos == -1: + return pos + self._unknown_fields.append( + (tag_bytes, buffer[old_pos:new_pos].tobytes())) + pos = new_pos + else: + pos = field_decoder(buffer, new_pos, end, self, field_dict) + if field_desc: + self._UpdateOneofState(field_desc) + return pos + cls._InternalParse = InternalParse + + +def _AddIsInitializedMethod(message_descriptor, cls): + """Adds the IsInitialized and FindInitializationError methods to the + protocol message class.""" + + required_fields = [field for field in message_descriptor.fields + if field.label == _FieldDescriptor.LABEL_REQUIRED] + + def IsInitialized(self, errors=None): + """Checks if all required fields of a message are set. + + Args: + errors: A list which, if provided, will be populated with the field + paths of all missing required fields. + + Returns: + True iff the specified message has all required fields set. + """ + + # Performance is critical so we avoid HasField() and ListFields(). + + for field in required_fields: + if (field not in self._fields or + (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and + not self._fields[field]._is_present_in_parent)): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + for field, value in list(self._fields.items()): # dict can change size! + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.label == _FieldDescriptor.LABEL_REPEATED: + if (field.message_type.has_options and + field.message_type.GetOptions().map_entry): + continue + for element in value: + if not element.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + elif value._is_present_in_parent and not value.IsInitialized(): + if errors is not None: + errors.extend(self.FindInitializationErrors()) + return False + + return True + + cls.IsInitialized = IsInitialized + + def FindInitializationErrors(self): + """Finds required fields which are not initialized. + + Returns: + A list of strings. Each string is a path to an uninitialized field from + the top-level message, e.g. "foo.bar[5].baz". 
+ """ + + errors = [] # simplify things + + for field in required_fields: + if not self.HasField(field.name): + errors.append(field.name) + + for field, value in self.ListFields(): + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + name = '(%s)' % field.full_name + else: + name = field.name + + if _IsMapField(field): + if _IsMessageMapField(field): + for key in value: + element = value[key] + prefix = '%s[%s].' % (name, key) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + # ScalarMaps can't have any initialization errors. + pass + elif field.label == _FieldDescriptor.LABEL_REPEATED: + for i in range(len(value)): + element = value[i] + prefix = '%s[%d].' % (name, i) + sub_errors = element.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + else: + prefix = name + '.' + sub_errors = value.FindInitializationErrors() + errors += [prefix + error for error in sub_errors] + + return errors + + cls.FindInitializationErrors = FindInitializationErrors + + +def _FullyQualifiedClassName(klass): + module = klass.__module__ + name = getattr(klass, '__qualname__', klass.__name__) + if module in (None, 'builtins', '__builtin__'): + return name + return module + '.' + name + + +def _AddMergeFromMethod(cls): + LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED + CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE + + def MergeFrom(self, msg): + if not isinstance(msg, cls): + raise TypeError( + 'Parameter to MergeFrom() must be instance of same class: ' + 'expected %s got %s.' % (_FullyQualifiedClassName(cls), + _FullyQualifiedClassName(msg.__class__))) + + assert msg is not self + self._Modified() + + fields = self._fields + + for field, value in msg._fields.items(): + if field.label == LABEL_REPEATED: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + elif field.cpp_type == CPPTYPE_MESSAGE: + if value._is_present_in_parent: + field_value = fields.get(field) + if field_value is None: + # Construct a new object to represent this field. + field_value = field._default_constructor(self) + fields[field] = field_value + field_value.MergeFrom(value) + else: + self._fields[field] = value + if field.containing_oneof: + self._UpdateOneofState(field) + + if msg._unknown_fields: + if not self._unknown_fields: + self._unknown_fields = [] + self._unknown_fields.extend(msg._unknown_fields) + # pylint: disable=protected-access + if self._unknown_field_set is None: + self._unknown_field_set = containers.UnknownFieldSet() + self._unknown_field_set._extend(msg._unknown_field_set) + + cls.MergeFrom = MergeFrom + + +def _AddWhichOneofMethod(message_descriptor, cls): + def WhichOneof(self, oneof_name): + """Returns the name of the currently set field inside a oneof, or None.""" + try: + field = message_descriptor.oneofs_by_name[oneof_name] + except KeyError: + raise ValueError( + 'Protocol message has no oneof "%s" field.' % oneof_name) + + nested_field = self._oneofs.get(field, None) + if nested_field is not None and self.HasField(nested_field.name): + return nested_field.name + else: + return None + + cls.WhichOneof = WhichOneof + + +def _Clear(self): + # Clear fields. 
+ self._fields = {} + self._unknown_fields = () + # pylint: disable=protected-access + if self._unknown_field_set is not None: + self._unknown_field_set._clear() + self._unknown_field_set = None + + self._oneofs = {} + self._Modified() + + +def _UnknownFields(self): + if self._unknown_field_set is None: # pylint: disable=protected-access + # pylint: disable=protected-access + self._unknown_field_set = containers.UnknownFieldSet() + return self._unknown_field_set # pylint: disable=protected-access + + +def _DiscardUnknownFields(self): + self._unknown_fields = [] + self._unknown_field_set = None # pylint: disable=protected-access + for field, value in self.ListFields(): + if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: + if _IsMapField(field): + if _IsMessageMapField(field): + for key in value: + value[key].DiscardUnknownFields() + elif field.label == _FieldDescriptor.LABEL_REPEATED: + for sub_message in value: + sub_message.DiscardUnknownFields() + else: + value.DiscardUnknownFields() + + +def _SetListener(self, listener): + if listener is None: + self._listener = message_listener_mod.NullMessageListener() + else: + self._listener = listener + + +def _AddMessageMethods(message_descriptor, cls): + """Adds implementations of all Message methods to cls.""" + _AddListFieldsMethod(message_descriptor, cls) + _AddHasFieldMethod(message_descriptor, cls) + _AddClearFieldMethod(message_descriptor, cls) + if message_descriptor.is_extendable: + _AddClearExtensionMethod(cls) + _AddHasExtensionMethod(cls) + _AddEqualsMethod(message_descriptor, cls) + _AddStrMethod(message_descriptor, cls) + _AddReprMethod(message_descriptor, cls) + _AddUnicodeMethod(message_descriptor, cls) + _AddByteSizeMethod(message_descriptor, cls) + _AddSerializeToStringMethod(message_descriptor, cls) + _AddSerializePartialToStringMethod(message_descriptor, cls) + _AddMergeFromStringMethod(message_descriptor, cls) + _AddIsInitializedMethod(message_descriptor, cls) + _AddMergeFromMethod(cls) + _AddWhichOneofMethod(message_descriptor, cls) + # Adds methods which do not depend on cls. + cls.Clear = _Clear + cls.UnknownFields = _UnknownFields + cls.DiscardUnknownFields = _DiscardUnknownFields + cls._SetListener = _SetListener + + +def _AddPrivateHelperMethods(message_descriptor, cls): + """Adds implementation of private helper methods to cls.""" + + def Modified(self): + """Sets the _cached_byte_size_dirty bit to true, + and propagates this to our listener iff this was a state change. + """ + + # Note: Some callers check _cached_byte_size_dirty before calling + # _Modified() as an extra optimization. So, if this method is ever + # changed such that it does stuff even when _cached_byte_size_dirty is + # already true, the callers need to be updated. + if not self._cached_byte_size_dirty: + self._cached_byte_size_dirty = True + self._listener_for_children.dirty = True + self._is_present_in_parent = True + self._listener.Modified() + + def _UpdateOneofState(self, field): + """Sets field as the active field in its containing oneof. + + Will also delete currently active field in the oneof, if it is different + from the argument. Does not mark the message as modified. 
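+
+    For example, if fields 'name' and 'id' share a oneof and 'name' is
+    currently set, calling this with the descriptor for 'id' deletes 'name'
+    from _fields and records 'id' as the oneof's active field.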
+ """ + other_field = self._oneofs.setdefault(field.containing_oneof, field) + if other_field is not field: + del self._fields[other_field] + self._oneofs[field.containing_oneof] = field + + cls._Modified = Modified + cls.SetInParent = Modified + cls._UpdateOneofState = _UpdateOneofState + + +class _Listener(object): + + """MessageListener implementation that a parent message registers with its + child message. + + In order to support semantics like: + + foo.bar.baz.qux = 23 + assert foo.HasField('bar') + + ...child objects must have back references to their parents. + This helper class is at the heart of this support. + """ + + def __init__(self, parent_message): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + """ + # This listener establishes a back reference from a child (contained) object + # to its parent (containing) object. We make this a weak reference to avoid + # creating cyclic garbage when the client finishes with the 'parent' object + # in the tree. + if isinstance(parent_message, weakref.ProxyType): + self._parent_message_weakref = parent_message + else: + self._parent_message_weakref = weakref.proxy(parent_message) + + # As an optimization, we also indicate directly on the listener whether + # or not the parent message is dirty. This way we can avoid traversing + # up the tree in the common case. + self.dirty = False + + def Modified(self): + if self.dirty: + return + try: + # Propagate the signal to our parents iff this is the first field set. + self._parent_message_weakref._Modified() + except ReferenceError: + # We can get here if a client has kept a reference to a child object, + # and is now setting a field on it, but the child's parent has been + # garbage-collected. This is not an error. + pass + + +class _OneofListener(_Listener): + """Special listener implementation for setting composite oneof fields.""" + + def __init__(self, parent_message, field): + """Args: + parent_message: The message whose _Modified() method we should call when + we receive Modified() messages. + field: The descriptor of the field being set in the parent message. + """ + super(_OneofListener, self).__init__(parent_message) + self._field = field + + def Modified(self): + """Also updates the state of the containing oneof in the parent message.""" + try: + self._parent_message_weakref._UpdateOneofState(self._field) + super(_OneofListener, self).Modified() + except ReferenceError: + pass diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/type_checkers.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/type_checkers.py new file mode 100644 index 00000000..a53e71fe --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/type_checkers.py @@ -0,0 +1,435 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Provides type checking routines.
+
+This module defines type checking utilities in the form of dictionaries:
+
+VALUE_CHECKERS: A dictionary of field types and a value validation object.
+TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
+  function.
+TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
+  function.
+FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
+  corresponding wire types.
+TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
+  function.
+"""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import ctypes
+import numbers
+
+from google.protobuf.internal import decoder
+from google.protobuf.internal import encoder
+from google.protobuf.internal import wire_format
+from google.protobuf import descriptor
+
+_FieldDescriptor = descriptor.FieldDescriptor
+
+
+def TruncateToFourByteFloat(original):
+  return ctypes.c_float(original).value
+
+
+def ToShortestFloat(original):
+  """Returns the shortest float that has the same value on the wire."""
+  # All 4-byte floats have between 6 and 9 significant digits, so we
+  # start with 6 as the lower bound.
+  # It has to be iterative because using '.9g' directly cannot get rid
+  # of the noise for most values. For example, if a float field is set to
+  # 0.9, formatting it with '.9g' prints 0.899999976.
+  precision = 6
+  rounded = float('{0:.{1}g}'.format(original, precision))
+  while TruncateToFourByteFloat(rounded) != original:
+    precision += 1
+    rounded = float('{0:.{1}g}'.format(original, precision))
+  return rounded
+
+
+def SupportsOpenEnums(field_descriptor):
+  return field_descriptor.containing_type.syntax == 'proto3'
+
+
+def GetTypeChecker(field):
+  """Returns a type checker for a message field of the specified types.
+
+  Args:
+    field: FieldDescriptor object for this field.
+
+  Returns:
+    An instance of TypeChecker which can be used to verify the types
+    of values assigned to a field of the specified type.
+  """
+  if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and
+      field.type == _FieldDescriptor.TYPE_STRING):
+    return UnicodeValueChecker()
+  if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
+    if SupportsOpenEnums(field):
+      # When open enums are supported, any int32 can be assigned.
+      return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
+    else:
+      return EnumValueChecker(field.enum_type)
+  return _VALUE_CHECKERS[field.cpp_type]
+
+
+# None of the typecheckers below make any attempt to guard against people
+# subclassing builtin types and doing weird things.
We're not trying to +# protect against malicious clients here, just people accidentally shooting +# themselves in the foot in obvious ways. +class TypeChecker(object): + + """Type checker used to catch type errors as early as possible + when the client is setting scalar fields in protocol messages. + """ + + def __init__(self, *acceptable_types): + self._acceptable_types = acceptable_types + + def CheckValue(self, proposed_value): + """Type check the provided value and return it. + + The returned value might have been normalized to another type. + """ + if not isinstance(proposed_value, self._acceptable_types): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), self._acceptable_types)) + raise TypeError(message) + return proposed_value + + +class TypeCheckerWithDefault(TypeChecker): + + def __init__(self, default_value, *acceptable_types): + TypeChecker.__init__(self, *acceptable_types) + self._default_value = default_value + + def DefaultValue(self): + return self._default_value + + +class BoolValueChecker(object): + """Type checker used for bool fields.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bool, int))) + raise TypeError(message) + return bool(proposed_value) + + def DefaultValue(self): + return False + + +# IntValueChecker and its subclasses perform integer type-checks +# and bounds-checks. +class IntValueChecker(object): + + """Checker used for integer fields. Performs type-check and range check.""" + + def CheckValue(self, proposed_value): + if not hasattr(proposed_value, '__index__') or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + + if not self._MIN <= int(proposed_value) <= self._MAX: + raise ValueError('Value out of range: %d' % proposed_value) + # We force all values to int to make alternate implementations where the + # distinction is more significant (e.g. the C++ implementation) simpler. + proposed_value = int(proposed_value) + return proposed_value + + def DefaultValue(self): + return 0 + + +class EnumValueChecker(object): + + """Checker used for enum fields. Performs type-check and range check.""" + + def __init__(self, enum_type): + self._enum_type = enum_type + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, numbers.Integral): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (int,))) + raise TypeError(message) + if int(proposed_value) not in self._enum_type.values_by_number: + raise ValueError('Unknown enum value: %d' % proposed_value) + return proposed_value + + def DefaultValue(self): + return self._enum_type.values[0].number + + +class UnicodeValueChecker(object): + + """Checker used for string fields. + + Always returns a unicode value, even if the input is of type str. + """ + + def CheckValue(self, proposed_value): + if not isinstance(proposed_value, (bytes, str)): + message = ('%.1024r has type %s, but expected one of: %s' % + (proposed_value, type(proposed_value), (bytes, str))) + raise TypeError(message) + + # If the value is of type 'bytes' make sure that it is valid UTF-8 data. 
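+    # For example, CheckValue(b'\xc3\xa9') returns u'\xe9', while
+    # CheckValue(b'\x80') raises ValueError, since a lone continuation byte
+    # is not valid UTF-8.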
+ if isinstance(proposed_value, bytes): + try: + proposed_value = proposed_value.decode('utf-8') + except UnicodeDecodeError: + raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 ' + 'encoding. Non-UTF-8 strings must be converted to ' + 'unicode objects before being added.' % + (proposed_value)) + else: + try: + proposed_value.encode('utf8') + except UnicodeEncodeError: + raise ValueError('%.1024r isn\'t a valid unicode string and ' + 'can\'t be encoded in UTF-8.'% + (proposed_value)) + + return proposed_value + + def DefaultValue(self): + return u"" + + +class Int32ValueChecker(IntValueChecker): + # We're sure to use ints instead of longs here since comparison may be more + # efficient. + _MIN = -2147483648 + _MAX = 2147483647 + + +class Uint32ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 32) - 1 + + +class Int64ValueChecker(IntValueChecker): + _MIN = -(1 << 63) + _MAX = (1 << 63) - 1 + + +class Uint64ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 64) - 1 + + +# The max 4 bytes float is about 3.4028234663852886e+38 +_FLOAT_MAX = float.fromhex('0x1.fffffep+127') +_FLOAT_MIN = -_FLOAT_MAX +_INF = float('inf') +_NEG_INF = float('-inf') + + +class DoubleValueChecker(object): + """Checker used for double fields. + + Performs type-check and range check. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + if (not hasattr(proposed_value, '__float__') and + not hasattr(proposed_value, '__index__')) or ( + type(proposed_value).__module__ == 'numpy' and + type(proposed_value).__name__ == 'ndarray'): + message = ('%.1024r has type %s, but expected one of: int, float' % + (proposed_value, type(proposed_value))) + raise TypeError(message) + return float(proposed_value) + + def DefaultValue(self): + return 0.0 + + +class FloatValueChecker(DoubleValueChecker): + """Checker used for float fields. + + Performs type-check and range check. + + Values exceeding a 32-bit float will be converted to inf/-inf. + """ + + def CheckValue(self, proposed_value): + """Check and convert proposed_value to float.""" + converted_value = super().CheckValue(proposed_value) + # This inf rounding matches the C++ proto SafeDoubleToFloat logic. + if converted_value > _FLOAT_MAX: + return _INF + if converted_value < _FLOAT_MIN: + return _NEG_INF + + return TruncateToFourByteFloat(converted_value) + +# Type-checkers for all scalar CPPTYPEs. +_VALUE_CHECKERS = { + _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(), + _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(), + _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(), + _FieldDescriptor.CPPTYPE_DOUBLE: DoubleValueChecker(), + _FieldDescriptor.CPPTYPE_FLOAT: FloatValueChecker(), + _FieldDescriptor.CPPTYPE_BOOL: BoolValueChecker(), + _FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes), +} + + +# Map from field type to a function F, such that F(field_num, value) +# gives the total byte size for a value of the given type. This +# byte size includes tag information and any other additional space +# associated with serializing "value". 
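+# For example, TYPE_TO_BYTE_SIZE_FN[_FieldDescriptor.TYPE_INT32](1, 150)
+# returns 3: one byte for the tag of field number 1 plus two bytes for the
+# varint encoding of 150.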
+TYPE_TO_BYTE_SIZE_FN = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize, + _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize, + _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize, + _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize, + _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize, + _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize, + _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize, + _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize, + _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize, + _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize, + _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize, + _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize, + _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize, + _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize, + _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize, + _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize, + _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize, + _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize + } + + +# Maps from field types to encoder constructors. +TYPE_TO_ENCODER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder, + _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder, + _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder, + _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder, + _FieldDescriptor.TYPE_STRING: encoder.StringEncoder, + _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder, + _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder, + _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder, + } + + +# Maps from field types to sizer constructors. +TYPE_TO_SIZER = { + _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer, + _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer, + _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer, + _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer, + _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer, + _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer, + _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer, + _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer, + _FieldDescriptor.TYPE_STRING: encoder.StringSizer, + _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer, + _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer, + _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer, + _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer, + _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer, + _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer, + _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer, + _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer, + _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer, + } + + +# Maps from field type to a decoder constructor. 
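+# Each value is a decoder constructor: calling it with per-field details
+# (such as the field number and whether the field is repeated or packed)
+# yields the concrete decode function that InternalParse looks up by tag.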
+TYPE_TO_DECODER = { + _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder, + _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder, + _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder, + _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder, + _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder, + _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder, + _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder, + _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder, + _FieldDescriptor.TYPE_STRING: decoder.StringDecoder, + _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder, + _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder, + _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder, + _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder, + _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder, + _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder, + _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder, + _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder, + _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder, + } + +# Maps from field type to expected wiretype. +FIELD_TYPE_TO_WIRE_TYPE = { + _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_STRING: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP, + _FieldDescriptor.TYPE_MESSAGE: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_BYTES: + wire_format.WIRETYPE_LENGTH_DELIMITED, + _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32, + _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64, + _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT, + _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT, + } diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/well_known_types.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/well_known_types.py new file mode 100644 index 00000000..b581ab75 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/well_known_types.py @@ -0,0 +1,878 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains well known classes. + +This files defines well known classes which need extra maintenance including: + - Any + - Duration + - FieldMask + - Struct + - Timestamp +""" + +__author__ = 'jieluo@google.com (Jie Luo)' + +import calendar +import collections.abc +import datetime + +from google.protobuf.descriptor import FieldDescriptor + +_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S' +_NANOS_PER_SECOND = 1000000000 +_NANOS_PER_MILLISECOND = 1000000 +_NANOS_PER_MICROSECOND = 1000 +_MILLIS_PER_SECOND = 1000 +_MICROS_PER_SECOND = 1000000 +_SECONDS_PER_DAY = 24 * 3600 +_DURATION_SECONDS_MAX = 315576000000 + + +class Any(object): + """Class for Any Message type.""" + + __slots__ = () + + def Pack(self, msg, type_url_prefix='type.googleapis.com/', + deterministic=None): + """Packs the specified message into current Any message.""" + if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/': + self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + else: + self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name) + self.value = msg.SerializeToString(deterministic=deterministic) + + def Unpack(self, msg): + """Unpacks the current Any message into specified message.""" + descriptor = msg.DESCRIPTOR + if not self.Is(descriptor): + return False + msg.ParseFromString(self.value) + return True + + def TypeName(self): + """Returns the protobuf type name of the inner message.""" + # Only last part is to be used: b/25630112 + return self.type_url.split('/')[-1] + + def Is(self, descriptor): + """Checks if this Any represents the given protobuf type.""" + return '/' in self.type_url and self.TypeName() == descriptor.full_name + + +_EPOCH_DATETIME_NAIVE = datetime.datetime.utcfromtimestamp(0) +_EPOCH_DATETIME_AWARE = datetime.datetime.fromtimestamp( + 0, tz=datetime.timezone.utc) + + +class Timestamp(object): + """Class for Timestamp message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts Timestamp to RFC 3339 date string format. + + Returns: + A string converted from timestamp. The string is always Z-normalized + and uses 3, 6 or 9 fractional digits as required to represent the + exact time. Example of the return format: '1972-01-01T10:00:20.021Z' + """ + nanos = self.nanos % _NANOS_PER_SECOND + total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND + seconds = total_sec % _SECONDS_PER_DAY + days = (total_sec - seconds) // _SECONDS_PER_DAY + dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(days, seconds) + + result = dt.isoformat() + if (nanos % 1e9) == 0: + # If there are 0 fractional digits, the fractional + # point '.' should be omitted when serializing. + return result + 'Z' + if (nanos % 1e6) == 0: + # Serialize 3 fractional digits. 
+ return result + '.%03dZ' % (nanos / 1e6) + if (nanos % 1e3) == 0: + # Serialize 6 fractional digits. + return result + '.%06dZ' % (nanos / 1e3) + # Serialize 9 fractional digits. + return result + '.%09dZ' % nanos + + def FromJsonString(self, value): + """Parse a RFC 3339 date string format to Timestamp. + + Args: + value: A date string. Any fractional digits (or none) and any offset are + accepted as long as they fit into nano-seconds precision. + Example of accepted format: '1972-01-01T10:00:20.021-05:00' + + Raises: + ValueError: On parsing problems. + """ + if not isinstance(value, str): + raise ValueError('Timestamp JSON value not a string: {!r}'.format(value)) + timezone_offset = value.find('Z') + if timezone_offset == -1: + timezone_offset = value.find('+') + if timezone_offset == -1: + timezone_offset = value.rfind('-') + if timezone_offset == -1: + raise ValueError( + 'Failed to parse timestamp: missing valid timezone offset.') + time_value = value[0:timezone_offset] + # Parse datetime and nanos. + point_position = time_value.find('.') + if point_position == -1: + second_value = time_value + nano_value = '' + else: + second_value = time_value[:point_position] + nano_value = time_value[point_position + 1:] + if 't' in second_value: + raise ValueError( + 'time data \'{0}\' does not match format \'%Y-%m-%dT%H:%M:%S\', ' + 'lowercase \'t\' is not accepted'.format(second_value)) + date_object = datetime.datetime.strptime(second_value, _TIMESTAMPFOMAT) + td = date_object - datetime.datetime(1970, 1, 1) + seconds = td.seconds + td.days * _SECONDS_PER_DAY + if len(nano_value) > 9: + raise ValueError( + 'Failed to parse Timestamp: nanos {0} more than ' + '9 fractional digits.'.format(nano_value)) + if nano_value: + nanos = round(float('0.' + nano_value) * 1e9) + else: + nanos = 0 + # Parse timezone offsets. 
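+    # For example, in '1972-01-01T10:00:20.021-05:00' the clock fields are
+    # first read as local time; the '-05:00' offset is then added back (a
+    # '+HH:MM' offset would be subtracted) to recover UTC epoch seconds.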
+ if value[timezone_offset] == 'Z': + if len(value) != timezone_offset + 1: + raise ValueError('Failed to parse timestamp: invalid trailing' + ' data {0}.'.format(value)) + else: + timezone = value[timezone_offset:] + pos = timezone.find(':') + if pos == -1: + raise ValueError( + 'Invalid timezone offset value: {0}.'.format(timezone)) + if timezone[0] == '+': + seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + else: + seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 + # Set seconds and nanos + self.seconds = int(seconds) + self.nanos = int(nanos) + + def GetCurrentTime(self): + """Get the current UTC into Timestamp.""" + self.FromDatetime(datetime.datetime.utcnow()) + + def ToNanoseconds(self): + """Converts Timestamp to nanoseconds since epoch.""" + return self.seconds * _NANOS_PER_SECOND + self.nanos + + def ToMicroseconds(self): + """Converts Timestamp to microseconds since epoch.""" + return (self.seconds * _MICROS_PER_SECOND + + self.nanos // _NANOS_PER_MICROSECOND) + + def ToMilliseconds(self): + """Converts Timestamp to milliseconds since epoch.""" + return (self.seconds * _MILLIS_PER_SECOND + + self.nanos // _NANOS_PER_MILLISECOND) + + def ToSeconds(self): + """Converts Timestamp to seconds since epoch.""" + return self.seconds + + def FromNanoseconds(self, nanos): + """Converts nanoseconds since epoch to Timestamp.""" + self.seconds = nanos // _NANOS_PER_SECOND + self.nanos = nanos % _NANOS_PER_SECOND + + def FromMicroseconds(self, micros): + """Converts microseconds since epoch to Timestamp.""" + self.seconds = micros // _MICROS_PER_SECOND + self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND + + def FromMilliseconds(self, millis): + """Converts milliseconds since epoch to Timestamp.""" + self.seconds = millis // _MILLIS_PER_SECOND + self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND + + def FromSeconds(self, seconds): + """Converts seconds since epoch to Timestamp.""" + self.seconds = seconds + self.nanos = 0 + + def ToDatetime(self, tzinfo=None): + """Converts Timestamp to a datetime. + + Args: + tzinfo: A datetime.tzinfo subclass; defaults to None. + + Returns: + If tzinfo is None, returns a timezone-naive UTC datetime (with no timezone + information, i.e. not aware that it's UTC). + + Otherwise, returns a timezone-aware datetime in the input timezone. + """ + delta = datetime.timedelta( + seconds=self.seconds, + microseconds=_RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)) + if tzinfo is None: + return _EPOCH_DATETIME_NAIVE + delta + else: + return _EPOCH_DATETIME_AWARE.astimezone(tzinfo) + delta + + def FromDatetime(self, dt): + """Converts datetime to Timestamp. + + Args: + dt: A datetime. If it's timezone-naive, it's assumed to be in UTC. + """ + # Using this guide: http://wiki.python.org/moin/WorkingWithTime + # And this conversion guide: http://docs.python.org/library/time.html + + # Turn the date parameter into a tuple (struct_time) that can then be + # manipulated into a long value of seconds. During the conversion from + # struct_time to long, the source date in UTC, and so it follows that the + # correct transformation is calendar.timegm() + self.seconds = calendar.timegm(dt.utctimetuple()) + self.nanos = dt.microsecond * _NANOS_PER_MICROSECOND + + +class Duration(object): + """Class for Duration message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts Duration to string format. + + Returns: + A string converted from self. 
The string format will contain
+      3, 6, or 9 fractional digits, depending on the precision required to
+      represent the exact Duration value. For example: "1s", "1.010s",
+      "1.000000100s", "-3.100s".
+    """
+    _CheckDurationValid(self.seconds, self.nanos)
+    if self.seconds < 0 or self.nanos < 0:
+      result = '-'
+      seconds = - self.seconds + int((0 - self.nanos) // 1e9)
+      nanos = (0 - self.nanos) % 1e9
+    else:
+      result = ''
+      seconds = self.seconds + int(self.nanos // 1e9)
+      nanos = self.nanos % 1e9
+    result += '%d' % seconds
+    if (nanos % 1e9) == 0:
+      # If there are 0 fractional digits, the fractional
+      # point '.' should be omitted when serializing.
+      return result + 's'
+    if (nanos % 1e6) == 0:
+      # Serialize 3 fractional digits.
+      return result + '.%03ds' % (nanos / 1e6)
+    if (nanos % 1e3) == 0:
+      # Serialize 6 fractional digits.
+      return result + '.%06ds' % (nanos / 1e3)
+    # Serialize 9 fractional digits.
+    return result + '.%09ds' % nanos
+
+  def FromJsonString(self, value):
+    """Converts a string to Duration.
+
+    Args:
+      value: A string to be converted. The string must end with 's'. Any
+        fractional digits (or none) are accepted as long as they fit into
+        precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s".
+
+    Raises:
+      ValueError: On parsing problems.
+    """
+    if not isinstance(value, str):
+      raise ValueError('Duration JSON value not a string: {!r}'.format(value))
+    if len(value) < 1 or value[-1] != 's':
+      raise ValueError(
+          'Duration must end with letter "s": {0}.'.format(value))
+    try:
+      pos = value.find('.')
+      if pos == -1:
+        seconds = int(value[:-1])
+        nanos = 0
+      else:
+        seconds = int(value[:pos])
+        if value[0] == '-':
+          nanos = int(round(float('-0{0}'.format(value[pos: -1])) * 1e9))
+        else:
+          nanos = int(round(float('0{0}'.format(value[pos: -1])) * 1e9))
+      _CheckDurationValid(seconds, nanos)
+      self.seconds = seconds
+      self.nanos = nanos
+    except ValueError as e:
+      raise ValueError(
+          'Couldn\'t parse duration: {0} : {1}.'.format(value, e))
+
+  def ToNanoseconds(self):
+    """Converts a Duration to nanoseconds."""
+    return self.seconds * _NANOS_PER_SECOND + self.nanos
+
+  def ToMicroseconds(self):
+    """Converts a Duration to microseconds."""
+    micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)
+    return self.seconds * _MICROS_PER_SECOND + micros
+
+  def ToMilliseconds(self):
+    """Converts a Duration to milliseconds."""
+    millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND)
+    return self.seconds * _MILLIS_PER_SECOND + millis
+
+  def ToSeconds(self):
+    """Converts a Duration to seconds."""
+    return self.seconds
+
+  def FromNanoseconds(self, nanos):
+    """Converts nanoseconds to Duration."""
+    self._NormalizeDuration(nanos // _NANOS_PER_SECOND,
+                            nanos % _NANOS_PER_SECOND)
+
+  def FromMicroseconds(self, micros):
+    """Converts microseconds to Duration."""
+    self._NormalizeDuration(
+        micros // _MICROS_PER_SECOND,
+        (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND)
+
+  def FromMilliseconds(self, millis):
+    """Converts milliseconds to Duration."""
+    self._NormalizeDuration(
+        millis // _MILLIS_PER_SECOND,
+        (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND)
+
+  def FromSeconds(self, seconds):
+    """Converts seconds to Duration."""
+    self.seconds = seconds
+    self.nanos = 0
+
+  def ToTimedelta(self):
+    """Converts Duration to timedelta."""
+    return datetime.timedelta(
+        seconds=self.seconds, microseconds=_RoundTowardZero(
+            self.nanos, _NANOS_PER_MICROSECOND))
+
+  def FromTimedelta(self, td):
+    """Converts timedelta to Duration."""
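+    # timedelta normalizes itself so that 0 <= microseconds < 10**6 and
+    # 0 <= seconds < 86400, with the sign carried by days, so folding days
+    # into seconds and scaling microseconds to nanos loses nothing.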
self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY, + td.microseconds * _NANOS_PER_MICROSECOND) + + def _NormalizeDuration(self, seconds, nanos): + """Set Duration by seconds and nanos.""" + # Force nanos to be negative if the duration is negative. + if seconds < 0 and nanos > 0: + seconds += 1 + nanos -= _NANOS_PER_SECOND + self.seconds = seconds + self.nanos = nanos + + +def _CheckDurationValid(seconds, nanos): + if seconds < -_DURATION_SECONDS_MAX or seconds > _DURATION_SECONDS_MAX: + raise ValueError( + 'Duration is not valid: Seconds {0} must be in range ' + '[-315576000000, 315576000000].'.format(seconds)) + if nanos <= -_NANOS_PER_SECOND or nanos >= _NANOS_PER_SECOND: + raise ValueError( + 'Duration is not valid: Nanos {0} must be in range ' + '[-999999999, 999999999].'.format(nanos)) + if (nanos < 0 and seconds > 0) or (nanos > 0 and seconds < 0): + raise ValueError( + 'Duration is not valid: Sign mismatch.') + + +def _RoundTowardZero(value, divider): + """Truncates the remainder part after division.""" + # For some languages, the sign of the remainder is implementation + # dependent if any of the operands is negative. Here we enforce + # "rounded toward zero" semantics. For example, for (-5) / 2 an + # implementation may give -3 as the result with the remainder being + # 1. This function ensures we always return -2 (closer to zero). + result = value // divider + remainder = value % divider + if result < 0 and remainder > 0: + return result + 1 + else: + return result + + +class FieldMask(object): + """Class for FieldMask message type.""" + + __slots__ = () + + def ToJsonString(self): + """Converts FieldMask to string according to proto3 JSON spec.""" + camelcase_paths = [] + for path in self.paths: + camelcase_paths.append(_SnakeCaseToCamelCase(path)) + return ','.join(camelcase_paths) + + def FromJsonString(self, value): + """Converts string to FieldMask according to proto3 JSON spec.""" + if not isinstance(value, str): + raise ValueError('FieldMask JSON value not a string: {!r}'.format(value)) + self.Clear() + if value: + for path in value.split(','): + self.paths.append(_CamelCaseToSnakeCase(path)) + + def IsValidForDescriptor(self, message_descriptor): + """Checks whether the FieldMask is valid for Message Descriptor.""" + for path in self.paths: + if not _IsValidPath(message_descriptor, path): + return False + return True + + def AllFieldsFromDescriptor(self, message_descriptor): + """Gets all direct fields of Message Descriptor to FieldMask.""" + self.Clear() + for field in message_descriptor.fields: + self.paths.append(field.name) + + def CanonicalFormFromMask(self, mask): + """Converts a FieldMask to the canonical form. + + Removes paths that are covered by another path. For example, + "foo.bar" is covered by "foo" and will be removed if "foo" + is also in the FieldMask. Then sorts all paths in alphabetical order. + + Args: + mask: The original FieldMask to be converted. 
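+
+    For example, a mask with paths ["foo.bar", "foo", "baz"] canonicalizes
+    to ["baz", "foo"]: "foo.bar" is dropped because "foo" covers it, and
+    the surviving paths come out in alphabetical order.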
+ """ + tree = _FieldMaskTree(mask) + tree.ToFieldMask(self) + + def Union(self, mask1, mask2): + """Merges mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + tree.MergeFromFieldMask(mask2) + tree.ToFieldMask(self) + + def Intersect(self, mask1, mask2): + """Intersects mask1 and mask2 into this FieldMask.""" + _CheckFieldMaskMessage(mask1) + _CheckFieldMaskMessage(mask2) + tree = _FieldMaskTree(mask1) + intersection = _FieldMaskTree() + for path in mask2.paths: + tree.IntersectPath(path, intersection) + intersection.ToFieldMask(self) + + def MergeMessage( + self, source, destination, + replace_message_field=False, replace_repeated_field=False): + """Merges fields specified in FieldMask from source to destination. + + Args: + source: Source message. + destination: The destination message to be merged into. + replace_message_field: Replace message field if True. Merge message + field if False. + replace_repeated_field: Replace repeated field if True. Append + elements of repeated field if False. + """ + tree = _FieldMaskTree(self) + tree.MergeMessage( + source, destination, replace_message_field, replace_repeated_field) + + +def _IsValidPath(message_descriptor, path): + """Checks whether the path is valid for Message Descriptor.""" + parts = path.split('.') + last = parts.pop() + for name in parts: + field = message_descriptor.fields_by_name.get(name) + if (field is None or + field.label == FieldDescriptor.LABEL_REPEATED or + field.type != FieldDescriptor.TYPE_MESSAGE): + return False + message_descriptor = field.message_type + return last in message_descriptor.fields_by_name + + +def _CheckFieldMaskMessage(message): + """Raises ValueError if message is not a FieldMask.""" + message_descriptor = message.DESCRIPTOR + if (message_descriptor.name != 'FieldMask' or + message_descriptor.file.name != 'google/protobuf/field_mask.proto'): + raise ValueError('Message {0} is not a FieldMask.'.format( + message_descriptor.full_name)) + + +def _SnakeCaseToCamelCase(path_name): + """Converts a path name from snake_case to camelCase.""" + result = [] + after_underscore = False + for c in path_name: + if c.isupper(): + raise ValueError( + 'Fail to print FieldMask to Json string: Path name ' + '{0} must not contain uppercase letters.'.format(path_name)) + if after_underscore: + if c.islower(): + result.append(c.upper()) + after_underscore = False + else: + raise ValueError( + 'Fail to print FieldMask to Json string: The ' + 'character after a "_" must be a lowercase letter ' + 'in path name {0}.'.format(path_name)) + elif c == '_': + after_underscore = True + else: + result += c + + if after_underscore: + raise ValueError('Fail to print FieldMask to Json string: Trailing "_" ' + 'in path name {0}.'.format(path_name)) + return ''.join(result) + + +def _CamelCaseToSnakeCase(path_name): + """Converts a field name from camelCase to snake_case.""" + result = [] + for c in path_name: + if c == '_': + raise ValueError('Fail to parse FieldMask: Path name ' + '{0} must not contain "_"s.'.format(path_name)) + if c.isupper(): + result += '_' + result += c.lower() + else: + result += c + return ''.join(result) + + +class _FieldMaskTree(object): + """Represents a FieldMask in a tree structure. + + For example, given a FieldMask "foo.bar,foo.baz,bar.baz", + the FieldMaskTree will be: + [_root] -+- foo -+- bar + | | + | +- baz + | + +- bar --- baz + In the tree, each leaf node represents a field path. 
+ """ + + __slots__ = ('_root',) + + def __init__(self, field_mask=None): + """Initializes the tree by FieldMask.""" + self._root = {} + if field_mask: + self.MergeFromFieldMask(field_mask) + + def MergeFromFieldMask(self, field_mask): + """Merges a FieldMask to the tree.""" + for path in field_mask.paths: + self.AddPath(path) + + def AddPath(self, path): + """Adds a field path into the tree. + + If the field path to add is a sub-path of an existing field path + in the tree (i.e., a leaf node), it means the tree already matches + the given path so nothing will be added to the tree. If the path + matches an existing non-leaf node in the tree, that non-leaf node + will be turned into a leaf node with all its children removed because + the path matches all the node's children. Otherwise, a new path will + be added. + + Args: + path: The field path to add. + """ + node = self._root + for name in path.split('.'): + if name not in node: + node[name] = {} + elif not node[name]: + # Pre-existing empty node implies we already have this entire tree. + return + node = node[name] + # Remove any sub-trees we might have had. + node.clear() + + def ToFieldMask(self, field_mask): + """Converts the tree to a FieldMask.""" + field_mask.Clear() + _AddFieldPaths(self._root, '', field_mask) + + def IntersectPath(self, path, intersection): + """Calculates the intersection part of a field path with this tree. + + Args: + path: The field path to calculates. + intersection: The out tree to record the intersection part. + """ + node = self._root + for name in path.split('.'): + if name not in node: + return + elif not node[name]: + intersection.AddPath(path) + return + node = node[name] + intersection.AddLeafNodes(path, node) + + def AddLeafNodes(self, prefix, node): + """Adds leaf nodes begin with prefix to this tree.""" + if not node: + self.AddPath(prefix) + for name in node: + child_path = prefix + '.' + name + self.AddLeafNodes(child_path, node[name]) + + def MergeMessage( + self, source, destination, + replace_message, replace_repeated): + """Merge all fields specified by this tree from source to destination.""" + _MergeMessage( + self._root, source, destination, replace_message, replace_repeated) + + +def _StrConvert(value): + """Converts value to str if it is not.""" + # This file is imported by c extension and some methods like ClearField + # requires string for the field name. py2/py3 has different text + # type and may use unicode. + if not isinstance(value, str): + return value.encode('utf-8') + return value + + +def _MergeMessage( + node, source, destination, replace_message, replace_repeated): + """Merge all fields specified by a sub-tree from source to destination.""" + source_descriptor = source.DESCRIPTOR + for name in node: + child = node[name] + field = source_descriptor.fields_by_name[name] + if field is None: + raise ValueError('Error: Can\'t find field {0} in message {1}.'.format( + name, source_descriptor.full_name)) + if child: + # Sub-paths are only allowed for singular message fields. 
+ if (field.label == FieldDescriptor.LABEL_REPEATED or + field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE): + raise ValueError('Error: Field {0} in message {1} is not a singular ' + 'message field and cannot have sub-fields.'.format( + name, source_descriptor.full_name)) + if source.HasField(name): + _MergeMessage( + child, getattr(source, name), getattr(destination, name), + replace_message, replace_repeated) + continue + if field.label == FieldDescriptor.LABEL_REPEATED: + if replace_repeated: + destination.ClearField(_StrConvert(name)) + repeated_source = getattr(source, name) + repeated_destination = getattr(destination, name) + repeated_destination.MergeFrom(repeated_source) + else: + if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE: + if replace_message: + destination.ClearField(_StrConvert(name)) + if source.HasField(name): + getattr(destination, name).MergeFrom(getattr(source, name)) + else: + setattr(destination, name, getattr(source, name)) + + +def _AddFieldPaths(node, prefix, field_mask): + """Adds the field paths descended from node to field_mask.""" + if not node and prefix: + field_mask.paths.append(prefix) + return + for name in sorted(node): + if prefix: + child_path = prefix + '.' + name + else: + child_path = name + _AddFieldPaths(node[name], child_path, field_mask) + + +def _SetStructValue(struct_value, value): + if value is None: + struct_value.null_value = 0 + elif isinstance(value, bool): + # Note: this check must come before the number check because in Python + # True and False are also considered numbers. + struct_value.bool_value = value + elif isinstance(value, str): + struct_value.string_value = value + elif isinstance(value, (int, float)): + struct_value.number_value = value + elif isinstance(value, (dict, Struct)): + struct_value.struct_value.Clear() + struct_value.struct_value.update(value) + elif isinstance(value, (list, ListValue)): + struct_value.list_value.Clear() + struct_value.list_value.extend(value) + else: + raise ValueError('Unexpected type') + + +def _GetStructValue(struct_value): + which = struct_value.WhichOneof('kind') + if which == 'struct_value': + return struct_value.struct_value + elif which == 'null_value': + return None + elif which == 'number_value': + return struct_value.number_value + elif which == 'string_value': + return struct_value.string_value + elif which == 'bool_value': + return struct_value.bool_value + elif which == 'list_value': + return struct_value.list_value + elif which is None: + raise ValueError('Value not set') + + +class Struct(object): + """Class for Struct message type.""" + + __slots__ = () + + def __getitem__(self, key): + return _GetStructValue(self.fields[key]) + + def __contains__(self, item): + return item in self.fields + + def __setitem__(self, key, value): + _SetStructValue(self.fields[key], value) + + def __delitem__(self, key): + del self.fields[key] + + def __len__(self): + return len(self.fields) + + def __iter__(self): + return iter(self.fields) + + def keys(self): # pylint: disable=invalid-name + return self.fields.keys() + + def values(self): # pylint: disable=invalid-name + return [self[key] for key in self] + + def items(self): # pylint: disable=invalid-name + return [(key, self[key]) for key in self] + + def get_or_create_list(self, key): + """Returns a list for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('list_value'): + # Clear will mark list_value modified which will indeed create a list. 
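+      # For example, given a Struct s, s.get_or_create_list('tags').extend(
+      #     ['a', 'b']) materializes an empty ListValue here, then appends.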
+ self.fields[key].list_value.Clear() + return self.fields[key].list_value + + def get_or_create_struct(self, key): + """Returns a struct for this key, creating if it didn't exist already.""" + if not self.fields[key].HasField('struct_value'): + # Clear will mark struct_value modified which will indeed create a struct. + self.fields[key].struct_value.Clear() + return self.fields[key].struct_value + + def update(self, dictionary): # pylint: disable=invalid-name + for key, value in dictionary.items(): + _SetStructValue(self.fields[key], value) + +collections.abc.MutableMapping.register(Struct) + + +class ListValue(object): + """Class for ListValue message type.""" + + __slots__ = () + + def __len__(self): + return len(self.values) + + def append(self, value): + _SetStructValue(self.values.add(), value) + + def extend(self, elem_seq): + for value in elem_seq: + self.append(value) + + def __getitem__(self, index): + """Retrieves item by the specified index.""" + return _GetStructValue(self.values.__getitem__(index)) + + def __setitem__(self, index, value): + _SetStructValue(self.values.__getitem__(index), value) + + def __delitem__(self, key): + del self.values[key] + + def items(self): + for i in range(len(self)): + yield self[i] + + def add_struct(self): + """Appends and returns a struct value as the next value in the list.""" + struct_value = self.values.add().struct_value + # Clear will mark struct_value modified which will indeed create a struct. + struct_value.Clear() + return struct_value + + def add_list(self): + """Appends and returns a list value as the next value in the list.""" + list_value = self.values.add().list_value + # Clear will mark list_value modified which will indeed create a list. + list_value.Clear() + return list_value + +collections.abc.MutableSequence.register(ListValue) + + +WKTBASES = { + 'google.protobuf.Any': Any, + 'google.protobuf.Duration': Duration, + 'google.protobuf.FieldMask': FieldMask, + 'google.protobuf.ListValue': ListValue, + 'google.protobuf.Struct': Struct, + 'google.protobuf.Timestamp': Timestamp, +} diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/wire_format.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/wire_format.py new file mode 100644 index 00000000..883f5255 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/internal/wire_format.py @@ -0,0 +1,268 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Constants and static functions to support protocol buffer wire format."""
+
+__author__ = 'robinson@google.com (Will Robinson)'
+
+import struct
+from google.protobuf import descriptor
+from google.protobuf import message
+
+
+TAG_TYPE_BITS = 3  # Number of bits used to hold type info in a proto tag.
+TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1  # 0x7
+
+# These numbers identify the wire type of a protocol buffer value.
+# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
+# tag-and-type to store one of these WIRETYPE_* constants.
+# These values must match WireType enum in google/protobuf/wire_format.h.
+WIRETYPE_VARINT = 0
+WIRETYPE_FIXED64 = 1
+WIRETYPE_LENGTH_DELIMITED = 2
+WIRETYPE_START_GROUP = 3
+WIRETYPE_END_GROUP = 4
+WIRETYPE_FIXED32 = 5
+_WIRETYPE_MAX = 5
+
+
+# Bounds for various integer types.
+INT32_MAX = int((1 << 31) - 1)
+INT32_MIN = int(-(1 << 31))
+UINT32_MAX = (1 << 32) - 1
+
+INT64_MAX = (1 << 63) - 1
+INT64_MIN = -(1 << 63)
+UINT64_MAX = (1 << 64) - 1
+
+# "struct" format strings that will encode/decode the specified formats.
+FORMAT_UINT32_LITTLE_ENDIAN = '<I'
+FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
+FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
+FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
+
+# We'll have to provide alternate implementations of AppendLittleEndian*() on
+# any architectures where these checks fail.
+if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
+  raise AssertionError('Format "I" is not a 32-bit number.')
+if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
+  raise AssertionError('Format "Q" is not a 64-bit number.')
+
+
+def PackTag(field_number, wire_type):
+  """Returns an unsigned 32-bit integer that encodes the field number and
+  wire type information in standard protocol message wire format.
+
+  Args:
+    field_number: Expected to be an integer in the range [1, 1 << 29)
+    wire_type: One of the WIRETYPE_* constants.
+  """
+  if not 0 <= wire_type <= _WIRETYPE_MAX:
+    raise message.EncodeError('Unknown wire type: %d' % wire_type)
+  return (field_number << TAG_TYPE_BITS) | wire_type
+
+
+def UnpackTag(tag):
+  """The inverse of PackTag().  Given an unsigned 32-bit number,
+  returns a (field_number, wire_type) tuple.
+  """
+  return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
+
+
+def ZigZagEncode(value):
+  """ZigZag Transform: Encodes signed integers so that they can be
+  effectively used with varint encoding. See wire_format.h for
+  more details.
+  """
+  if value >= 0:
+    return value << 1
+  return (value << 1) ^ (~0)
+
+
+def ZigZagDecode(value):
+  """Inverse of ZigZagEncode()."""
+  if not value & 0x1:
+    return value >> 1
+  return (value >> 1) ^ (~0)
+
+
+
+# The *ByteSize() functions below return the number of bytes required to
+# serialize "field number + type" information and then serialize the value.
+
+
+def Int32ByteSize(field_number, int32):
+  return Int64ByteSize(field_number, int32)
+
+
+def Int32ByteSizeNoTag(int32):
+  return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
+
+
+def Int64ByteSize(field_number, int64):
+  # Have to convert to uint before calling UInt64ByteSize().
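+  # Masking with 0xffffffffffffffff reinterprets a negative int64 as its
+  # two's-complement uint64 value; e.g. -1 becomes 2**64 - 1, which encodes
+  # as the maximum-length 10-byte varint.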
+ return UInt64ByteSize(field_number, 0xffffffffffffffff & int64) + + +def UInt32ByteSize(field_number, uint32): + return UInt64ByteSize(field_number, uint32) + + +def UInt64ByteSize(field_number, uint64): + return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64) + + +def SInt32ByteSize(field_number, int32): + return UInt32ByteSize(field_number, ZigZagEncode(int32)) + + +def SInt64ByteSize(field_number, int64): + return UInt64ByteSize(field_number, ZigZagEncode(int64)) + + +def Fixed32ByteSize(field_number, fixed32): + return TagByteSize(field_number) + 4 + + +def Fixed64ByteSize(field_number, fixed64): + return TagByteSize(field_number) + 8 + + +def SFixed32ByteSize(field_number, sfixed32): + return TagByteSize(field_number) + 4 + + +def SFixed64ByteSize(field_number, sfixed64): + return TagByteSize(field_number) + 8 + + +def FloatByteSize(field_number, flt): + return TagByteSize(field_number) + 4 + + +def DoubleByteSize(field_number, double): + return TagByteSize(field_number) + 8 + + +def BoolByteSize(field_number, b): + return TagByteSize(field_number) + 1 + + +def EnumByteSize(field_number, enum): + return UInt32ByteSize(field_number, enum) + + +def StringByteSize(field_number, string): + return BytesByteSize(field_number, string.encode('utf-8')) + + +def BytesByteSize(field_number, b): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(len(b)) + + len(b)) + + +def GroupByteSize(field_number, message): + return (2 * TagByteSize(field_number) # START and END group. + + message.ByteSize()) + + +def MessageByteSize(field_number, message): + return (TagByteSize(field_number) + + _VarUInt64ByteSizeNoTag(message.ByteSize()) + + message.ByteSize()) + + +def MessageSetItemByteSize(field_number, msg): + # First compute the sizes of the tags. + # There are 2 tags for the beginning and ending of the repeated group, that + # is field number 1, one with field number 2 (type_id) and one with field + # number 3 (message). + total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3)) + + # Add the number of bytes for type_id. + total_size += _VarUInt64ByteSizeNoTag(field_number) + + message_size = msg.ByteSize() + + # The number of bytes for encoding the length of the message. + total_size += _VarUInt64ByteSizeNoTag(message_size) + + # The size of the message. + total_size += message_size + return total_size + + +def TagByteSize(field_number): + """Returns the bytes required to serialize a tag with this field number.""" + # Just pass in type 0, since the type won't affect the tag+type size. + return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0)) + + +# Private helper function for the *ByteSize() functions above. + +def _VarUInt64ByteSizeNoTag(uint64): + """Returns the number of bytes required to serialize a single varint + using boundary value comparisons. (unrolled loop optimization -WPierce) + uint64 must be unsigned. 
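+
+  For example: 127 (0x7f) fits in one byte, 128 needs two, and 2**64 - 1
+  takes the full 10 bytes.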
+ """ + if uint64 <= 0x7f: return 1 + if uint64 <= 0x3fff: return 2 + if uint64 <= 0x1fffff: return 3 + if uint64 <= 0xfffffff: return 4 + if uint64 <= 0x7ffffffff: return 5 + if uint64 <= 0x3ffffffffff: return 6 + if uint64 <= 0x1ffffffffffff: return 7 + if uint64 <= 0xffffffffffffff: return 8 + if uint64 <= 0x7fffffffffffffff: return 9 + if uint64 > UINT64_MAX: + raise message.EncodeError('Value out of range: %d' % uint64) + return 10 + + +NON_PACKABLE_TYPES = ( + descriptor.FieldDescriptor.TYPE_STRING, + descriptor.FieldDescriptor.TYPE_GROUP, + descriptor.FieldDescriptor.TYPE_MESSAGE, + descriptor.FieldDescriptor.TYPE_BYTES +) + + +def IsTypePackable(field_type): + """Return true iff packable = true is valid for fields of this type. + + Args: + field_type: a FieldDescriptor::Type value. + + Returns: + True iff fields of this type are packable. + """ + return field_type not in NON_PACKABLE_TYPES diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/json_format.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/json_format.py new file mode 100644 index 00000000..5024ed89 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/json_format.py @@ -0,0 +1,912 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains routines for printing protocol messages in JSON format. + +Simple usage example: + + # Create a proto object and serialize it to a json format string. + message = my_proto_pb2.MyMessage(foo='bar') + json_string = json_format.MessageToJson(message) + + # Parse a json format string to proto object. 
+  message = json_format.Parse(json_string, my_proto_pb2.MyMessage())
+"""
+
+__author__ = 'jieluo@google.com (Jie Luo)'
+
+
+import base64
+from collections import OrderedDict
+import json
+import math
+from operator import methodcaller
+import re
+import sys
+
+from google.protobuf.internal import type_checkers
+from google.protobuf import descriptor
+from google.protobuf import symbol_database
+
+
+_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S'
+_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32,
+                        descriptor.FieldDescriptor.CPPTYPE_UINT32,
+                        descriptor.FieldDescriptor.CPPTYPE_INT64,
+                        descriptor.FieldDescriptor.CPPTYPE_UINT64])
+_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64,
+                          descriptor.FieldDescriptor.CPPTYPE_UINT64])
+_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
+                          descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
+_INFINITY = 'Infinity'
+_NEG_INFINITY = '-Infinity'
+_NAN = 'NaN'
+
+_UNPAIRED_SURROGATE_PATTERN = re.compile(
+    u'[\ud800-\udbff](?![\udc00-\udfff])|(?<![\ud800-\udbff])[\udc00-\udfff]')
+
+_VALID_EXTENSION_NAME = re.compile(r'\[[a-zA-Z0-9\._]*\]$')
+
+_INT_OR_FLOAT = (int, float)
+
+
+class Error(Exception):
+  """Top-level module error for json_format."""
+
+
+class SerializeToJsonError(Error):
+  """Thrown if serialization to JSON fails."""
+
+
+class ParseError(Error):
+  """Thrown in case of parsing error."""
+
+
+def _IsMapEntry(field):
+  return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
+          field.message_type.has_options and
+          field.message_type.GetOptions().map_entry)
+
+
+def _IsWrapperMessage(message_descriptor):
+  return message_descriptor.file.name == 'google/protobuf/wrappers.proto'
+
+
+def _DuplicateChecker(js):
+  result = {}
+  for name, value in js:
+    if name in result:
+      raise ParseError('Failed to load JSON: duplicate key {0}.'.format(name))
+    result[name] = value
+  return result
+
+
+def _CreateMessageFromTypeUrl(type_url, descriptor_pool):
+  """Creates a message from a type URL."""
+  db = symbol_database.Default()
+  pool = db.pool if descriptor_pool is None else descriptor_pool
+  type_name = type_url.split('/')[-1]
+  try:
+    message_descriptor = pool.FindMessageTypeByName(type_name)
+  except KeyError:
+    raise TypeError(
+        'Can not find message descriptor by type_url: {0}'.format(type_url))
+  message_class = db.GetPrototype(message_descriptor)
+  return message_class()
+
+
+def Parse(text,
+          message,
+          ignore_unknown_fields=False,
+          descriptor_pool=None,
+          max_recursion_depth=100):
+  """Parses a JSON representation of a protocol message into a message.
+
+  Args:
+    text: Message JSON representation.
+    message: A protocol buffer message to merge into.
+    ignore_unknown_fields: If True, do not raise errors for unknown fields.
+    descriptor_pool: A Descriptor Pool for resolving types. If None use the
+      default.
+    max_recursion_depth: max recursion depth of JSON message to be
+      deserialized. JSON messages over this depth will fail to be
+      deserialized. Default value is 100.
+
+  Returns:
+    The same message passed as argument.
+
+  Raises:
+    ParseError: On JSON parsing problems.
+  """
+  if not isinstance(text, str):
+    text = text.decode('utf-8')
+  try:
+    js = json.loads(text, object_pairs_hook=_DuplicateChecker)
+  except ValueError as e:
+    raise ParseError('Failed to load JSON: {0}.'.format(str(e)))
+  return ParseDict(js, message, ignore_unknown_fields, descriptor_pool,
+                   max_recursion_depth)
+
+
+def ParseDict(js_dict,
+              message,
+              ignore_unknown_fields=False,
+              descriptor_pool=None,
+              max_recursion_depth=100):
+  """Parses a JSON dictionary representation into a message.
+
+  Args:
+    js_dict: Dict representation of a JSON message.
+    message: A protocol buffer message to merge into.
+    ignore_unknown_fields: If True, do not raise errors for unknown fields.
+    descriptor_pool: A Descriptor Pool for resolving types. If None use the
+      default.
+    max_recursion_depth: max recursion depth of JSON message to be
+      deserialized. JSON messages over this depth will fail to be
+      deserialized. Default value is 100.
+
+  Returns:
+    The same message passed as argument.
+  """
+  parser = _Parser(ignore_unknown_fields, descriptor_pool, max_recursion_depth)
+  parser.ConvertMessage(js_dict, message, '')
+  return message
+
+
+class _Parser(object):
+  """JSON format parser for protocol message."""
+
+  def __init__(self, ignore_unknown_fields, descriptor_pool,
+               max_recursion_depth):
+    self.ignore_unknown_fields = ignore_unknown_fields
+    self.descriptor_pool = descriptor_pool
+    self.max_recursion_depth = max_recursion_depth
+    self.recursion_depth = 0
+
+  def ConvertMessage(self, value, message, path):
+    """Convert a JSON object into a message.
+
+    Args:
+      value: A JSON object.
+      message: A WKT or regular protocol message to record the data.
+      path: parent path to log parse error info.
+
+    Raises:
+      ParseError: In case of convert problems.
+    """
+    self.recursion_depth += 1
+    if self.recursion_depth > self.max_recursion_depth:
+      raise ParseError('Message too deep. Max recursion depth is {0}'.format(
+          self.max_recursion_depth))
+    message_descriptor = message.DESCRIPTOR
+    full_name = message_descriptor.full_name
+    if not path:
+      path = message_descriptor.name
+    if _IsWrapperMessage(message_descriptor):
+      self._ConvertWrapperMessage(value, message, path)
+    elif full_name in _WKTJSONMETHODS:
+      methodcaller(_WKTJSONMETHODS[full_name][1], value, message, path)(self)
+    else:
+      self._ConvertFieldValuePair(value, message, path)
+    self.recursion_depth -= 1
+
+  def _ConvertFieldValuePair(self, js, message, path):
+    """Convert field value pairs into regular message.
+
+    Args:
+      js: A JSON object to convert the field value pairs.
+      message: A regular protocol message to record the data.
+      path: parent path to log parse error info.
+
+    Raises:
+      ParseError: In case of problems converting.
+    """
+    names = []
+    message_descriptor = message.DESCRIPTOR
+    fields_by_json_name = dict((f.json_name, f)
+                               for f in message_descriptor.fields)
+    for name in js:
+      try:
+        field = fields_by_json_name.get(name, None)
+        if not field:
+          field = message_descriptor.fields_by_name.get(name, None)
+        if not field and _VALID_EXTENSION_NAME.match(name):
+          if not message_descriptor.is_extendable:
+            raise ParseError(
+                'Message type {0} does not have extensions at {1}'.format(
+                    message_descriptor.full_name, path))
+          identifier = name[1:-1]  # strip [] brackets
+          # pylint: disable=protected-access
+          field = message.Extensions._FindExtensionByName(identifier)
+          # pylint: enable=protected-access
+          if not field:
+            # Try looking for extension by the message type name, dropping the
+            # field name following the final . separator in full_name.
+            identifier = '.'.join(identifier.split('.')[:-1])
+            # pylint: disable=protected-access
+            field = message.Extensions._FindExtensionByName(identifier)
+            # pylint: enable=protected-access
+        if not field:
+          if self.ignore_unknown_fields:
+            continue
+          raise ParseError(
+              ('Message type "{0}" has no field named "{1}" at "{2}".\n'
+               ' Available Fields(except extensions): "{3}"').format(
+                   message_descriptor.full_name, name, path,
+                   [f.json_name for f in message_descriptor.fields]))
+        if name in names:
+          raise ParseError('Message type "{0}" should not have multiple '
+                           '"{1}" fields at "{2}".'.format(
+                               message.DESCRIPTOR.full_name, name, path))
+        names.append(name)
+        value = js[name]
+        # Check no other oneof field is parsed.
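+        # (A oneof acts as a tagged union: at most one member field may be
+        # set at a time, so a JSON object naming two members of the same
+        # oneof is rejected here.)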
+ if field.containing_oneof is not None and value is not None: + oneof_name = field.containing_oneof.name + if oneof_name in names: + raise ParseError('Message type "{0}" should not have multiple ' + '"{1}" oneof fields at "{2}".'.format( + message.DESCRIPTOR.full_name, oneof_name, + path)) + names.append(oneof_name) + + if value is None: + if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.message_type.full_name == 'google.protobuf.Value'): + sub_message = getattr(message, field.name) + sub_message.null_value = 0 + elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM + and field.enum_type.full_name == 'google.protobuf.NullValue'): + setattr(message, field.name, 0) + else: + message.ClearField(field.name) + continue + + # Parse field value. + if _IsMapEntry(field): + message.ClearField(field.name) + self._ConvertMapFieldValue(value, message, field, + '{0}.{1}'.format(path, name)) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + message.ClearField(field.name) + if not isinstance(value, list): + raise ParseError('repeated field {0} must be in [] which is ' + '{1} at {2}'.format(name, value, path)) + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + # Repeated message field. + for index, item in enumerate(value): + sub_message = getattr(message, field.name).add() + # None is a null_value in Value. + if (item is None and + sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'): + raise ParseError('null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index)) + self.ConvertMessage(item, sub_message, + '{0}.{1}[{2}]'.format(path, name, index)) + else: + # Repeated scalar field. + for index, item in enumerate(value): + if item is None: + raise ParseError('null is not allowed to be used as an element' + ' in a repeated field at {0}.{1}[{2}]'.format( + path, name, index)) + getattr(message, field.name).append( + _ConvertScalarFieldValue( + item, field, '{0}.{1}[{2}]'.format(path, name, index))) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + if field.is_extension: + sub_message = message.Extensions[field] + else: + sub_message = getattr(message, field.name) + sub_message.SetInParent() + self.ConvertMessage(value, sub_message, '{0}.{1}'.format(path, name)) + else: + if field.is_extension: + message.Extensions[field] = _ConvertScalarFieldValue( + value, field, '{0}.{1}'.format(path, name)) + else: + setattr( + message, field.name, + _ConvertScalarFieldValue(value, field, + '{0}.{1}'.format(path, name))) + except ParseError as e: + if field and field.containing_oneof is None: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + else: + raise ParseError(str(e)) + except ValueError as e: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + except TypeError as e: + raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) + + def _ConvertAnyMessage(self, value, message, path): + """Convert a JSON representation into Any message.""" + if isinstance(value, dict) and not value: + return + try: + type_url = value['@type'] + except KeyError: + raise ParseError( + '@type is missing when parsing any message at {0}'.format(path)) + + try: + sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool) + except TypeError as e: + raise ParseError('{0} at {1}'.format(e, path)) + message_descriptor = sub_message.DESCRIPTOR + full_name = message_descriptor.full_name + if 
_IsWrapperMessage(message_descriptor): + self._ConvertWrapperMessage(value['value'], sub_message, + '{0}.value'.format(path)) + elif full_name in _WKTJSONMETHODS: + methodcaller(_WKTJSONMETHODS[full_name][1], value['value'], sub_message, + '{0}.value'.format(path))( + self) + else: + del value['@type'] + self._ConvertFieldValuePair(value, sub_message, path) + value['@type'] = type_url + # Sets Any message + message.value = sub_message.SerializeToString() + message.type_url = type_url + + def _ConvertGenericMessage(self, value, message, path): + """Convert a JSON representation into message with FromJsonString.""" + # Duration, Timestamp, FieldMask have a FromJsonString method to do the + # conversion. Users can also call the method directly. + try: + message.FromJsonString(value) + except ValueError as e: + raise ParseError('{0} at {1}'.format(e, path)) + + def _ConvertValueMessage(self, value, message, path): + """Convert a JSON representation into Value message.""" + if isinstance(value, dict): + self._ConvertStructMessage(value, message.struct_value, path) + elif isinstance(value, list): + self._ConvertListValueMessage(value, message.list_value, path) + elif value is None: + message.null_value = 0 + elif isinstance(value, bool): + message.bool_value = value + elif isinstance(value, str): + message.string_value = value + elif isinstance(value, _INT_OR_FLOAT): + message.number_value = value + else: + raise ParseError('Value {0} has unexpected type {1} at {2}'.format( + value, type(value), path)) + + def _ConvertListValueMessage(self, value, message, path): + """Convert a JSON representation into ListValue message.""" + if not isinstance(value, list): + raise ParseError('ListValue must be in [] which is {0} at {1}'.format( + value, path)) + message.ClearField('values') + for index, item in enumerate(value): + self._ConvertValueMessage(item, message.values.add(), + '{0}[{1}]'.format(path, index)) + + def _ConvertStructMessage(self, value, message, path): + """Convert a JSON representation into Struct message.""" + if not isinstance(value, dict): + raise ParseError('Struct must be in a dict which is {0} at {1}'.format( + value, path)) + # Clear will mark the struct as modified so it will be created even if + # there are no values. + message.Clear() + for key in value: + self._ConvertValueMessage(value[key], message.fields[key], + '{0}.{1}'.format(path, key)) + return + + def _ConvertWrapperMessage(self, value, message, path): + """Convert a JSON representation into Wrapper message.""" + field = message.DESCRIPTOR.fields_by_name['value'] + setattr( + message, 'value', + _ConvertScalarFieldValue(value, field, path='{0}.value'.format(path))) + + def _ConvertMapFieldValue(self, value, message, field, path): + """Convert map field value for a message map field. + + Args: + value: A JSON object to convert the map field value. + message: A protocol message to record the converted data. + field: The descriptor of the map field to be converted. + path: parent path to log parse error info. + + Raises: + ParseError: In case of convert problems. 
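+
+    For example, a map<int32, string> field accepts {"1": "a", "2": "b"};
+    JSON object keys are always strings, so each key is converted back to
+    the declared key type before the entry is inserted.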
+ """ + if not isinstance(value, dict): + raise ParseError( + 'Map field {0} must be in a dict which is {1} at {2}'.format( + field.name, value, path)) + key_field = field.message_type.fields_by_name['key'] + value_field = field.message_type.fields_by_name['value'] + for key in value: + key_value = _ConvertScalarFieldValue(key, key_field, + '{0}.key'.format(path), True) + if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self.ConvertMessage(value[key], + getattr(message, field.name)[key_value], + '{0}[{1}]'.format(path, key_value)) + else: + getattr(message, field.name)[key_value] = _ConvertScalarFieldValue( + value[key], value_field, path='{0}[{1}]'.format(path, key_value)) + + +def _ConvertScalarFieldValue(value, field, path, require_str=False): + """Convert a single scalar field value. + + Args: + value: A scalar value to convert the scalar field value. + field: The descriptor of the field to convert. + path: parent path to log parse error info. + require_str: If True, the field value must be a str. + + Returns: + The converted scalar field value + + Raises: + ParseError: In case of convert problems. + """ + try: + if field.cpp_type in _INT_TYPES: + return _ConvertInteger(value) + elif field.cpp_type in _FLOAT_TYPES: + return _ConvertFloat(value, field) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + return _ConvertBool(value, require_str) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + if isinstance(value, str): + encoded = value.encode('utf-8') + else: + encoded = value + # Add extra padding '=' + padded_value = encoded + b'=' * (4 - len(encoded) % 4) + return base64.urlsafe_b64decode(padded_value) + else: + # Checking for unpaired surrogates appears to be unreliable, + # depending on the specific Python version, so we check manually. + if _UNPAIRED_SURROGATE_PATTERN.search(value): + raise ParseError('Unpaired surrogate') + return value + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + # Convert an enum value. + enum_value = field.enum_type.values_by_name.get(value, None) + if enum_value is None: + try: + number = int(value) + enum_value = field.enum_type.values_by_number.get(number, None) + except ValueError: + raise ParseError('Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name)) + if enum_value is None: + if field.file.syntax == 'proto3': + # Proto3 accepts unknown enums. + return number + raise ParseError('Invalid enum value {0} for enum type {1}'.format( + value, field.enum_type.full_name)) + return enum_value.number + except ParseError as e: + raise ParseError('{0} at {1}'.format(e, path)) + + +def _ConvertInteger(value): + """Convert an integer. + + Args: + value: A scalar value to convert. + + Returns: + The integer value. + + Raises: + ParseError: If an integer couldn't be consumed. 
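+
+  For example, 3.0 is accepted (an integral float) while 3.5, "1 0", and
+  True are rejected; bool is excluded explicitly because it is a subclass
+  of int in Python.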
+  """
+  if isinstance(value, float) and not value.is_integer():
+    raise ParseError('Couldn\'t parse integer: {0}'.format(value))
+
+  if isinstance(value, str) and value.find(' ') != -1:
+    raise ParseError('Couldn\'t parse integer: "{0}"'.format(value))
+
+  if isinstance(value, bool):
+    raise ParseError('Bool value {0} is not acceptable for '
+                     'integer field'.format(value))
+
+  return int(value)
+
+
+def _ConvertFloat(value, field):
+  """Convert a floating point number."""
+  if isinstance(value, float):
+    if math.isnan(value):
+      raise ParseError('Couldn\'t parse NaN, use quoted "NaN" instead')
+    if math.isinf(value):
+      if value > 0:
+        raise ParseError('Couldn\'t parse Infinity or value too large, '
+                         'use quoted "Infinity" instead')
+      else:
+        raise ParseError('Couldn\'t parse -Infinity or value too small, '
+                         'use quoted "-Infinity" instead')
+  if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT:
+    # pylint: disable=protected-access
+    if value > type_checkers._FLOAT_MAX:
+      raise ParseError('Float value too large')
+    # pylint: disable=protected-access
+    if value < type_checkers._FLOAT_MIN:
+      raise ParseError('Float value too small')
+  if value == 'nan':
+    raise ParseError('Couldn\'t parse float "nan", use "NaN" instead')
+  try:
+    # Assume Python compatible syntax.
+    return float(value)
+  except ValueError:
+    # Check alternative spellings.
+    if value == _NEG_INFINITY:
+      return float('-inf')
+    elif value == _INFINITY:
+      return float('inf')
+    elif value == _NAN:
+      return float('nan')
+    else:
+      raise ParseError('Couldn\'t parse float: {0}'.format(value))
+
+
+def _ConvertBool(value, require_str):
+  """Convert a boolean value.
+
+  Args:
+    value: A scalar value to convert.
+    require_str: If True, value must be a str.
+
+  Returns:
+    The bool parsed.
+
+  Raises:
+    ParseError: If a boolean value couldn't be consumed.
+  """
+  if require_str:
+    if value == 'true':
+      return True
+    elif value == 'false':
+      return False
+    else:
+      raise ParseError('Expected "true" or "false", not {0}'.format(value))
+
+  if not isinstance(value, bool):
+    raise ParseError('Expected true or false without quotes')
+  return value
+
+_WKTJSONMETHODS = {
+    'google.protobuf.Any': ['_AnyMessageToJsonObject',
+                            '_ConvertAnyMessage'],
+    'google.protobuf.Duration': ['_GenericMessageToJsonObject',
+                                 '_ConvertGenericMessage'],
+    'google.protobuf.FieldMask': ['_GenericMessageToJsonObject',
+                                  '_ConvertGenericMessage'],
+    'google.protobuf.ListValue': ['_ListValueMessageToJsonObject',
+                                  '_ConvertListValueMessage'],
+    'google.protobuf.Struct': ['_StructMessageToJsonObject',
+                               '_ConvertStructMessage'],
+    'google.protobuf.Timestamp': ['_GenericMessageToJsonObject',
+                                  '_ConvertGenericMessage'],
+    'google.protobuf.Value': ['_ValueMessageToJsonObject',
+                              '_ConvertValueMessage']
+}
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/message.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/message.py
new file mode 100644
index 00000000..76c6802f
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/message.py
@@ -0,0 +1,424 @@
+# Protocol Buffers - Google's data interchange format
+# Copyright 2008 Google Inc. All rights reserved.
+# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# TODO(robinson): We should just make these methods all "pure-virtual" and move +# all implementation out, into reflection.py for now. + + +"""Contains an abstract base class for protocol messages.""" + +__author__ = 'robinson@google.com (Will Robinson)' + +class Error(Exception): + """Base error type for this module.""" + pass + + +class DecodeError(Error): + """Exception raised when deserializing messages.""" + pass + + +class EncodeError(Error): + """Exception raised when serializing messages.""" + pass + + +class Message(object): + + """Abstract base class for protocol messages. + + Protocol message classes are almost always generated by the protocol + compiler. These generated types subclass Message and implement the methods + shown below. + """ + + # TODO(robinson): Link to an HTML document here. + + # TODO(robinson): Document that instances of this class will also + # have an Extensions attribute with __getitem__ and __setitem__. + # Again, not sure how to best convey this. + + # TODO(robinson): Document that the class must also have a static + # RegisterExtension(extension_field) method. + # Not sure how to best express at this point. + + # TODO(robinson): Document these fields and methods. + + __slots__ = [] + + #: The :class:`google.protobuf.descriptor.Descriptor` for this message type. + DESCRIPTOR = None + + def __deepcopy__(self, memo=None): + clone = type(self)() + clone.MergeFrom(self) + return clone + + def __eq__(self, other_msg): + """Recursively compares two messages by value and structure.""" + raise NotImplementedError + + def __ne__(self, other_msg): + # Can't just say self != other_msg, since that would infinitely recurse. 
:) + return not self == other_msg + + def __hash__(self): + raise TypeError('unhashable object') + + def __str__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def __unicode__(self): + """Outputs a human-readable representation of the message.""" + raise NotImplementedError + + def MergeFrom(self, other_msg): + """Merges the contents of the specified message into current message. + + This method merges the contents of the specified message into the current + message. Singular fields that are set in the specified message overwrite + the corresponding fields in the current message. Repeated fields are + appended. Singular sub-messages and groups are recursively merged. + + Args: + other_msg (Message): A message to merge into the current message. + """ + raise NotImplementedError + + def CopyFrom(self, other_msg): + """Copies the content of the specified message into the current message. + + The method clears the current message and then merges the specified + message using MergeFrom. + + Args: + other_msg (Message): A message to copy into the current one. + """ + if self is other_msg: + return + self.Clear() + self.MergeFrom(other_msg) + + def Clear(self): + """Clears all data that was set in the message.""" + raise NotImplementedError + + def SetInParent(self): + """Mark this as present in the parent. + + This normally happens automatically when you assign a field of a + sub-message, but sometimes you want to make the sub-message + present while keeping it empty. If you find yourself using this, + you may want to reconsider your design. + """ + raise NotImplementedError + + def IsInitialized(self): + """Checks if the message is initialized. + + Returns: + bool: The method returns True if the message is initialized (i.e. all of + its required fields are set). + """ + raise NotImplementedError + + # TODO(robinson): MergeFromString() should probably return None and be + # implemented in terms of a helper that returns the # of bytes read. Our + # deserialization routines would use the helper when recursively + # deserializing, but the end user would almost always just want the no-return + # MergeFromString(). + + def MergeFromString(self, serialized): + """Merges serialized protocol buffer data into this message. + + When we find a field in `serialized` that is already present + in this message: + + - If it's a "repeated" field, we append to the end of our list. + - Else, if it's a scalar, we overwrite our field. + - Else, (it's a nonrepeated composite), we recursively merge + into the existing composite. + + Args: + serialized (bytes): Any object that allows us to call + ``memoryview(serialized)`` to access a string of bytes using the + buffer interface. + + Returns: + int: The number of bytes read from `serialized`. + For non-group messages, this will always be `len(serialized)`, + but for messages which are actually groups, this will + generally be less than `len(serialized)`, since we must + stop when we reach an ``END_GROUP`` tag. Note that if + we *do* stop because of an ``END_GROUP`` tag, the number + of bytes returned does not include the bytes + for the ``END_GROUP`` tag information. + + Raises: + DecodeError: if the input cannot be parsed. + """ + # TODO(robinson): Document handling of unknown fields. + # TODO(robinson): When we switch to a helper, this will return None. + raise NotImplementedError + + def ParseFromString(self, serialized): + """Parse serialized protocol buffer data into this message. 
+ + Like :func:`MergeFromString()`, except we clear the object first. + + Raises: + message.DecodeError if the input cannot be parsed. + """ + self.Clear() + return self.MergeFromString(serialized) + + def SerializeToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + A binary string representation of the message if all of the required + fields in the message are set (i.e. the message is initialized). + + Raises: + EncodeError: if the message isn't initialized (see :func:`IsInitialized`). + """ + raise NotImplementedError + + def SerializePartialToString(self, **kwargs): + """Serializes the protocol message to a binary string. + + This method is similar to SerializeToString but doesn't check if the + message is initialized. + + Keyword Args: + deterministic (bool): If true, requests deterministic serialization + of the protobuf, with predictable ordering of map keys. + + Returns: + bytes: A serialized representation of the partial message. + """ + raise NotImplementedError + + # TODO(robinson): Decide whether we like these better + # than auto-generated has_foo() and clear_foo() methods + # on the instances themselves. This way is less consistent + # with C++, but it makes reflection-type access easier and + # reduces the number of magically autogenerated things. + # + # TODO(robinson): Be sure to document (and test) exactly + # which field names are accepted here. Are we case-sensitive? + # What do we do with fields that share names with Python keywords + # like 'lambda' and 'yield'? + # + # nnorwitz says: + # """ + # Typically (in python), an underscore is appended to names that are + # keywords. So they would become lambda_ or yield_. + # """ + def ListFields(self): + """Returns a list of (FieldDescriptor, value) tuples for present fields. + + A message field is non-empty if HasField() would return true. A singular + primitive field is non-empty if HasField() would return true in proto2 or it + is non zero in proto3. A repeated field is non-empty if it contains at least + one element. The fields are ordered by field number. + + Returns: + list[tuple(FieldDescriptor, value)]: field descriptors and values + for all fields in the message which are not empty. The values vary by + field type. + """ + raise NotImplementedError + + def HasField(self, field_name): + """Checks if a certain field is set for the message. + + For a oneof group, checks if any field inside is set. Note that if the + field_name is not defined in the message descriptor, :exc:`ValueError` will + be raised. + + Args: + field_name (str): The name of the field to check for presence. + + Returns: + bool: Whether a value has been set for the named field. + + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def ClearField(self, field_name): + """Clears the contents of a given field. + + Inside a oneof group, clears the field set. If the name neither refers to a + defined field or oneof group, :exc:`ValueError` is raised. + + Args: + field_name (str): The name of the field to check for presence. + + Raises: + ValueError: if the `field_name` is not a member of this message. + """ + raise NotImplementedError + + def WhichOneof(self, oneof_group): + """Returns the name of the field that is set inside a oneof group. + + If no field is set, returns None. 
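+    For example, given an illustrative `oneof avatar` group with members
+    `image_url` and `image_data`, WhichOneof('avatar') returns 'image_url',
+    'image_data', or None.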
+ + Args: + oneof_group (str): the name of the oneof group to check. + + Returns: + str or None: The name of the group that is set, or None. + + Raises: + ValueError: no group with the given name exists + """ + raise NotImplementedError + + def HasExtension(self, extension_handle): + """Checks if a certain extension is present for this message. + + Extensions are retrieved using the :attr:`Extensions` mapping (if present). + + Args: + extension_handle: The handle for the extension to check. + + Returns: + bool: Whether the extension is present for this message. + + Raises: + KeyError: if the extension is repeated. Similar to repeated fields, + there is no separate notion of presence: a "not present" repeated + extension is an empty list. + """ + raise NotImplementedError + + def ClearExtension(self, extension_handle): + """Clears the contents of a given extension. + + Args: + extension_handle: The handle for the extension to clear. + """ + raise NotImplementedError + + def UnknownFields(self): + """Returns the UnknownFieldSet. + + Returns: + UnknownFieldSet: The unknown fields stored in this message. + """ + raise NotImplementedError + + def DiscardUnknownFields(self): + """Clears all fields in the :class:`UnknownFieldSet`. + + This operation is recursive for nested message. + """ + raise NotImplementedError + + def ByteSize(self): + """Returns the serialized size of this message. + + Recursively calls ByteSize() on all contained messages. + + Returns: + int: The number of bytes required to serialize this message. + """ + raise NotImplementedError + + @classmethod + def FromString(cls, s): + raise NotImplementedError + + @staticmethod + def RegisterExtension(extension_handle): + raise NotImplementedError + + def _SetListener(self, message_listener): + """Internal method used by the protocol message implementation. + Clients should not call this directly. + + Sets a listener that this message will call on certain state transitions. + + The purpose of this method is to register back-edges from children to + parents at runtime, for the purpose of setting "has" bits and + byte-size-dirty bits in the parent and ancestor objects whenever a child or + descendant object is modified. + + If the client wants to disconnect this Message from the object tree, she + explicitly sets callback to None. + + If message_listener is None, unregisters any existing listener. Otherwise, + message_listener must implement the MessageListener interface in + internal/message_listener.py, and we discard any listener registered + via a previous _SetListener() call. + """ + raise NotImplementedError + + def __getstate__(self): + """Support the pickle protocol.""" + return dict(serialized=self.SerializePartialToString()) + + def __setstate__(self, state): + """Support the pickle protocol.""" + self.__init__() + serialized = state['serialized'] + # On Python 3, using encoding='latin1' is required for unpickling + # protos pickled by Python 2. + if not isinstance(serialized, bytes): + serialized = serialized.encode('latin1') + self.ParseFromString(serialized) + + def __reduce__(self): + message_descriptor = self.DESCRIPTOR + if message_descriptor.containing_type is None: + return type(self), (), self.__getstate__() + # the message type must be nested. + # Python does not pickle nested classes; use the symbol_database on the + # receiving end. 
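+    # For example, a nested type Outer.Inner is reduced to its full name
+    # (e.g. 'mypkg.Outer.Inner', an illustrative name) plus its serialized
+    # bytes, and _InternalConstructMessage below rebuilds the class from the
+    # symbol database on unpickling.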
+ container = message_descriptor + return (_InternalConstructMessage, (container.full_name,), + self.__getstate__()) + + +def _InternalConstructMessage(full_name): + """Constructs a nested message.""" + from google.protobuf import symbol_database # pylint:disable=g-import-not-at-top + + return symbol_database.Default().GetSymbol(full_name)() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/message_factory.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/message_factory.py new file mode 100644 index 00000000..3656fa68 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/message_factory.py @@ -0,0 +1,185 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Provides a factory class for generating dynamic messages. + +The easiest way to use this class is if you have access to the FileDescriptor +protos containing the messages you want to create you can just do the following: + +message_classes = message_factory.GetMessages(iterable_of_file_descriptors) +my_proto_instance = message_classes['some.proto.package.MessageName']() +""" + +__author__ = 'matthewtoia@google.com (Matt Toia)' + +from google.protobuf.internal import api_implementation +from google.protobuf import descriptor_pool +from google.protobuf import message + +if api_implementation.Type() == 'cpp': + from google.protobuf.pyext import cpp_message as message_impl +else: + from google.protobuf.internal import python_message as message_impl + + +# The type of all Message classes. 
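+# Both backends export a metaclass under this name, so the factory below can
+# build message classes the same way whichever implementation was selected
+# above.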
+_GENERATED_PROTOCOL_MESSAGE_TYPE = message_impl.GeneratedProtocolMessageType
+
+
+class MessageFactory(object):
+  """Factory for creating Proto2 messages from descriptors in a pool."""
+
+  def __init__(self, pool=None):
+    """Initializes a new factory."""
+    self.pool = pool or descriptor_pool.DescriptorPool()
+
+    # local cache of all classes built from protobuf descriptors
+    self._classes = {}
+
+  def GetPrototype(self, descriptor):
+    """Obtains a proto2 message class based on the passed in descriptor.
+
+    Passing a descriptor with a fully qualified name matching a previous
+    invocation will cause the same class to be returned.
+
+    Args:
+      descriptor: The descriptor to build from.
+
+    Returns:
+      A class describing the passed in descriptor.
+    """
+    if descriptor not in self._classes:
+      result_class = self.CreatePrototype(descriptor)
+      # The assignment to _classes is redundant for the base implementation, but
+      # might avoid confusion in cases where CreatePrototype gets overridden and
+      # does not call the base implementation.
+      self._classes[descriptor] = result_class
+      return result_class
+    return self._classes[descriptor]
+
+  def CreatePrototype(self, descriptor):
+    """Builds a proto2 message class based on the passed in descriptor.
+
+    Don't call this function directly, it always creates a new class. Call
+    GetPrototype() instead. This method is meant to be overridden in subclasses
+    to perform additional operations on the newly constructed class.
+
+    Args:
+      descriptor: The descriptor to build from.
+
+    Returns:
+      A class describing the passed in descriptor.
+    """
+    descriptor_name = descriptor.name
+    result_class = _GENERATED_PROTOCOL_MESSAGE_TYPE(
+        descriptor_name,
+        (message.Message,),
+        {
+            'DESCRIPTOR': descriptor,
+            # If module not set, it wrongly points to message_factory module.
+            '__module__': None,
+        })
+    result_class._FACTORY = self  # pylint: disable=protected-access
+    # Assign in _classes before doing recursive calls to avoid infinite
+    # recursion.
+    self._classes[descriptor] = result_class
+    for field in descriptor.fields:
+      if field.message_type:
+        self.GetPrototype(field.message_type)
+    for extension in result_class.DESCRIPTOR.extensions:
+      if extension.containing_type not in self._classes:
+        self.GetPrototype(extension.containing_type)
+      extended_class = self._classes[extension.containing_type]
+      extended_class.RegisterExtension(extension)
+    return result_class
+
+  def GetMessages(self, files):
+    """Gets all the messages from a specified file.
+
+    This will find and resolve dependencies, failing if the descriptor
+    pool cannot satisfy them.
+
+    Args:
+      files: The file names to extract messages from.
+
+    Returns:
+      A dictionary mapping proto names to the message classes. This will include
+      any dependent messages as well as any messages defined in the same file as
+      a specified message.
+    """
+    result = {}
+    for file_name in files:
+      file_desc = self.pool.FindFileByName(file_name)
+      for desc in file_desc.message_types_by_name.values():
+        result[desc.full_name] = self.GetPrototype(desc)
+
+      # While the extension FieldDescriptors are created by the descriptor pool,
+      # the python classes created in the factory need them to be registered
+      # explicitly, which is done below.
+      #
+      # The call to RegisterExtension will specifically check if the
+      # extension was already registered on the object and either
+      # ignore the registration if the original was the same, or raise
+      # an error if they were different.
+ + for extension in file_desc.extensions_by_name.values(): + if extension.containing_type not in self._classes: + self.GetPrototype(extension.containing_type) + extended_class = self._classes[extension.containing_type] + extended_class.RegisterExtension(extension) + return result + + +_FACTORY = MessageFactory() + + +def GetMessages(file_protos): + """Builds a dictionary of all the messages available in a set of files. + + Args: + file_protos: Iterable of FileDescriptorProto to build messages out of. + + Returns: + A dictionary mapping proto names to the message classes. This will include + any dependent messages as well as any messages defined in the same file as + a specified message. + """ + # The cpp implementation of the protocol buffer library requires to add the + # message in topological order of the dependency graph. + file_by_name = {file_proto.name: file_proto for file_proto in file_protos} + def _AddFile(file_proto): + for dependency in file_proto.dependency: + if dependency in file_by_name: + # Remove from elements to be visited, in order to cut cycles. + _AddFile(file_by_name.pop(dependency)) + _FACTORY.pool.Add(file_proto) + while file_by_name: + _AddFile(file_by_name.popitem()[1]) + return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos]) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/proto_builder.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/proto_builder.py new file mode 100644 index 00000000..a4667ce6 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/proto_builder.py @@ -0,0 +1,134 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
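+
+# A minimal usage sketch of the builder defined below (illustrative names;
+# assumes the default descriptor pool):
+#
+#   from google.protobuf import descriptor_pb2
+#   from google.protobuf import proto_builder
+#
+#   ProtoCls = proto_builder.MakeSimpleProtoClass(
+#       {'foo': descriptor_pb2.FieldDescriptorProto.TYPE_INT64,
+#        'bar': descriptor_pb2.FieldDescriptorProto.TYPE_STRING},
+#       full_name='pkg.test.ProtoCls')
+#   msg = ProtoCls(foo=1, bar='hello')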
+ +"""Dynamic Protobuf class creator.""" + +from collections import OrderedDict +import hashlib +import os + +from google.protobuf import descriptor_pb2 +from google.protobuf import descriptor +from google.protobuf import message_factory + + +def _GetMessageFromFactory(factory, full_name): + """Get a proto class from the MessageFactory by name. + + Args: + factory: a MessageFactory instance. + full_name: str, the fully qualified name of the proto type. + Returns: + A class, for the type identified by full_name. + Raises: + KeyError, if the proto is not found in the factory's descriptor pool. + """ + proto_descriptor = factory.pool.FindMessageTypeByName(full_name) + proto_cls = factory.GetPrototype(proto_descriptor) + return proto_cls + + +def MakeSimpleProtoClass(fields, full_name=None, pool=None): + """Create a Protobuf class whose fields are basic types. + + Note: this doesn't validate field names! + + Args: + fields: dict of {name: field_type} mappings for each field in the proto. If + this is an OrderedDict the order will be maintained, otherwise the + fields will be sorted by name. + full_name: optional str, the fully-qualified name of the proto type. + pool: optional DescriptorPool instance. + Returns: + a class, the new protobuf class with a FileDescriptor. + """ + factory = message_factory.MessageFactory(pool=pool) + + if full_name is not None: + try: + proto_cls = _GetMessageFromFactory(factory, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # Get a list of (name, field_type) tuples from the fields dict. If fields was + # an OrderedDict we keep the order, but otherwise we sort the field to ensure + # consistent ordering. + field_items = fields.items() + if not isinstance(fields, OrderedDict): + field_items = sorted(field_items) + + # Use a consistent file name that is unlikely to conflict with any imported + # proto files. + fields_hash = hashlib.sha1() + for f_name, f_type in field_items: + fields_hash.update(f_name.encode('utf-8')) + fields_hash.update(str(f_type).encode('utf-8')) + proto_file_name = fields_hash.hexdigest() + '.proto' + + # If the proto is anonymous, use the same hash to name it. + if full_name is None: + full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' + + fields_hash.hexdigest()) + try: + proto_cls = _GetMessageFromFactory(factory, full_name) + return proto_cls + except KeyError: + # The factory's DescriptorPool doesn't know about this class yet. + pass + + # This is the first time we see this proto: add a new descriptor to the pool. + factory.pool.Add( + _MakeFileDescriptorProto(proto_file_name, full_name, field_items)) + return _GetMessageFromFactory(factory, full_name) + + +def _MakeFileDescriptorProto(proto_file_name, full_name, field_items): + """Populate FileDescriptorProto for MessageFactory's DescriptorPool.""" + package, name = full_name.rsplit('.', 1) + file_proto = descriptor_pb2.FileDescriptorProto() + file_proto.name = os.path.join(package.replace('.', '/'), proto_file_name) + file_proto.package = package + desc_proto = file_proto.message_type.add() + desc_proto.name = name + for f_number, (f_name, f_type) in enumerate(field_items, 1): + field_proto = desc_proto.field.add() + field_proto.name = f_name + # # If the number falls in the reserved range, reassign it to the correct + # # number after the range. 
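+    # (Field numbers 19000-19999 are reserved by protobuf itself, so a dict
+    # large enough to reach 19000 fields has the colliding numbers shifted
+    # past the reserved block.)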
+ if f_number >= descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER: + f_number += ( + descriptor.FieldDescriptor.LAST_RESERVED_FIELD_NUMBER - + descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER + 1) + field_proto.number = f_number + field_proto.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL + field_proto.type = f_type + return file_proto diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/pyext/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/pyext/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/pyext/_message.cpython-39-darwin.so b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/pyext/_message.cpython-39-darwin.so new file mode 100755 index 00000000..b858bc61 Binary files /dev/null and b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/pyext/_message.cpython-39-darwin.so differ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/pyext/cpp_message.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/pyext/cpp_message.py new file mode 100644 index 00000000..fc8eb32d --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/pyext/cpp_message.py @@ -0,0 +1,65 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Protocol message implementation hooks for C++ implementation. + +Contains helper functions used to create protocol message classes from +Descriptor objects at runtime backed by the protocol buffer C++ API. +""" + +__author__ = 'tibell@google.com (Johan Tibell)' + +from google.protobuf.pyext import _message + + +class GeneratedProtocolMessageType(_message.MessageMeta): + + """Metaclass for protocol message classes created at runtime from Descriptors. 
+ + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) + factory = symbol_database.Default() + factory.pool.AddDescriptor(mydescriptor) + MyProtoClass = factory.GetPrototype(mydescriptor) + myproto_instance = MyProtoClass() + myproto.foo_field = 23 + ... + + The above example will not work for nested types. If you wish to include them, + use reflection.MakeClass() instead of manually instantiating the class in + order to create the appropriate class structure. + """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. + _DESCRIPTOR_KEY = 'DESCRIPTOR' diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/reflection.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/reflection.py new file mode 100644 index 00000000..81e18859 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/reflection.py @@ -0,0 +1,95 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This code is meant to work on Python 2.4 and above only. + +"""Contains a metaclass and helper functions used to create +protocol message classes from Descriptor objects at runtime. + +Recall that a metaclass is the "type" of a class. +(A class is to a metaclass what an instance is to a class.) + +In this case, we use the GeneratedProtocolMessageType metaclass +to inject all the useful functionality into the classes +output by the protocol compiler at compile-time. + +The upshot of all this is that the real implementation +details for ALL pure-Python protocol buffers are *here in +this file*. 
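+
+For example (schematically; the names are illustrative), a generated _pb2
+module creates each message class with
+
+  MyMessage = GeneratedProtocolMessageType(
+      'MyMessage', (message.Message,), {'DESCRIPTOR': _MYMESSAGE})
+
+and the metaclass fills in the field properties and serialization methods.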
+""" + +__author__ = 'robinson@google.com (Will Robinson)' + + +from google.protobuf import message_factory +from google.protobuf import symbol_database + +# The type of all Message classes. +# Part of the public interface, but normally only used by message factories. +GeneratedProtocolMessageType = message_factory._GENERATED_PROTOCOL_MESSAGE_TYPE + +MESSAGE_CLASS_CACHE = {} + + +# Deprecated. Please NEVER use reflection.ParseMessage(). +def ParseMessage(descriptor, byte_str): + """Generate a new Message instance from this Descriptor and a byte string. + + DEPRECATED: ParseMessage is deprecated because it is using MakeClass(). + Please use MessageFactory.GetPrototype() instead. + + Args: + descriptor: Protobuf Descriptor object + byte_str: Serialized protocol buffer byte string + + Returns: + Newly created protobuf Message object. + """ + result_class = MakeClass(descriptor) + new_msg = result_class() + new_msg.ParseFromString(byte_str) + return new_msg + + +# Deprecated. Please NEVER use reflection.MakeClass(). +def MakeClass(descriptor): + """Construct a class object for a protobuf described by descriptor. + + DEPRECATED: use MessageFactory.GetPrototype() instead. + + Args: + descriptor: A descriptor.Descriptor object describing the protobuf. + Returns: + The Message class object described by the descriptor. + """ + # Original implementation leads to duplicate message classes, which won't play + # well with extensions. Message factory info is also missing. + # Redirect to message_factory. + return symbol_database.Default().GetPrototype(descriptor) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/service.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/service.py new file mode 100644 index 00000000..56252463 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/service.py @@ -0,0 +1,228 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""DEPRECATED: Declares the RPC service interfaces. + +This module declares the abstract interfaces underlying proto2 RPC +services. These are intended to be independent of any particular RPC +implementation, so that proto2 services can be used on top of a variety +of implementations. Starting with version 2.3.0, RPC implementations should +not try to build on these, but should instead provide code generator plugins +which generate code specific to the particular RPC implementation. This way +the generated code can be more appropriate for the implementation in use +and can avoid unnecessary layers of indirection. +""" + +__author__ = 'petar@google.com (Petar Petrov)' + + +class RpcException(Exception): + """Exception raised on failed blocking RPC method call.""" + pass + + +class Service(object): + + """Abstract base interface for protocol-buffer-based RPC services. + + Services themselves are abstract classes (implemented either by servers or as + stubs), but they subclass this base interface. The methods of this + interface can be used to call the methods of the service without knowing + its exact type at compile time (analogous to the Message interface). + """ + + def GetDescriptor(): + """Retrieves this service's descriptor.""" + raise NotImplementedError + + def CallMethod(self, method_descriptor, rpc_controller, + request, done): + """Calls a method of the service specified by method_descriptor. + + If "done" is None then the call is blocking and the response + message will be returned directly. Otherwise the call is asynchronous + and "done" will later be called with the response value. + + In the blocking case, RpcException will be raised on error. + + Preconditions: + + * method_descriptor.service == GetDescriptor + * request is of the exact same classes as returned by + GetRequestClass(method). + * After the call has started, the request must not be modified. + * "rpc_controller" is of the correct type for the RPC implementation being + used by this Service. For stubs, the "correct type" depends on the + RpcChannel which the stub is using. + + Postconditions: + + * "done" will be called when the method is complete. This may be + before CallMethod() returns or it may be at some point in the future. + * If the RPC failed, the response value passed to "done" will be None. + Further details about the failure can be found by querying the + RpcController. + """ + raise NotImplementedError + + def GetRequestClass(self, method_descriptor): + """Returns the class of the request message for the specified method. + + CallMethod() requires that the request is of a particular subclass of + Message. GetRequestClass() gets the default instance of this required + type. 
+ + Example: + method = service.GetDescriptor().FindMethodByName("Foo") + request = stub.GetRequestClass(method)() + request.ParseFromString(input) + service.CallMethod(method, request, callback) + """ + raise NotImplementedError + + def GetResponseClass(self, method_descriptor): + """Returns the class of the response message for the specified method. + + This method isn't really needed, as the RpcChannel's CallMethod constructs + the response protocol message. It's provided anyway in case it is useful + for the caller to know the response type in advance. + """ + raise NotImplementedError + + +class RpcController(object): + + """An RpcController mediates a single method call. + + The primary purpose of the controller is to provide a way to manipulate + settings specific to the RPC implementation and to find out about RPC-level + errors. The methods provided by the RpcController interface are intended + to be a "least common denominator" set of features which we expect all + implementations to support. Specific implementations may provide more + advanced features (e.g. deadline propagation). + """ + + # Client-side methods below + + def Reset(self): + """Resets the RpcController to its initial state. + + After the RpcController has been reset, it may be reused in + a new call. Must not be called while an RPC is in progress. + """ + raise NotImplementedError + + def Failed(self): + """Returns true if the call failed. + + After a call has finished, returns true if the call failed. The possible + reasons for failure depend on the RPC implementation. Failed() must not + be called before a call has finished. If Failed() returns true, the + contents of the response message are undefined. + """ + raise NotImplementedError + + def ErrorText(self): + """If Failed is true, returns a human-readable description of the error.""" + raise NotImplementedError + + def StartCancel(self): + """Initiate cancellation. + + Advises the RPC system that the caller desires that the RPC call be + canceled. The RPC system may cancel it immediately, may wait awhile and + then cancel it, or may not even cancel the call at all. If the call is + canceled, the "done" callback will still be called and the RpcController + will indicate that the call failed at that time. + """ + raise NotImplementedError + + # Server-side methods below + + def SetFailed(self, reason): + """Sets a failure reason. + + Causes Failed() to return true on the client side. "reason" will be + incorporated into the message returned by ErrorText(). If you find + you need to return machine-readable information about failures, you + should incorporate it into your response protocol buffer and should + NOT call SetFailed(). + """ + raise NotImplementedError + + def IsCanceled(self): + """Checks if the client cancelled the RPC. + + If true, indicates that the client canceled the RPC, so the server may + as well give up on replying to it. The server should still call the + final "done" callback. + """ + raise NotImplementedError + + def NotifyOnCancel(self, callback): + """Sets a callback to invoke on cancel. + + Asks that the given callback be called when the RPC is canceled. The + callback will always be called exactly once. If the RPC completes without + being canceled, the callback will be called after completion. If the RPC + has already been canceled when NotifyOnCancel() is called, the callback + will be called immediately. + + NotifyOnCancel() must be called no more than once per request. 
+ """ + raise NotImplementedError + + +class RpcChannel(object): + + """Abstract interface for an RPC channel. + + An RpcChannel represents a communication line to a service which can be used + to call that service's methods. The service may be running on another + machine. Normally, you should not use an RpcChannel directly, but instead + construct a stub {@link Service} wrapping it. Example: + + Example: + RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234") + RpcController controller = rpcImpl.Controller() + MyService service = MyService_Stub(channel) + service.MyMethod(controller, request, callback) + """ + + def CallMethod(self, method_descriptor, rpc_controller, + request, response_class, done): + """Calls the method identified by the descriptor. + + Call the given method of the remote service. The signature of this + procedure looks the same as Service.CallMethod(), but the requirements + are less strict in one important way: the request object doesn't have to + be of any specific class as long as its descriptor is method.input_type. + """ + raise NotImplementedError diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/service_reflection.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/service_reflection.py new file mode 100644 index 00000000..f82ab714 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/service_reflection.py @@ -0,0 +1,295 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains metaclasses used to create protocol service and service stub +classes from ServiceDescriptor objects at runtime. + +The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to +inject all useful functionality into the classes output by the protocol +compiler at compile-time. 
+""" + +__author__ = 'petar@google.com (Petar Petrov)' + + +class GeneratedServiceType(type): + + """Metaclass for service classes created at runtime from ServiceDescriptors. + + Implementations for all methods described in the Service class are added here + by this class. We also create properties to allow getting/setting all fields + in the protocol message. + + The protocol compiler currently uses this metaclass to create protocol service + classes at runtime. Clients can also manually create their own classes at + runtime, as in this example:: + + mydescriptor = ServiceDescriptor(.....) + class MyProtoService(service.Service): + __metaclass__ = GeneratedServiceType + DESCRIPTOR = mydescriptor + myservice_instance = MyProtoService() + # ... + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service class. + + Args: + name: Name of the class (ignored, but required by the metaclass + protocol). + bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service class is subclassed. + if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY] + service_builder = _ServiceBuilder(descriptor) + service_builder.BuildService(cls) + cls.DESCRIPTOR = descriptor + + +class GeneratedServiceStubType(GeneratedServiceType): + + """Metaclass for service stubs created at runtime from ServiceDescriptors. + + This class has similar responsibilities as GeneratedServiceType, except that + it creates the service stub classes. + """ + + _DESCRIPTOR_KEY = 'DESCRIPTOR' + + def __init__(cls, name, bases, dictionary): + """Creates a message service stub class. + + Args: + name: Name of the class (ignored, here). + bases: Base classes of the class being constructed. + dictionary: The class dictionary of the class being constructed. + dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object + describing this protocol service type. + """ + super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary) + # Don't do anything if this class doesn't have a descriptor. This happens + # when a service stub is subclassed. + if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary: + return + + descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY] + service_stub_builder = _ServiceStubBuilder(descriptor) + service_stub_builder.BuildServiceStub(cls) + + +class _ServiceBuilder(object): + + """This class constructs a protocol service class using a service descriptor. + + Given a service descriptor, this class constructs a class that represents + the specified service descriptor. One service builder instance constructs + exactly one service class. That means all instances of that class share the + same builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + service class. + """ + self.descriptor = service_descriptor + + def BuildService(builder, cls): + """Constructs the service class. + + Args: + cls: The class that will be constructed. + """ + + # CallMethod needs to operate with an instance of the Service class. 
This + # internal wrapper function exists only to be able to pass the service + # instance to the method that does the real CallMethod work. + # Making sure to use exact argument names from the abstract interface in + # service.py to match the type signature + def _WrapCallMethod(self, method_descriptor, rpc_controller, request, done): + return builder._CallMethod(self, method_descriptor, rpc_controller, + request, done) + + def _WrapGetRequestClass(self, method_descriptor): + return builder._GetRequestClass(method_descriptor) + + def _WrapGetResponseClass(self, method_descriptor): + return builder._GetResponseClass(method_descriptor) + + builder.cls = cls + cls.CallMethod = _WrapCallMethod + cls.GetDescriptor = staticmethod(lambda: builder.descriptor) + cls.GetDescriptor.__doc__ = 'Returns the service descriptor.' + cls.GetRequestClass = _WrapGetRequestClass + cls.GetResponseClass = _WrapGetResponseClass + for method in builder.descriptor.methods: + setattr(cls, method.name, builder._GenerateNonImplementedMethod(method)) + + def _CallMethod(self, srvc, method_descriptor, + rpc_controller, request, callback): + """Calls the method described by a given method descriptor. + + Args: + srvc: Instance of the service for which this method is called. + method_descriptor: Descriptor that represent the method to call. + rpc_controller: RPC controller to use for this method's execution. + request: Request protocol message. + callback: A callback to invoke after the method has completed. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'CallMethod() given method descriptor for wrong service type.') + method = getattr(srvc, method_descriptor.name) + return method(rpc_controller, request, callback) + + def _GetRequestClass(self, method_descriptor): + """Returns the class of the request protocol message. + + Args: + method_descriptor: Descriptor of the method for which to return the + request protocol message class. + + Returns: + A class that represents the input protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetRequestClass() given method descriptor for wrong service type.') + return method_descriptor.input_type._concrete_class + + def _GetResponseClass(self, method_descriptor): + """Returns the class of the response protocol message. + + Args: + method_descriptor: Descriptor of the method for which to return the + response protocol message class. + + Returns: + A class that represents the output protocol message of the specified + method. + """ + if method_descriptor.containing_service != self.descriptor: + raise RuntimeError( + 'GetResponseClass() given method descriptor for wrong service type.') + return method_descriptor.output_type._concrete_class + + def _GenerateNonImplementedMethod(self, method): + """Generates and returns a method that can be set for a service methods. + + Args: + method: Descriptor of the service method for which a method is to be + generated. + + Returns: + A method that can be added to the service class. + """ + return lambda inst, rpc_controller, request, callback: ( + self._NonImplementedMethod(method.name, rpc_controller, callback)) + + def _NonImplementedMethod(self, method_name, rpc_controller, callback): + """The body of all methods in the generated service class. + + Args: + method_name: Name of the method being executed. + rpc_controller: RPC controller used to execute this method. 
+ callback: A callback which will be invoked when the method finishes. + """ + rpc_controller.SetFailed('Method %s not implemented.' % method_name) + callback(None) + + +class _ServiceStubBuilder(object): + + """Constructs a protocol service stub class using a service descriptor. + + Given a service descriptor, this class constructs a suitable stub class. + A stub is just a type-safe wrapper around an RpcChannel which emulates a + local implementation of the service. + + One service stub builder instance constructs exactly one class. It means all + instances of that class share the same service stub builder. + """ + + def __init__(self, service_descriptor): + """Initializes an instance of the service stub class builder. + + Args: + service_descriptor: ServiceDescriptor to use when constructing the + stub class. + """ + self.descriptor = service_descriptor + + def BuildServiceStub(self, cls): + """Constructs the stub class. + + Args: + cls: The class that will be constructed. + """ + + def _ServiceStubInit(stub, rpc_channel): + stub.rpc_channel = rpc_channel + self.cls = cls + cls.__init__ = _ServiceStubInit + for method in self.descriptor.methods: + setattr(cls, method.name, self._GenerateStubMethod(method)) + + def _GenerateStubMethod(self, method): + return (lambda inst, rpc_controller, request, callback=None: + self._StubMethod(inst, method, rpc_controller, request, callback)) + + def _StubMethod(self, stub, method_descriptor, + rpc_controller, request, callback): + """The body of all service methods in the generated stub class. + + Args: + stub: Stub instance. + method_descriptor: Descriptor of the invoked method. + rpc_controller: Rpc controller to execute the method. + request: Request protocol message. + callback: A callback to execute when the method finishes. + Returns: + Response message (in case of blocking call). + """ + return stub.rpc_channel.CallMethod( + method_descriptor, rpc_controller, request, + method_descriptor.output_type._concrete_class, callback) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/source_context_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/source_context_pb2.py new file mode 100644 index 00000000..30cca2e0 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/source_context_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/source_context.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$google/protobuf/source_context.proto\x12\x0fgoogle.protobuf\"\"\n\rSourceContext\x12\x11\n\tfile_name\x18\x01 \x01(\tB\x8a\x01\n\x13\x63om.google.protobufB\x12SourceContextProtoP\x01Z6google.golang.org/protobuf/types/known/sourcecontextpb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.source_context_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\022SourceContextProtoP\001Z6google.golang.org/protobuf/types/known/sourcecontextpb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _SOURCECONTEXT._serialized_start=57 + _SOURCECONTEXT._serialized_end=91 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/struct_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/struct_pb2.py new file mode 100644 index 00000000..149728ca --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/struct_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/struct.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x84\x01\n\x06Struct\x12\x33\n\x06\x66ields\x18\x01 \x03(\x0b\x32#.google.protobuf.Struct.FieldsEntry\x1a\x45\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01\"\xea\x01\n\x05Value\x12\x30\n\nnull_value\x18\x01 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x12\x16\n\x0cnumber_value\x18\x02 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x12\x14\n\nbool_value\x18\x04 \x01(\x08H\x00\x12/\n\x0cstruct_value\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x12\x30\n\nlist_value\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x42\x06\n\x04kind\"3\n\tListValue\x12&\n\x06values\x18\x01 \x03(\x0b\x32\x16.google.protobuf.Value*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00\x42\x7f\n\x13\x63om.google.protobufB\x0bStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.struct_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\013StructProtoP\001Z/google.golang.org/protobuf/types/known/structpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _STRUCT_FIELDSENTRY._options = None + _STRUCT_FIELDSENTRY._serialized_options = b'8\001' + _NULLVALUE._serialized_start=474 + _NULLVALUE._serialized_end=501 + _STRUCT._serialized_start=50 + _STRUCT._serialized_end=182 + _STRUCT_FIELDSENTRY._serialized_start=113 + _STRUCT_FIELDSENTRY._serialized_end=182 + _VALUE._serialized_start=185 + _VALUE._serialized_end=419 + _LISTVALUE._serialized_start=421 + _LISTVALUE._serialized_end=472 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/symbol_database.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/symbol_database.py new file mode 100644 index 00000000..fdcf8cf0 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/symbol_database.py @@ -0,0 +1,194 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A database of Python protocol buffer generated symbols. + +SymbolDatabase is the MessageFactory for messages generated at compile time, +and makes it easy to create new instances of a registered type, given only the +type's protocol buffer symbol name. + +Example usage:: + + db = symbol_database.SymbolDatabase() + + # Register symbols of interest, from one or multiple files. + db.RegisterFileDescriptor(my_proto_pb2.DESCRIPTOR) + db.RegisterMessage(my_proto_pb2.MyMessage) + db.RegisterEnumDescriptor(my_proto_pb2.MyEnum.DESCRIPTOR) + + # The database can be used as a MessageFactory, to generate types based on + # their name: + types = db.GetMessages(['my_proto.proto']) + my_message_instance = types['MyMessage']() + + # The database's underlying descriptor pool can be queried, so it's not + # necessary to know a type's filename to be able to generate it: + filename = db.pool.FindFileContainingSymbol('MyMessage') + my_message_instance = db.GetMessages([filename])['MyMessage']() + + # This functionality is also provided directly via a convenience method: + my_message_instance = db.GetSymbol('MyMessage')() +""" + + +from google.protobuf.internal import api_implementation +from google.protobuf import descriptor_pool +from google.protobuf import message_factory + + +class SymbolDatabase(message_factory.MessageFactory): + """A database of Python generated symbols.""" + + def RegisterMessage(self, message): + """Registers the given message type in the local database. + + Calls to GetSymbol() and GetMessages() will return messages registered here. + + Args: + message: A :class:`google.protobuf.message.Message` subclass (or + instance); its descriptor will be registered. + + Returns: + The provided message. + """ + + desc = message.DESCRIPTOR + self._classes[desc] = message + self.RegisterMessageDescriptor(desc) + return message + + def RegisterMessageDescriptor(self, message_descriptor): + """Registers the given message descriptor in the local database. + + Args: + message_descriptor (Descriptor): the message descriptor to add. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddDescriptor(message_descriptor) + + def RegisterEnumDescriptor(self, enum_descriptor): + """Registers the given enum descriptor in the local database. + + Args: + enum_descriptor (EnumDescriptor): The enum descriptor to register. + + Returns: + EnumDescriptor: The provided descriptor. 
+ """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddEnumDescriptor(enum_descriptor) + return enum_descriptor + + def RegisterServiceDescriptor(self, service_descriptor): + """Registers the given service descriptor in the local database. + + Args: + service_descriptor (ServiceDescriptor): the service descriptor to + register. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._AddServiceDescriptor(service_descriptor) + + def RegisterFileDescriptor(self, file_descriptor): + """Registers the given file descriptor in the local database. + + Args: + file_descriptor (FileDescriptor): The file descriptor to register. + """ + if api_implementation.Type() == 'python': + # pylint: disable=protected-access + self.pool._InternalAddFileDescriptor(file_descriptor) + + def GetSymbol(self, symbol): + """Tries to find a symbol in the local database. + + Currently, this method only returns message.Message instances, however, if + may be extended in future to support other symbol types. + + Args: + symbol (str): a protocol buffer symbol. + + Returns: + A Python class corresponding to the symbol. + + Raises: + KeyError: if the symbol could not be found. + """ + + return self._classes[self.pool.FindMessageTypeByName(symbol)] + + def GetMessages(self, files): + # TODO(amauryfa): Fix the differences with MessageFactory. + """Gets all registered messages from a specified file. + + Only messages already created and registered will be returned; (this is the + case for imported _pb2 modules) + But unlike MessageFactory, this version also returns already defined nested + messages, but does not register any message extensions. + + Args: + files (list[str]): The file names to extract messages from. + + Returns: + A dictionary mapping proto names to the message classes. + + Raises: + KeyError: if a file could not be found. + """ + + def _GetAllMessages(desc): + """Walk a message Descriptor and recursively yields all message names.""" + yield desc + for msg_desc in desc.nested_types: + for nested_desc in _GetAllMessages(msg_desc): + yield nested_desc + + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) + for msg_desc in file_desc.message_types_by_name.values(): + for desc in _GetAllMessages(msg_desc): + try: + result[desc.full_name] = self._classes[desc] + except KeyError: + # This descriptor has no registered class, skip it. + pass + return result + + +_DEFAULT = SymbolDatabase(pool=descriptor_pool.Default()) + + +def Default(): + """Returns the default SymbolDatabase.""" + return _DEFAULT diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/text_encoding.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/text_encoding.py new file mode 100644 index 00000000..759cf11f --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/text_encoding.py @@ -0,0 +1,110 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Encoding related utilities."""
+import re
+
+_cescape_chr_to_symbol_map = {}
+_cescape_chr_to_symbol_map[9] = r'\t'  # optional escape
+_cescape_chr_to_symbol_map[10] = r'\n'  # optional escape
+_cescape_chr_to_symbol_map[13] = r'\r'  # optional escape
+_cescape_chr_to_symbol_map[34] = r'\"'  # necessary escape
+_cescape_chr_to_symbol_map[39] = r"\'"  # optional escape
+_cescape_chr_to_symbol_map[92] = r'\\'  # necessary escape
+
+# Lookup table for unicode
+_cescape_unicode_to_str = [chr(i) for i in range(0, 256)]
+for byte, string in _cescape_chr_to_symbol_map.items():
+  _cescape_unicode_to_str[byte] = string
+
+# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
+_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] +
+                        [chr(i) for i in range(32, 127)] +
+                        [r'\%03o' % i for i in range(127, 256)])
+for byte, string in _cescape_chr_to_symbol_map.items():
+  _cescape_byte_to_str[byte] = string
+del byte, string
+
+
+def CEscape(text, as_utf8):
+  # type: (...) -> str
+  """Escape a bytes string for use in a text protocol buffer.
+
+  Args:
+    text: A byte string to be escaped.
+    as_utf8: Specifies if result may contain non-ASCII characters.
+        In Python 3 this allows unescaped non-ASCII Unicode characters.
+        In Python 2 the return value will be valid UTF-8 rather than only
+        ASCII.
+  Returns:
+    Escaped string (str).
+  """
+  # Python's text.encode() 'string_escape' or 'unicode_escape' codecs do not
+  # satisfy our needs; they encode unprintable characters using two-digit hex
+  # escapes whereas our C++ unescaping function allows hex escapes to be any
+  # length. So, "\0011".encode('string_escape') ends up being "\\x011", which
+  # will be decoded in C++ as a single-character string with char code 0x11.
+  text_is_unicode = isinstance(text, str)
+  if as_utf8 and text_is_unicode:
+    # We're already unicode, no processing beyond control char escapes.
+    return text.translate(_cescape_chr_to_symbol_map)
+  ord_ = ord if text_is_unicode else lambda x: x  # bytes iterate as ints.
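+  # For example, CEscape(b'\x00"\n', as_utf8=False) returns '\\000\\"\\n':
+  # the unprintable byte takes an octal escape from _cescape_byte_to_str,
+  # while '"' and '\n' keep their symbolic escapes from the map above.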
+ if as_utf8: + return ''.join(_cescape_unicode_to_str[ord_(c)] for c in text) + return ''.join(_cescape_byte_to_str[ord_(c)] for c in text) + + +_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])') + + +def CUnescape(text): + # type: (str) -> bytes + """Unescape a text string with C-style escape sequences to UTF-8 bytes. + + Args: + text: The data to parse in a str. + Returns: + A byte string. + """ + + def ReplaceHex(m): + # Only replace the match if the number of leading back slashes is odd. i.e. + # the slash itself is not escaped. + if len(m.group(1)) & 1: + return m.group(1) + 'x0' + m.group(2) + return m.group(0) + + # This is required because the 'string_escape' encoding doesn't + # allow single-digit hex escapes (like '\xf'). + result = _CUNESCAPE_HEX.sub(ReplaceHex, text) + + return (result.encode('utf-8') # Make it bytes to allow decode. + .decode('unicode_escape') + # Make it bytes again to return the proper type. + .encode('raw_unicode_escape')) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/text_format.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/text_format.py new file mode 100644 index 00000000..412385c2 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/text_format.py @@ -0,0 +1,1795 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# https://developers.google.com/protocol-buffers/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains routines for printing protocol messages in text format. + +Simple usage example:: + + # Create a proto object and serialize it to a text proto string. + message = my_proto_pb2.MyMessage(foo='bar') + text_proto = text_format.MessageToString(message) + + # Parse a text proto string. + message = text_format.Parse(text_proto, my_proto_pb2.MyMessage()) +""" + +__author__ = 'kenton@google.com (Kenton Varda)' + +# TODO(b/129989314) Import thread contention leads to test failures. 
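+# The two seemingly unused imports below appear to pre-load the escape codecs
+# that text_encoding.CUnescape() relies on, so the codec lookup cannot happen
+# lazily under the thread contention noted above.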
+import encodings.raw_unicode_escape # pylint: disable=unused-import +import encodings.unicode_escape # pylint: disable=unused-import +import io +import math +import re + +from google.protobuf.internal import decoder +from google.protobuf.internal import type_checkers +from google.protobuf import descriptor +from google.protobuf import text_encoding + +# pylint: disable=g-import-not-at-top +__all__ = ['MessageToString', 'Parse', 'PrintMessage', 'PrintField', + 'PrintFieldValue', 'Merge', 'MessageToBytes'] + +_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), + type_checkers.Int32ValueChecker(), + type_checkers.Uint64ValueChecker(), + type_checkers.Int64ValueChecker()) +_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?$', re.IGNORECASE) +_FLOAT_NAN = re.compile('nanf?$', re.IGNORECASE) +_QUOTES = frozenset(("'", '"')) +_ANY_FULL_TYPE_NAME = 'google.protobuf.Any' + + +class Error(Exception): + """Top-level module error for text_format.""" + + +class ParseError(Error): + """Thrown in case of text parsing or tokenizing error.""" + + def __init__(self, message=None, line=None, column=None): + if message is not None and line is not None: + loc = str(line) + if column is not None: + loc += ':{0}'.format(column) + message = '{0} : {1}'.format(loc, message) + if message is not None: + super(ParseError, self).__init__(message) + else: + super(ParseError, self).__init__() + self._line = line + self._column = column + + def GetLine(self): + return self._line + + def GetColumn(self): + return self._column + + +class TextWriter(object): + + def __init__(self, as_utf8): + self._writer = io.StringIO() + + def write(self, val): + return self._writer.write(val) + + def close(self): + return self._writer.close() + + def getvalue(self): + return self._writer.getvalue() + + +def MessageToString( + message, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + indent=0, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + # type: (...) -> str + """Convert protobuf message to text format. + + Double values can be formatted compactly with 15 digits of + precision (which is the most that IEEE 754 "double" can guarantee) + using double_format='.15g'. To ensure that converting to text and back to a + proto will result in an identical value, double_format='.17g' should be used. + + Args: + message: The protocol buffers message. + as_utf8: Return unescaped Unicode for non-ASCII characters. + In Python 3 actual Unicode characters may appear as is in strings. + In Python 2 the return value will be valid UTF-8 rather than only ASCII. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, fields of a proto message will be printed using + the order defined in source code instead of the field number, extensions + will be printed at the end of the message and their relative order is + determined by the extension number. By default, use the field number + order. + float_format (str): If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest float + that has same value in wire will be printed. Also affect double field + if double_format is not set but float_format is set. 
+ double_format (str): If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, use ``str()`` + use_field_number: If True, print field numbers instead of names. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + indent (int): The initial indent level, in terms of spaces, for pretty + print. + message_formatter (function(message, indent, as_one_line) -> unicode|None): + Custom formatter for selected sub-messages (usually based on message + type). Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if the + field is a proto message. + + Returns: + str: A string of the text formatted protocol buffer message. + """ + out = TextWriter(as_utf8) + printer = _Printer( + out, + indent, + as_utf8, + as_one_line, + use_short_repeated_primitives, + pointy_brackets, + use_index_order, + float_format, + double_format, + use_field_number, + descriptor_pool, + message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + result = out.getvalue() + out.close() + if as_one_line: + return result.rstrip() + return result + + +def MessageToBytes(message, **kwargs): + # type: (...) -> bytes + """Convert protobuf message to encoded text format. See MessageToString.""" + text = MessageToString(message, **kwargs) + if isinstance(text, bytes): + return text + codec = 'utf-8' if kwargs.get('as_utf8') else 'ascii' + return text.encode(codec) + + +def _IsMapEntry(field): + return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.message_type.has_options and + field.message_type.GetOptions().map_entry) + + +def PrintMessage(message, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + printer = _Printer( + out=out, indent=indent, as_utf8=as_utf8, + as_one_line=as_one_line, + use_short_repeated_primitives=use_short_repeated_primitives, + pointy_brackets=pointy_brackets, + use_index_order=use_index_order, + float_format=float_format, + double_format=double_format, + use_field_number=use_field_number, + descriptor_pool=descriptor_pool, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintMessage(message) + + +def PrintField(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field name/value pair.""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintField(field, value) + + +def PrintFieldValue(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, 
+ float_format=None, + double_format=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Print a single field value (not including name).""" + printer = _Printer(out, indent, as_utf8, as_one_line, + use_short_repeated_primitives, pointy_brackets, + use_index_order, float_format, double_format, + message_formatter=message_formatter, + print_unknown_fields=print_unknown_fields, + force_colon=force_colon) + printer.PrintFieldValue(field, value) + + +def _BuildMessageFromTypeName(type_name, descriptor_pool): + """Returns a protobuf message instance. + + Args: + type_name: Fully-qualified protobuf message type name string. + descriptor_pool: DescriptorPool instance. + + Returns: + A Message instance of type matching type_name, or None if the a Descriptor + wasn't found matching type_name. + """ + # pylint: disable=g-import-not-at-top + if descriptor_pool is None: + from google.protobuf import descriptor_pool as pool_mod + descriptor_pool = pool_mod.Default() + from google.protobuf import symbol_database + database = symbol_database.Default() + try: + message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) + except KeyError: + return None + message_type = database.GetPrototype(message_descriptor) + return message_type() + + +# These values must match WireType enum in google/protobuf/wire_format.h. +WIRETYPE_LENGTH_DELIMITED = 2 +WIRETYPE_START_GROUP = 3 + + +class _Printer(object): + """Text format printer for protocol message.""" + + def __init__( + self, + out, + indent=0, + as_utf8=False, + as_one_line=False, + use_short_repeated_primitives=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + double_format=None, + use_field_number=False, + descriptor_pool=None, + message_formatter=None, + print_unknown_fields=False, + force_colon=False): + """Initialize the Printer. + + Double values can be formatted compactly with 15 digits of precision + (which is the most that IEEE 754 "double" can guarantee) using + double_format='.15g'. To ensure that converting to text and back to a proto + will result in an identical value, double_format='.17g' should be used. + + Args: + out: To record the text format result. + indent: The initial indent level for pretty print. + as_utf8: Return unescaped Unicode for non-ASCII characters. + In Python 3 actual Unicode characters may appear as is in strings. + In Python 2 the return value will be valid UTF-8 rather than ASCII. + as_one_line: Don't introduce newlines between fields. + use_short_repeated_primitives: Use short repeated format for primitives. + pointy_brackets: If True, use angle brackets instead of curly braces for + nesting. + use_index_order: If True, print fields of a proto message using the order + defined in source code instead of the field number. By default, use the + field number order. + float_format: If set, use this to specify float field formatting + (per the "Format Specification Mini-Language"); otherwise, shortest + float that has same value in wire will be printed. Also affect double + field if double_format is not set but float_format is set. + double_format: If set, use this to specify double field formatting + (per the "Format Specification Mini-Language"); if it is not set but + float_format is set, use float_format. Otherwise, str() is used. + use_field_number: If True, print field numbers instead of names. + descriptor_pool: A DescriptorPool used to resolve Any types. 
+ message_formatter: A function(message, indent, as_one_line): unicode|None + to custom format selected sub-messages (usually based on message type). + Use to pretty print parts of the protobuf for easier diffing. + print_unknown_fields: If True, unknown fields will be printed. + force_colon: If set, a colon will be added after the field name even if + the field is a proto message. + """ + self.out = out + self.indent = indent + self.as_utf8 = as_utf8 + self.as_one_line = as_one_line + self.use_short_repeated_primitives = use_short_repeated_primitives + self.pointy_brackets = pointy_brackets + self.use_index_order = use_index_order + self.float_format = float_format + if double_format is not None: + self.double_format = double_format + else: + self.double_format = float_format + self.use_field_number = use_field_number + self.descriptor_pool = descriptor_pool + self.message_formatter = message_formatter + self.print_unknown_fields = print_unknown_fields + self.force_colon = force_colon + + def _TryPrintAsAnyMessage(self, message): + """Serializes if message is a google.protobuf.Any field.""" + if '/' not in message.type_url: + return False + packed_message = _BuildMessageFromTypeName(message.TypeName(), + self.descriptor_pool) + if packed_message: + packed_message.MergeFromString(message.value) + colon = ':' if self.force_colon else '' + self.out.write('%s[%s]%s ' % (self.indent * ' ', message.type_url, colon)) + self._PrintMessageFieldValue(packed_message) + self.out.write(' ' if self.as_one_line else '\n') + return True + else: + return False + + def _TryCustomFormatMessage(self, message): + formatted = self.message_formatter(message, self.indent, self.as_one_line) + if formatted is None: + return False + + out = self.out + out.write(' ' * self.indent) + out.write(formatted) + out.write(' ' if self.as_one_line else '\n') + return True + + def PrintMessage(self, message): + """Convert protobuf message to text format. + + Args: + message: The protocol buffers message. + """ + if self.message_formatter and self._TryCustomFormatMessage(message): + return + if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and + self._TryPrintAsAnyMessage(message)): + return + fields = message.ListFields() + if self.use_index_order: + fields.sort( + key=lambda x: x[0].number if x[0].is_extension else x[0].index) + for field, value in fields: + if _IsMapEntry(field): + for key in sorted(value): + # This is slow for maps with submessage entries because it copies the + # entire tree. Unfortunately this would take significant refactoring + # of this file to work around. + # + # TODO(haberman): refactor and optimize if this becomes an issue. 
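+          # Each map entry is printed as an ordinary two-field submessage,
+          # e.g. `my_map { key: "k" value: 1 }`, which lets the normal
+          # PrintField() path below handle keys and values uniformly.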
+ entry_submsg = value.GetEntryClass()(key=key, value=value[key]) + self.PrintField(field, entry_submsg) + elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if (self.use_short_repeated_primitives + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE + and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_STRING): + self._PrintShortRepeatedPrimitivesValue(field, value) + else: + for element in value: + self.PrintField(field, element) + else: + self.PrintField(field, value) + + if self.print_unknown_fields: + self._PrintUnknownFields(message.UnknownFields()) + + def _PrintUnknownFields(self, unknown_fields): + """Print unknown fields.""" + out = self.out + for field in unknown_fields: + out.write(' ' * self.indent) + out.write(str(field.field_number)) + if field.wire_type == WIRETYPE_START_GROUP: + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(field.data) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + elif field.wire_type == WIRETYPE_LENGTH_DELIMITED: + try: + # If this field is parseable as a Message, it is probably + # an embedded message. + # pylint: disable=protected-access + (embedded_unknown_message, pos) = decoder._DecodeUnknownFieldSet( + memoryview(field.data), 0, len(field.data)) + except Exception: # pylint: disable=broad-except + pos = 0 + + if pos == len(field.data): + if self.as_one_line: + out.write(' { ') + else: + out.write(' {\n') + self.indent += 2 + + self._PrintUnknownFields(embedded_unknown_message) + + if self.as_one_line: + out.write('} ') + else: + self.indent -= 2 + out.write(' ' * self.indent + '}\n') + else: + # A string or bytes field. self.as_utf8 may not work. + out.write(': \"') + out.write(text_encoding.CEscape(field.data, False)) + out.write('\" ' if self.as_one_line else '\"\n') + else: + # varint, fixed32, fixed64 + out.write(': ') + out.write(str(field.data)) + out.write(' ' if self.as_one_line else '\n') + + def _PrintFieldName(self, field): + """Print field name.""" + out = self.out + out.write(' ' * self.indent) + if self.use_field_number: + out.write(str(field.number)) + else: + if field.is_extension: + out.write('[') + if (field.containing_type.GetOptions().message_set_wire_format and + field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): + out.write(field.message_type.full_name) + else: + out.write(field.full_name) + out.write(']') + elif field.type == descriptor.FieldDescriptor.TYPE_GROUP: + # For groups, use the capitalized name. + out.write(field.message_type.name) + else: + out.write(field.name) + + if (self.force_colon or + field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE): + # The colon is optional in this case, but our cross-language golden files + # don't include it. Here, the colon is only included if force_colon is + # set to True + out.write(':') + + def PrintField(self, field, value): + """Print a single field name/value pair.""" + self._PrintFieldName(field) + self.out.write(' ') + self.PrintFieldValue(field, value) + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintShortRepeatedPrimitivesValue(self, field, value): + """"Prints short repeated primitives value.""" + # Note: this is called only when value has at least one element. 
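+    # Renders the whole field as one bracketed list, e.g. `foo: [1, 2, 3]`,
+    # instead of a separate `foo: N` entry per element.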
+ self._PrintFieldName(field) + self.out.write(' [') + for i in range(len(value) - 1): + self.PrintFieldValue(field, value[i]) + self.out.write(', ') + self.PrintFieldValue(field, value[-1]) + self.out.write(']') + self.out.write(' ' if self.as_one_line else '\n') + + def _PrintMessageFieldValue(self, value): + if self.pointy_brackets: + openb = '<' + closeb = '>' + else: + openb = '{' + closeb = '}' + + if self.as_one_line: + self.out.write('%s ' % openb) + self.PrintMessage(value) + self.out.write(closeb) + else: + self.out.write('%s\n' % openb) + self.indent += 2 + self.PrintMessage(value) + self.indent -= 2 + self.out.write(' ' * self.indent + closeb) + + def PrintFieldValue(self, field, value): + """Print a single field value (not including name). + + For repeated fields, the value should be a single element. + + Args: + field: The descriptor of the field to be printed. + value: The value of the field. + """ + out = self.out + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + self._PrintMessageFieldValue(value) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: + enum_value = field.enum_type.values_by_number.get(value, None) + if enum_value is not None: + out.write(enum_value.name) + else: + out.write(str(value)) + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: + out.write('\"') + if isinstance(value, str) and not self.as_utf8: + out_value = value.encode('utf-8') + else: + out_value = value + if field.type == descriptor.FieldDescriptor.TYPE_BYTES: + # We always need to escape all binary data in TYPE_BYTES fields. + out_as_utf8 = False + else: + out_as_utf8 = self.as_utf8 + out.write(text_encoding.CEscape(out_value, out_as_utf8)) + out.write('\"') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: + if value: + out.write('true') + else: + out.write('false') + elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: + if self.float_format is not None: + out.write('{1:{0}}'.format(self.float_format, value)) + else: + if math.isnan(value): + out.write(str(value)) + else: + out.write(str(type_checkers.ToShortestFloat(value))) + elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_DOUBLE and + self.double_format is not None): + out.write('{1:{0}}'.format(self.double_format, value)) + else: + out.write(str(value)) + + +def Parse(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + NOTE: for historical reasons this function does not clear the input + message. This is different from what the binary msg.ParseFrom(...) does. + If text contains a field already set in message, the value is appended if the + field is repeated. Otherwise, an error is raised. + + Example:: + + a = MyProto() + a.repeated_field.append('test') + b = MyProto() + + # Repeated fields are combined + text_format.Parse(repr(a), b) + text_format.Parse(repr(a), b) # repeated_field contains ["test", "test"] + + # Non-repeated fields cannot be overwritten + a.singular_field = 1 + b.singular_field = 2 + text_format.Parse(repr(a), b) # ParseError + + # Binary version: + b.ParseFromString(a.SerializeToString()) # repeated_field is now "test" + + Caller is responsible for clearing the message as needed. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. 
+ allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return ParseLines(text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def Merge(text, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + Like Parse(), but allows repeated values for a non-repeated field, and uses + the last one. This means any non-repeated, top-level fields specified in text + replace those in the message. + + Args: + text (str): Message text representation. + message (Message): A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + Message: The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + return MergeLines( + text.split(b'\n' if isinstance(text, bytes) else u'\n'), + message, + allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + + +def ParseLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Parse() for caveats. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. + allow_unknown_extension: if True, skip over missing extensions and keep + parsing + allow_field_number: if True, both field number and field name are allowed. + descriptor_pool: A DescriptorPool used to resolve Any types. + allow_unknown_field: if True, skip over unknown field and keep + parsing. Avoid to use this option if possible. It may hide some + errors (e.g. spelling error on field name) + + Returns: + The same message passed as argument. + + Raises: + ParseError: On text parsing problems. + """ + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool, + allow_unknown_field=allow_unknown_field) + return parser.ParseLines(lines, message) + + +def MergeLines(lines, + message, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None, + allow_unknown_field=False): + """Parses a text representation of a protocol message into a message. + + See Merge() for more details. + + Args: + lines: An iterable of lines of a message's text representation. + message: A protocol buffer message to merge into. 
+def ParseLines(lines,
+               message,
+               allow_unknown_extension=False,
+               allow_field_number=False,
+               descriptor_pool=None,
+               allow_unknown_field=False):
+  """Parses a text representation of a protocol message into a message.
+
+  See Parse() for caveats.
+
+  Args:
+    lines: An iterable of lines of a message's text representation.
+    message: A protocol buffer message to merge into.
+    allow_unknown_extension: if True, skip over missing extensions and keep
+      parsing
+    allow_field_number: if True, both field number and field name are allowed.
+    descriptor_pool: A DescriptorPool used to resolve Any types.
+    allow_unknown_field: if True, skip over unknown fields and keep
+      parsing. Avoid using this option if possible; it may hide some errors
+      (e.g. a spelling error in a field name).
+
+  Returns:
+    The same message passed as argument.
+
+  Raises:
+    ParseError: On text parsing problems.
+  """
+  parser = _Parser(allow_unknown_extension,
+                   allow_field_number,
+                   descriptor_pool=descriptor_pool,
+                   allow_unknown_field=allow_unknown_field)
+  return parser.ParseLines(lines, message)
+
+
+def MergeLines(lines,
+               message,
+               allow_unknown_extension=False,
+               allow_field_number=False,
+               descriptor_pool=None,
+               allow_unknown_field=False):
+  """Parses a text representation of a protocol message into a message.
+
+  See Merge() for more details.
+
+  Args:
+    lines: An iterable of lines of a message's text representation.
+    message: A protocol buffer message to merge into.
+    allow_unknown_extension: if True, skip over missing extensions and keep
+      parsing
+    allow_field_number: if True, both field number and field name are allowed.
+    descriptor_pool: A DescriptorPool used to resolve Any types.
+    allow_unknown_field: if True, skip over unknown fields and keep
+      parsing. Avoid using this option if possible; it may hide some errors
+      (e.g. a spelling error in a field name).
+
+  Returns:
+    The same message passed as argument.
+
+  Raises:
+    ParseError: On text parsing problems.
+  """
+  parser = _Parser(allow_unknown_extension,
+                   allow_field_number,
+                   descriptor_pool=descriptor_pool,
+                   allow_unknown_field=allow_unknown_field)
+  return parser.MergeLines(lines, message)
+
+
+class _Parser(object):
+  """Text format parser for protocol message."""
+
+  def __init__(self,
+               allow_unknown_extension=False,
+               allow_field_number=False,
+               descriptor_pool=None,
+               allow_unknown_field=False):
+    self.allow_unknown_extension = allow_unknown_extension
+    self.allow_field_number = allow_field_number
+    self.descriptor_pool = descriptor_pool
+    self.allow_unknown_field = allow_unknown_field
+
+  def ParseLines(self, lines, message):
+    """Parses a text representation of a protocol message into a message."""
+    self._allow_multiple_scalars = False
+    self._ParseOrMerge(lines, message)
+    return message
+
+  def MergeLines(self, lines, message):
+    """Merges a text representation of a protocol message into a message."""
+    self._allow_multiple_scalars = True
+    self._ParseOrMerge(lines, message)
+    return message
+
+  def _ParseOrMerge(self, lines, message):
+    """Converts a text representation of a protocol message into a message.
+
+    Args:
+      lines: Lines of a message's text representation.
+      message: A protocol buffer message to merge into.
+
+    Raises:
+      ParseError: On text parsing problems.
+    """
+    # Tokenize expects native str lines.
+    str_lines = (
+        line if isinstance(line, str) else line.decode('utf-8')
+        for line in lines)
+    tokenizer = Tokenizer(str_lines)
+    while not tokenizer.AtEnd():
+      self._MergeField(tokenizer, message)
+
+  def _MergeField(self, tokenizer, message):
+    """Merges a single protocol message field into a message.
+
+    Args:
+      tokenizer: A tokenizer to parse the field name and values.
+      message: A protocol message to record the data.
+
+    Raises:
+      ParseError: In case of text parsing problems.
+    """
+    message_descriptor = message.DESCRIPTOR
+    if (message_descriptor.full_name == _ANY_FULL_TYPE_NAME and
+        tokenizer.TryConsume('[')):
+      type_url_prefix, packed_type_name = self._ConsumeAnyTypeUrl(tokenizer)
+      tokenizer.Consume(']')
+      tokenizer.TryConsume(':')
+      if tokenizer.TryConsume('<'):
+        expanded_any_end_token = '>'
+      else:
+        tokenizer.Consume('{')
+        expanded_any_end_token = '}'
+      expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name,
+                                                           self.descriptor_pool)
+      if not expanded_any_sub_message:
+        raise ParseError('Type %s not found in descriptor pool' %
+                         packed_type_name)
+      while not tokenizer.TryConsume(expanded_any_end_token):
+        if tokenizer.AtEnd():
+          raise tokenizer.ParseErrorPreviousToken('Expected "%s".'
% + (expanded_any_end_token,)) + self._MergeField(tokenizer, expanded_any_sub_message) + deterministic = False + + message.Pack(expanded_any_sub_message, + type_url_prefix=type_url_prefix, + deterministic=deterministic) + return + + if tokenizer.TryConsume('['): + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + name = '.'.join(name) + + if not message_descriptor.is_extendable: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" does not have extensions.' % + message_descriptor.full_name) + # pylint: disable=protected-access + field = message.Extensions._FindExtensionByName(name) + # pylint: enable=protected-access + + + if not field: + if self.allow_unknown_extension: + field = None + else: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" not registered. ' + 'Did you import the _pb2 module which defines it? ' + 'If you are trying to place the extension in the MessageSet ' + 'field of another message that is in an Any or MessageSet field, ' + 'that message\'s _pb2 module must be imported as well' % name) + elif message_descriptor != field.containing_type: + raise tokenizer.ParseErrorPreviousToken( + 'Extension "%s" does not extend message type "%s".' % + (name, message_descriptor.full_name)) + + tokenizer.Consume(']') + + else: + name = tokenizer.ConsumeIdentifierOrNumber() + if self.allow_field_number and name.isdigit(): + number = ParseInteger(name, True, True) + field = message_descriptor.fields_by_number.get(number, None) + if not field and message_descriptor.is_extendable: + field = message.Extensions._FindExtensionByNumber(number) + else: + field = message_descriptor.fields_by_name.get(name, None) + + # Group names are expected to be capitalized as they appear in the + # .proto file, which actually matches their type names, not their field + # names. + if not field: + field = message_descriptor.fields_by_name.get(name.lower(), None) + if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP: + field = None + + if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and + field.message_type.name != name): + field = None + + if not field and not self.allow_unknown_field: + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" has no field named "%s".' % + (message_descriptor.full_name, name)) + + if field: + if not self._allow_multiple_scalars and field.containing_oneof: + # Check if there's a different field set in this oneof. + # Note that we ignore the case if the same field was set before, and we + # apply _allow_multiple_scalars to non-scalar fields as well. + which_oneof = message.WhichOneof(field.containing_oneof.name) + if which_oneof is not None and which_oneof != field.name: + raise tokenizer.ParseErrorPreviousToken( + 'Field "%s" is specified along with field "%s", another member ' + 'of oneof "%s" for message type "%s".' % + (field.name, which_oneof, field.containing_oneof.name, + message_descriptor.full_name)) + + if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + tokenizer.TryConsume(':') + merger = self._MergeMessageField + else: + tokenizer.Consume(':') + merger = self._MergeScalarField + + if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and + tokenizer.TryConsume('[')): + # Short repeated format, e.g. 
"foo: [1, 2, 3]" + if not tokenizer.TryConsume(']'): + while True: + merger(tokenizer, message, field) + if tokenizer.TryConsume(']'): + break + tokenizer.Consume(',') + + else: + merger(tokenizer, message, field) + + else: # Proto field is unknown. + assert (self.allow_unknown_extension or self.allow_unknown_field) + _SkipFieldContents(tokenizer) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + + def _ConsumeAnyTypeUrl(self, tokenizer): + """Consumes a google.protobuf.Any type URL and returns the type name.""" + # Consume "type.googleapis.com/". + prefix = [tokenizer.ConsumeIdentifier()] + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('.') + prefix.append(tokenizer.ConsumeIdentifier()) + tokenizer.Consume('/') + # Consume the fully-qualified type name. + name = [tokenizer.ConsumeIdentifier()] + while tokenizer.TryConsume('.'): + name.append(tokenizer.ConsumeIdentifier()) + return '.'.join(prefix), '.'.join(name) + + def _MergeMessageField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: The message of which field is a member. + field: The descriptor of the field to be merged. + + Raises: + ParseError: In case of text parsing problems. + """ + is_map_entry = _IsMapEntry(field) + + if tokenizer.TryConsume('<'): + end_token = '>' + else: + tokenizer.Consume('{') + end_token = '}' + + if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: + if field.is_extension: + sub_message = message.Extensions[field].add() + elif is_map_entry: + sub_message = getattr(message, field.name).GetEntryClass()() + else: + sub_message = getattr(message, field.name).add() + else: + if field.is_extension: + if (not self._allow_multiple_scalars and + message.HasExtension(field)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" extensions.' % + (message.DESCRIPTOR.full_name, field.full_name)) + sub_message = message.Extensions[field] + else: + # Also apply _allow_multiple_scalars to message field. + # TODO(jieluo): Change to _allow_singular_overwrites. + if (not self._allow_multiple_scalars and + message.HasField(field.name)): + raise tokenizer.ParseErrorPreviousToken( + 'Message type "%s" should not have multiple "%s" fields.' % + (message.DESCRIPTOR.full_name, field.name)) + sub_message = getattr(message, field.name) + sub_message.SetInParent() + + while not tokenizer.TryConsume(end_token): + if tokenizer.AtEnd(): + raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) + self._MergeField(tokenizer, sub_message) + + if is_map_entry: + value_cpptype = field.message_type.fields_by_name['value'].cpp_type + if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: + value = getattr(message, field.name)[sub_message.key] + value.CopyFrom(sub_message.value) + else: + getattr(message, field.name)[sub_message.key] = sub_message.value + + @staticmethod + def _IsProto3Syntax(message): + message_descriptor = message.DESCRIPTOR + return (hasattr(message_descriptor, 'syntax') and + message_descriptor.syntax == 'proto3') + + def _MergeScalarField(self, tokenizer, message, field): + """Merges a single scalar field into a message. + + Args: + tokenizer: A tokenizer to parse the field value. + message: A protocol message to record the data. + field: The descriptor of the field to be merged. 
+  def _MergeScalarField(self, tokenizer, message, field):
+    """Merges a single scalar field into a message.
+
+    Args:
+      tokenizer: A tokenizer to parse the field value.
+      message: A protocol message to record the data.
+      field: The descriptor of the field to be merged.
+
+    Raises:
+      ParseError: In case of text parsing problems.
+      RuntimeError: On runtime errors.
+    """
+    _ = self.allow_unknown_extension
+    value = None
+
+    if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
+                      descriptor.FieldDescriptor.TYPE_SINT32,
+                      descriptor.FieldDescriptor.TYPE_SFIXED32):
+      value = _ConsumeInt32(tokenizer)
+    elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
+                        descriptor.FieldDescriptor.TYPE_SINT64,
+                        descriptor.FieldDescriptor.TYPE_SFIXED64):
+      value = _ConsumeInt64(tokenizer)
+    elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
+                        descriptor.FieldDescriptor.TYPE_FIXED32):
+      value = _ConsumeUint32(tokenizer)
+    elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
+                        descriptor.FieldDescriptor.TYPE_FIXED64):
+      value = _ConsumeUint64(tokenizer)
+    elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
+                        descriptor.FieldDescriptor.TYPE_DOUBLE):
+      value = tokenizer.ConsumeFloat()
+    elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
+      value = tokenizer.ConsumeBool()
+    elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
+      value = tokenizer.ConsumeString()
+    elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
+      value = tokenizer.ConsumeByteString()
+    elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
+      value = tokenizer.ConsumeEnum(field)
+    else:
+      raise RuntimeError('Unknown field type %d' % field.type)
+
+    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
+      if field.is_extension:
+        message.Extensions[field].append(value)
+      else:
+        getattr(message, field.name).append(value)
+    else:
+      if field.is_extension:
+        if (not self._allow_multiple_scalars and
+            not self._IsProto3Syntax(message) and
+            message.HasExtension(field)):
+          raise tokenizer.ParseErrorPreviousToken(
+              'Message type "%s" should not have multiple "%s" extensions.' %
+              (message.DESCRIPTOR.full_name, field.full_name))
+        else:
+          message.Extensions[field] = value
+      else:
+        duplicate_error = False
+        if not self._allow_multiple_scalars:
+          if self._IsProto3Syntax(message):
+            # Proto3 doesn't represent presence, so we make a best-effort
+            # check for multiple scalars by comparing against default values.
+            duplicate_error = bool(getattr(message, field.name))
+          else:
+            duplicate_error = message.HasField(field.name)
+
+        if duplicate_error:
+          raise tokenizer.ParseErrorPreviousToken(
+              'Message type "%s" should not have multiple "%s" fields.' %
+              (message.DESCRIPTOR.full_name, field.name))
+        else:
+          setattr(message, field.name, value)
+
+
+def _SkipFieldContents(tokenizer):
+  """Skips over contents (value or message) of a field.
+
+  Args:
+    tokenizer: A tokenizer to parse the field name and values.
+  """
+  # Try to guess the type of this field.
+  # If this field is not a message, there should be a ":" between the
+  # field name and the field value and also the field value should not
+  # start with "{" or "<" which indicates the beginning of a message body.
+  # If there is no ":" or there is a "{" or "<" after ":", this field has
+  # to be a message or the input is ill-formed.
+  if tokenizer.TryConsume(':') and not tokenizer.LookingAt(
+      '{') and not tokenizer.LookingAt('<'):
+    _SkipFieldValue(tokenizer)
+  else:
+    _SkipFieldMessage(tokenizer)
+
+
+def _SkipField(tokenizer):
+  """Skips over a complete field (name and value/message).
+
+  Args:
+    tokenizer: A tokenizer to parse the field name and values.
+  """
+  if tokenizer.TryConsume('['):
+    # Consume extension name.
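+    # An extension appears as a bracketed, dotted name, e.g.
+    # "[some.package.ext_name]"; each dotted component is consumed below.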
+ tokenizer.ConsumeIdentifier() + while tokenizer.TryConsume('.'): + tokenizer.ConsumeIdentifier() + tokenizer.Consume(']') + else: + tokenizer.ConsumeIdentifierOrNumber() + + _SkipFieldContents(tokenizer) + + # For historical reasons, fields may optionally be separated by commas or + # semicolons. + if not tokenizer.TryConsume(','): + tokenizer.TryConsume(';') + + +def _SkipFieldMessage(tokenizer): + """Skips over a field message. + + Args: + tokenizer: A tokenizer to parse the field name and values. + """ + + if tokenizer.TryConsume('<'): + delimiter = '>' + else: + tokenizer.Consume('{') + delimiter = '}' + + while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'): + _SkipField(tokenizer) + + tokenizer.Consume(delimiter) + + +def _SkipFieldValue(tokenizer): + """Skips over a field value. + + Args: + tokenizer: A tokenizer to parse the field name and values. + + Raises: + ParseError: In case an invalid field value is found. + """ + # String/bytes tokens can come in multiple adjacent string literals. + # If we can consume one, consume as many as we can. + if tokenizer.TryConsumeByteString(): + while tokenizer.TryConsumeByteString(): + pass + return + + if (not tokenizer.TryConsumeIdentifier() and + not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and + not tokenizer.TryConsumeFloat()): + raise ParseError('Invalid field value: ' + tokenizer.token) + + +class Tokenizer(object): + """Protocol buffer text representation tokenizer. + + This class handles the lower level string parsing by splitting it into + meaningful tokens. + + It was directly ported from the Java protocol buffer API. + """ + + _WHITESPACE = re.compile(r'\s+') + _COMMENT = re.compile(r'(\s*#.*$)', re.MULTILINE) + _WHITESPACE_OR_COMMENT = re.compile(r'(\s|(#.*$))+', re.MULTILINE) + _TOKEN = re.compile('|'.join([ + r'[a-zA-Z_][0-9a-zA-Z_+-]*', # an identifier + r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*', # a number + ] + [ # quoted str for each quote mark + # Avoid backtracking! https://stackoverflow.com/a/844267 + r'{qt}[^{qt}\n\\]*((\\.)+[^{qt}\n\\]*)*({qt}|\\?$)'.format(qt=mark) + for mark in _QUOTES + ])) + + _IDENTIFIER = re.compile(r'[^\d\W]\w*') + _IDENTIFIER_OR_NUMBER = re.compile(r'\w+') + + def __init__(self, lines, skip_comments=True): + self._position = 0 + self._line = -1 + self._column = 0 + self._token_start = None + self.token = '' + self._lines = iter(lines) + self._current_line = '' + self._previous_line = 0 + self._previous_column = 0 + self._more_lines = True + self._skip_comments = skip_comments + self._whitespace_pattern = (skip_comments and self._WHITESPACE_OR_COMMENT + or self._WHITESPACE) + self._SkipWhitespace() + self.NextToken() + + def LookingAt(self, token): + return self.token == token + + def AtEnd(self): + """Checks the end of the text was reached. + + Returns: + True iff the end was reached. + """ + return not self.token + + def _PopLine(self): + while len(self._current_line) <= self._column: + try: + self._current_line = next(self._lines) + except StopIteration: + self._current_line = '' + self._more_lines = False + return + else: + self._line += 1 + self._column = 0 + + def _SkipWhitespace(self): + while True: + self._PopLine() + match = self._whitespace_pattern.match(self._current_line, self._column) + if not match: + break + length = len(match.group(0)) + self._column += length + + def TryConsume(self, token): + """Tries to consume a given piece of text. + + Args: + token: Text to consume. + + Returns: + True iff the text was consumed. 
+    """
+    if self.token == token:
+      self.NextToken()
+      return True
+    return False
+
+  def Consume(self, token):
+    """Consumes a piece of text.
+
+    Args:
+      token: Text to consume.
+
+    Raises:
+      ParseError: If the text couldn't be consumed.
+    """
+    if not self.TryConsume(token):
+      raise self.ParseError('Expected "%s".' % token)
+
+  def ConsumeComment(self):
+    result = self.token
+    if not self._COMMENT.match(result):
+      raise self.ParseError('Expected comment.')
+    self.NextToken()
+    return result
+
+  def ConsumeCommentOrTrailingComment(self):
+    """Consumes a comment, returns a 2-tuple (trailing bool, comment str)."""
+
+    # Tokenizer initializes _previous_line and _previous_column to 0. As the
+    # tokenizer starts, it looks like there is a previous token on the line.
+    just_started = self._line == 0 and self._column == 0
+
+    before_parsing = self._previous_line
+    comment = self.ConsumeComment()
+
+    # A trailing comment is a comment on the same line as the previous token.
+    trailing = (self._previous_line == before_parsing
+                and not just_started)
+
+    return trailing, comment
+
+  def TryConsumeIdentifier(self):
+    try:
+      self.ConsumeIdentifier()
+      return True
+    except ParseError:
+      return False
+
+  def ConsumeIdentifier(self):
+    """Consumes protocol message field identifier.
+
+    Returns:
+      Identifier string.
+
+    Raises:
+      ParseError: If an identifier couldn't be consumed.
+    """
+    result = self.token
+    if not self._IDENTIFIER.match(result):
+      raise self.ParseError('Expected identifier.')
+    self.NextToken()
+    return result
+
+  def TryConsumeIdentifierOrNumber(self):
+    try:
+      self.ConsumeIdentifierOrNumber()
+      return True
+    except ParseError:
+      return False
+
+  def ConsumeIdentifierOrNumber(self):
+    """Consumes a protocol message field identifier or number.
+
+    Returns:
+      Identifier or number string.
+
+    Raises:
+      ParseError: If an identifier or number couldn't be consumed.
+    """
+    result = self.token
+    if not self._IDENTIFIER_OR_NUMBER.match(result):
+      raise self.ParseError('Expected identifier or number, got %s.' % result)
+    self.NextToken()
+    return result
+
+  def TryConsumeInteger(self):
+    try:
+      self.ConsumeInteger()
+      return True
+    except ParseError:
+      return False
+
+  def ConsumeInteger(self):
+    """Consumes an integer number.
+
+    Returns:
+      The integer parsed.
+
+    Raises:
+      ParseError: If an integer couldn't be consumed.
+    """
+    try:
+      result = _ParseAbstractInteger(self.token)
+    except ValueError as e:
+      raise self.ParseError(str(e))
+    self.NextToken()
+    return result
+
+  def TryConsumeFloat(self):
+    try:
+      self.ConsumeFloat()
+      return True
+    except ParseError:
+      return False
+
+  def ConsumeFloat(self):
+    """Consumes a floating point number.
+
+    Returns:
+      The number parsed.
+
+    Raises:
+      ParseError: If a floating point number couldn't be consumed.
+    """
+    try:
+      result = ParseFloat(self.token)
+    except ValueError as e:
+      raise self.ParseError(str(e))
+    self.NextToken()
+    return result
+
+  def ConsumeBool(self):
+    """Consumes a boolean value.
+
+    Returns:
+      The bool parsed.
+
+    Raises:
+      ParseError: If a boolean value couldn't be consumed.
+    """
+    try:
+      result = ParseBool(self.token)
+    except ValueError as e:
+      raise self.ParseError(str(e))
+    self.NextToken()
+    return result
+
+  def TryConsumeByteString(self):
+    try:
+      self.ConsumeByteString()
+      return True
+    except ParseError:
+      return False
+
+  def ConsumeString(self):
+    """Consumes a string value.
+
+    Returns:
+      The string parsed.
+
+    Raises:
+      ParseError: If a string value couldn't be consumed.
+ """ + the_bytes = self.ConsumeByteString() + try: + return str(the_bytes, 'utf-8') + except UnicodeDecodeError as e: + raise self._StringParseError(e) + + def ConsumeByteString(self): + """Consumes a byte array value. + + Returns: + The array parsed (as a string). + + Raises: + ParseError: If a byte array value couldn't be consumed. + """ + the_list = [self._ConsumeSingleByteString()] + while self.token and self.token[0] in _QUOTES: + the_list.append(self._ConsumeSingleByteString()) + return b''.join(the_list) + + def _ConsumeSingleByteString(self): + """Consume one token of a string literal. + + String literals (whether bytes or text) can come in multiple adjacent + tokens which are automatically concatenated, like in C or Python. This + method only consumes one token. + + Returns: + The token parsed. + Raises: + ParseError: When the wrong format data is found. + """ + text = self.token + if len(text) < 1 or text[0] not in _QUOTES: + raise self.ParseError('Expected string but found: %r' % (text,)) + + if len(text) < 2 or text[-1] != text[0]: + raise self.ParseError('String missing ending quote: %r' % (text,)) + + try: + result = text_encoding.CUnescape(text[1:-1]) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ConsumeEnum(self, field): + try: + result = ParseEnum(field, self.token) + except ValueError as e: + raise self.ParseError(str(e)) + self.NextToken() + return result + + def ParseErrorPreviousToken(self, message): + """Creates and *returns* a ParseError for the previously read token. + + Args: + message: A message to set for the exception. + + Returns: + A ParseError instance. + """ + return ParseError(message, self._previous_line + 1, + self._previous_column + 1) + + def ParseError(self, message): + """Creates and *returns* a ParseError for the current token.""" + return ParseError('\'' + self._current_line + '\': ' + message, + self._line + 1, self._column + 1) + + def _StringParseError(self, e): + return self.ParseError('Couldn\'t parse string: ' + str(e)) + + def NextToken(self): + """Reads the next meaningful token.""" + self._previous_line = self._line + self._previous_column = self._column + + self._column += len(self.token) + self._SkipWhitespace() + + if not self._more_lines: + self.token = '' + return + + match = self._TOKEN.match(self._current_line, self._column) + if not match and not self._skip_comments: + match = self._COMMENT.match(self._current_line, self._column) + if match: + token = match.group(0) + self.token = token + else: + self.token = self._current_line[self._column] + +# Aliased so it can still be accessed by current visibility violators. +# TODO(dbarnett): Migrate violators to textformat_tokenizer. +_Tokenizer = Tokenizer # pylint: disable=invalid-name + + +def _ConsumeInt32(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=False) + + +def _ConsumeUint32(tokenizer): + """Consumes an unsigned 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 32bit integer couldn't be consumed. 
+  """
+  return _ConsumeInteger(tokenizer, is_signed=False, is_long=False)
+
+
+def _TryConsumeInt64(tokenizer):
+  try:
+    _ConsumeInt64(tokenizer)
+    return True
+  except ParseError:
+    return False
+
+
+def _ConsumeInt64(tokenizer):
+  """Consumes a signed 64bit integer number from tokenizer.
+
+  Args:
+    tokenizer: A tokenizer used to parse the number.
+
+  Returns:
+    The integer parsed.
+
+  Raises:
+    ParseError: If a signed 64bit integer couldn't be consumed.
+  """
+  return _ConsumeInteger(tokenizer, is_signed=True, is_long=True)
+
+
+def _TryConsumeUint64(tokenizer):
+  try:
+    _ConsumeUint64(tokenizer)
+    return True
+  except ParseError:
+    return False
+
+
+def _ConsumeUint64(tokenizer):
+  """Consumes an unsigned 64bit integer number from tokenizer.
+
+  Args:
+    tokenizer: A tokenizer used to parse the number.
+
+  Returns:
+    The integer parsed.
+
+  Raises:
+    ParseError: If an unsigned 64bit integer couldn't be consumed.
+  """
+  return _ConsumeInteger(tokenizer, is_signed=False, is_long=True)
+
+
+def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):
+  """Consumes an integer number from tokenizer.
+
+  Args:
+    tokenizer: A tokenizer used to parse the number.
+    is_signed: True if a signed integer must be parsed.
+    is_long: True if a long integer must be parsed.
+
+  Returns:
+    The integer parsed.
+
+  Raises:
+    ParseError: If an integer with given characteristics couldn't be consumed.
+  """
+  try:
+    result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)
+  except ValueError as e:
+    raise tokenizer.ParseError(str(e))
+  tokenizer.NextToken()
+  return result
+
+
+def ParseInteger(text, is_signed=False, is_long=False):
+  """Parses an integer.
+
+  Args:
+    text: The text to parse.
+    is_signed: True if a signed integer must be parsed.
+    is_long: True if a long integer must be parsed.
+
+  Returns:
+    The integer value.
+
+  Raises:
+    ValueError: If the text is not a valid integer.
+  """
+  # Do the actual parsing. Exception handling is propagated to caller.
+  result = _ParseAbstractInteger(text)
+
+  # Check if the integer is sane. Exceptions handled by callers.
+  checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
+  checker.CheckValue(result)
+  return result
+
+
+def _ParseAbstractInteger(text):
+  """Parses an integer without checking size/signedness.
+
+  Args:
+    text: The text to parse.
+
+  Returns:
+    The integer value.
+
+  Raises:
+    ValueError: If the text is not a valid integer.
+  """
+  # Do the actual parsing. Exception handling is propagated to caller.
+  orig_text = text
+  c_octal_match = re.match(r'(-?)0(\d+)$', text)
+  if c_octal_match:
+    # Python 3 no longer supports 0755 octal syntax without the 'o', so
+    # we always use the '0o' prefix for multi-digit numbers starting with 0.
+    text = c_octal_match.group(1) + '0o' + c_octal_match.group(2)
+  try:
+    return int(text, 0)
+  except ValueError:
+    raise ValueError('Couldn\'t parse integer: %s' % orig_text)
+
+
+def ParseFloat(text):
+  """Parse a floating point number.
+
+  Args:
+    text: Text to parse.
+
+  Returns:
+    The number parsed.
+
+  Raises:
+    ValueError: If a floating point number couldn't be parsed.
+  """
+  try:
+    # Assume Python compatible syntax.
+    return float(text)
+  except ValueError:
+    # Check alternative spellings.
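+    # Recognized alternatives: "inf"/"infinity" (with optional sign) and "nan",
+    # matched by the module-level _FLOAT_INFINITY / _FLOAT_NAN patterns, plus
+    # C-style float literals with a trailing 'f', e.g. "1.0f".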
+    if _FLOAT_INFINITY.match(text):
+      if text[0] == '-':
+        return float('-inf')
+      else:
+        return float('inf')
+    elif _FLOAT_NAN.match(text):
+      return float('nan')
+    else:
+      # assume '1.0f' format
+      try:
+        return float(text.rstrip('f'))
+      except ValueError:
+        raise ValueError('Couldn\'t parse float: %s' % text)
+
+
+def ParseBool(text):
+  """Parse a boolean value.
+
+  Args:
+    text: Text to parse.
+
+  Returns:
+    The boolean value parsed.
+
+  Raises:
+    ValueError: If text is not a valid boolean.
+  """
+  if text in ('true', 't', '1', 'True'):
+    return True
+  elif text in ('false', 'f', '0', 'False'):
+    return False
+  else:
+    raise ValueError('Expected "true" or "false".')
+
+
+def ParseEnum(field, value):
+  """Parse an enum value.
+
+  The value can be specified by a number (the enum value), or by
+  a string literal (the enum name).
+
+  Args:
+    field: Enum field descriptor.
+    value: String value.
+
+  Returns:
+    Enum value number.
+
+  Raises:
+    ValueError: If the enum value could not be parsed.
+  """
+  enum_descriptor = field.enum_type
+  try:
+    number = int(value, 0)
+  except ValueError:
+    # Identifier.
+    enum_value = enum_descriptor.values_by_name.get(value, None)
+    if enum_value is None:
+      raise ValueError('Enum type "%s" has no value named %s.' %
+                       (enum_descriptor.full_name, value))
+  else:
+    # Numeric value.
+    if hasattr(field.file, 'syntax'):
+      # Attribute is checked for compatibility.
+      if field.file.syntax == 'proto3':
+        # Proto3 accepts unknown numeric enum values.
+        return number
+    enum_value = enum_descriptor.values_by_number.get(number, None)
+    if enum_value is None:
+      raise ValueError('Enum type "%s" has no value with number %d.' %
+                       (enum_descriptor.full_name, number))
+  return enum_value.number
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/timestamp_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/timestamp_pb2.py
new file mode 100644
index 00000000..558d4969
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/timestamp_pb2.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/protobuf/timestamp.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\"+\n\tTimestamp\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x85\x01\n\x13\x63om.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.timestamp_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016TimestampProtoP\001Z2google.golang.org/protobuf/types/known/timestamppb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _TIMESTAMP._serialized_start=52 + _TIMESTAMP._serialized_end=95 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/type_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/type_pb2.py new file mode 100644 index 00000000..19903fb6 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/type_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/type.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1agoogle/protobuf/type.proto\x12\x0fgoogle.protobuf\x1a\x19google/protobuf/any.proto\x1a$google/protobuf/source_context.proto\"\xd7\x01\n\x04Type\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Field\x12\x0e\n\x06oneofs\x18\x03 \x03(\t\x12(\n\x07options\x18\x04 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x06 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x05\n\x05\x46ield\x12)\n\x04kind\x18\x01 \x01(\x0e\x32\x1b.google.protobuf.Field.Kind\x12\x37\n\x0b\x63\x61rdinality\x18\x02 \x01(\x0e\x32\".google.protobuf.Field.Cardinality\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08type_url\x18\x06 \x01(\t\x12\x13\n\x0boneof_index\x18\x07 \x01(\x05\x12\x0e\n\x06packed\x18\x08 \x01(\x08\x12(\n\x07options\x18\t \x03(\x0b\x32\x17.google.protobuf.Option\x12\x11\n\tjson_name\x18\n \x01(\t\x12\x15\n\rdefault_value\x18\x0b \x01(\t\"\xc8\x02\n\x04Kind\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"t\n\x0b\x43\x61rdinality\x12\x17\n\x13\x43\x41RDINALITY_UNKNOWN\x10\x00\x12\x18\n\x14\x43\x41RDINALITY_OPTIONAL\x10\x01\x12\x18\n\x14\x43\x41RDINALITY_REQUIRED\x10\x02\x12\x18\n\x14\x43\x41RDINALITY_REPEATED\x10\x03\"\xce\x01\n\x04\x45num\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tenumvalue\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.EnumValue\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x05 \x01(\x0e\x32\x17.google.protobuf.Syntax\"S\n\tEnumValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\";\n\x06Option\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any*.\n\x06Syntax\x12\x11\n\rSYNTAX_PROTO2\x10\x00\x12\x11\n\rSYNTAX_PROTO3\x10\x01\x42{\n\x13\x63om.google.protobufB\tTypeProtoP\x01Z-google.golang.org/protobuf/types/known/typepb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.type_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == 
False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\tTypeProtoP\001Z-google.golang.org/protobuf/types/known/typepb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _SYNTAX._serialized_start=1413 + _SYNTAX._serialized_end=1459 + _TYPE._serialized_start=113 + _TYPE._serialized_end=328 + _FIELD._serialized_start=331 + _FIELD._serialized_end=1056 + _FIELD_KIND._serialized_start=610 + _FIELD_KIND._serialized_end=938 + _FIELD_CARDINALITY._serialized_start=940 + _FIELD_CARDINALITY._serialized_end=1056 + _ENUM._serialized_start=1059 + _ENUM._serialized_end=1265 + _ENUMVALUE._serialized_start=1267 + _ENUMVALUE._serialized_end=1350 + _OPTION._serialized_start=1352 + _OPTION._serialized_end=1411 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/util/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/util/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/util/json_format_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/util/json_format_pb2.py new file mode 100644 index 00000000..66a5836c --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/util/json_format_pb2.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/util/json_format.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&google/protobuf/util/json_format.proto\x12\x11protobuf_unittest\"\x89\x01\n\x13TestFlagsAndStrings\x12\t\n\x01\x41\x18\x01 \x02(\x05\x12K\n\rrepeatedgroup\x18\x02 \x03(\n24.protobuf_unittest.TestFlagsAndStrings.RepeatedGroup\x1a\x1a\n\rRepeatedGroup\x12\t\n\x01\x66\x18\x03 \x02(\t\"!\n\x14TestBase64ByteArrays\x12\t\n\x01\x61\x18\x01 \x02(\x0c\"G\n\x12TestJavaScriptJSON\x12\t\n\x01\x61\x18\x01 \x01(\x05\x12\r\n\x05\x66inal\x18\x02 \x01(\x02\x12\n\n\x02in\x18\x03 \x01(\t\x12\x0b\n\x03Var\x18\x04 \x01(\t\"Q\n\x18TestJavaScriptOrderJSON1\x12\t\n\x01\x64\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x05\x12\t\n\x01x\x18\x03 \x01(\x08\x12\t\n\x01\x62\x18\x04 \x01(\x05\x12\t\n\x01\x61\x18\x05 \x01(\x05\"\x89\x01\n\x18TestJavaScriptOrderJSON2\x12\t\n\x01\x64\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x05\x12\t\n\x01x\x18\x03 \x01(\x08\x12\t\n\x01\x62\x18\x04 \x01(\x05\x12\t\n\x01\x61\x18\x05 \x01(\x05\x12\x36\n\x01z\x18\x06 \x03(\x0b\x32+.protobuf_unittest.TestJavaScriptOrderJSON1\"$\n\x0cTestLargeInt\x12\t\n\x01\x61\x18\x01 \x02(\x03\x12\t\n\x01\x62\x18\x02 \x02(\x04\"\xa0\x01\n\x0bTestNumbers\x12\x30\n\x01\x61\x18\x01 \x01(\x0e\x32%.protobuf_unittest.TestNumbers.MyType\x12\t\n\x01\x62\x18\x02 \x01(\x05\x12\t\n\x01\x63\x18\x03 \x01(\x02\x12\t\n\x01\x64\x18\x04 \x01(\x08\x12\t\n\x01\x65\x18\x05 \x01(\x01\x12\t\n\x01\x66\x18\x06 \x01(\r\"(\n\x06MyType\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07WARNING\x10\x01\x12\t\n\x05\x45RROR\x10\x02\"T\n\rTestCamelCase\x12\x14\n\x0cnormal_field\x18\x01 \x01(\t\x12\x15\n\rCAPITAL_FIELD\x18\x02 
\x01(\x05\x12\x16\n\x0e\x43\x61melCaseField\x18\x03 \x01(\x05\"|\n\x0bTestBoolMap\x12=\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32+.protobuf_unittest.TestBoolMap.BoolMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"O\n\rTestRecursion\x12\r\n\x05value\x18\x01 \x01(\x05\x12/\n\x05\x63hild\x18\x02 \x01(\x0b\x32 .protobuf_unittest.TestRecursion\"\x86\x01\n\rTestStringMap\x12\x43\n\nstring_map\x18\x01 \x03(\x0b\x32/.protobuf_unittest.TestStringMap.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc4\x01\n\x14TestStringSerializer\x12\x15\n\rscalar_string\x18\x01 \x01(\t\x12\x17\n\x0frepeated_string\x18\x02 \x03(\t\x12J\n\nstring_map\x18\x03 \x03(\x0b\x32\x36.protobuf_unittest.TestStringSerializer.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"$\n\x18TestMessageWithExtension*\x08\x08\x64\x10\x80\x80\x80\x80\x02\"z\n\rTestExtension\x12\r\n\x05value\x18\x01 \x01(\t2Z\n\x03\x65xt\x12+.protobuf_unittest.TestMessageWithExtension\x18\x64 \x01(\x0b\x32 .protobuf_unittest.TestExtension\"Q\n\x14TestDefaultEnumValue\x12\x39\n\nenum_value\x18\x01 \x01(\x0e\x32\x1c.protobuf_unittest.EnumValue:\x07\x44\x45\x46\x41ULT*2\n\tEnumValue\x12\x0c\n\x08PROTOCOL\x10\x00\x12\n\n\x06\x42UFFER\x10\x01\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x02') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.util.json_format_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + TestMessageWithExtension.RegisterExtension(_TESTEXTENSION.extensions_by_name['ext']) + + DESCRIPTOR._options = None + _TESTBOOLMAP_BOOLMAPENTRY._options = None + _TESTBOOLMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGMAP_STRINGMAPENTRY._options = None + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._options = None + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_options = b'8\001' + _ENUMVALUE._serialized_start=1607 + _ENUMVALUE._serialized_end=1657 + _TESTFLAGSANDSTRINGS._serialized_start=62 + _TESTFLAGSANDSTRINGS._serialized_end=199 + _TESTFLAGSANDSTRINGS_REPEATEDGROUP._serialized_start=173 + _TESTFLAGSANDSTRINGS_REPEATEDGROUP._serialized_end=199 + _TESTBASE64BYTEARRAYS._serialized_start=201 + _TESTBASE64BYTEARRAYS._serialized_end=234 + _TESTJAVASCRIPTJSON._serialized_start=236 + _TESTJAVASCRIPTJSON._serialized_end=307 + _TESTJAVASCRIPTORDERJSON1._serialized_start=309 + _TESTJAVASCRIPTORDERJSON1._serialized_end=390 + _TESTJAVASCRIPTORDERJSON2._serialized_start=393 + _TESTJAVASCRIPTORDERJSON2._serialized_end=530 + _TESTLARGEINT._serialized_start=532 + _TESTLARGEINT._serialized_end=568 + _TESTNUMBERS._serialized_start=571 + _TESTNUMBERS._serialized_end=731 + _TESTNUMBERS_MYTYPE._serialized_start=691 + _TESTNUMBERS_MYTYPE._serialized_end=731 + _TESTCAMELCASE._serialized_start=733 + _TESTCAMELCASE._serialized_end=817 + _TESTBOOLMAP._serialized_start=819 + _TESTBOOLMAP._serialized_end=943 + _TESTBOOLMAP_BOOLMAPENTRY._serialized_start=897 + _TESTBOOLMAP_BOOLMAPENTRY._serialized_end=943 + _TESTRECURSION._serialized_start=945 + _TESTRECURSION._serialized_end=1024 + _TESTSTRINGMAP._serialized_start=1027 + _TESTSTRINGMAP._serialized_end=1161 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_start=1113 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_end=1161 + _TESTSTRINGSERIALIZER._serialized_start=1164 + 
_TESTSTRINGSERIALIZER._serialized_end=1360 + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_start=1113 + _TESTSTRINGSERIALIZER_STRINGMAPENTRY._serialized_end=1161 + _TESTMESSAGEWITHEXTENSION._serialized_start=1362 + _TESTMESSAGEWITHEXTENSION._serialized_end=1398 + _TESTEXTENSION._serialized_start=1400 + _TESTEXTENSION._serialized_end=1522 + _TESTDEFAULTENUMVALUE._serialized_start=1524 + _TESTDEFAULTENUMVALUE._serialized_end=1605 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/util/json_format_proto3_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/util/json_format_proto3_pb2.py new file mode 100644 index 00000000..5498deaf --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/util/json_format_proto3_pb2.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/util/json_format_proto3.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 +from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.protobuf import unittest_pb2 as google_dot_protobuf_dot_unittest__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n-google/protobuf/util/json_format_proto3.proto\x12\x06proto3\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1egoogle/protobuf/unittest.proto\"\x1c\n\x0bMessageType\x12\r\n\x05value\x18\x01 \x01(\x05\"\x94\x05\n\x0bTestMessage\x12\x12\n\nbool_value\x18\x01 \x01(\x08\x12\x13\n\x0bint32_value\x18\x02 \x01(\x05\x12\x13\n\x0bint64_value\x18\x03 \x01(\x03\x12\x14\n\x0cuint32_value\x18\x04 \x01(\r\x12\x14\n\x0cuint64_value\x18\x05 \x01(\x04\x12\x13\n\x0b\x66loat_value\x18\x06 \x01(\x02\x12\x14\n\x0c\x64ouble_value\x18\x07 \x01(\x01\x12\x14\n\x0cstring_value\x18\x08 \x01(\t\x12\x13\n\x0b\x62ytes_value\x18\t \x01(\x0c\x12$\n\nenum_value\x18\n \x01(\x0e\x32\x10.proto3.EnumType\x12*\n\rmessage_value\x18\x0b \x01(\x0b\x32\x13.proto3.MessageType\x12\x1b\n\x13repeated_bool_value\x18\x15 \x03(\x08\x12\x1c\n\x14repeated_int32_value\x18\x16 \x03(\x05\x12\x1c\n\x14repeated_int64_value\x18\x17 \x03(\x03\x12\x1d\n\x15repeated_uint32_value\x18\x18 \x03(\r\x12\x1d\n\x15repeated_uint64_value\x18\x19 \x03(\x04\x12\x1c\n\x14repeated_float_value\x18\x1a \x03(\x02\x12\x1d\n\x15repeated_double_value\x18\x1b \x03(\x01\x12\x1d\n\x15repeated_string_value\x18\x1c \x03(\t\x12\x1c\n\x14repeated_bytes_value\x18\x1d \x03(\x0c\x12-\n\x13repeated_enum_value\x18\x1e \x03(\x0e\x32\x10.proto3.EnumType\x12\x33\n\x16repeated_message_value\x18\x1f 
\x03(\x0b\x32\x13.proto3.MessageType\"\x8c\x02\n\tTestOneof\x12\x1b\n\x11oneof_int32_value\x18\x01 \x01(\x05H\x00\x12\x1c\n\x12oneof_string_value\x18\x02 \x01(\tH\x00\x12\x1b\n\x11oneof_bytes_value\x18\x03 \x01(\x0cH\x00\x12,\n\x10oneof_enum_value\x18\x04 \x01(\x0e\x32\x10.proto3.EnumTypeH\x00\x12\x32\n\x13oneof_message_value\x18\x05 \x01(\x0b\x32\x13.proto3.MessageTypeH\x00\x12\x36\n\x10oneof_null_value\x18\x06 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x42\r\n\x0boneof_value\"\xe1\x04\n\x07TestMap\x12.\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32\x1c.proto3.TestMap.BoolMapEntry\x12\x30\n\tint32_map\x18\x02 \x03(\x0b\x32\x1d.proto3.TestMap.Int32MapEntry\x12\x30\n\tint64_map\x18\x03 \x03(\x0b\x32\x1d.proto3.TestMap.Int64MapEntry\x12\x32\n\nuint32_map\x18\x04 \x03(\x0b\x32\x1e.proto3.TestMap.Uint32MapEntry\x12\x32\n\nuint64_map\x18\x05 \x03(\x0b\x32\x1e.proto3.TestMap.Uint64MapEntry\x12\x32\n\nstring_map\x18\x06 \x03(\x0b\x32\x1e.proto3.TestMap.StringMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\x85\x06\n\rTestNestedMap\x12\x34\n\x08\x62ool_map\x18\x01 \x03(\x0b\x32\".proto3.TestNestedMap.BoolMapEntry\x12\x36\n\tint32_map\x18\x02 \x03(\x0b\x32#.proto3.TestNestedMap.Int32MapEntry\x12\x36\n\tint64_map\x18\x03 \x03(\x0b\x32#.proto3.TestNestedMap.Int64MapEntry\x12\x38\n\nuint32_map\x18\x04 \x03(\x0b\x32$.proto3.TestNestedMap.Uint32MapEntry\x12\x38\n\nuint64_map\x18\x05 \x03(\x0b\x32$.proto3.TestNestedMap.Uint64MapEntry\x12\x38\n\nstring_map\x18\x06 \x03(\x0b\x32$.proto3.TestNestedMap.StringMapEntry\x12\x32\n\x07map_map\x18\x07 \x03(\x0b\x32!.proto3.TestNestedMap.MapMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a/\n\rInt64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint32MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eUint64MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x44\n\x0bMapMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.proto3.TestNestedMap:\x02\x38\x01\"{\n\rTestStringMap\x12\x38\n\nstring_map\x18\x01 \x03(\x0b\x32$.proto3.TestStringMap.StringMapEntry\x1a\x30\n\x0eStringMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xee\x07\n\x0bTestWrapper\x12.\n\nbool_value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\x0bint32_value\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x30\n\x0bint64_value\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x32\n\x0cuint32_value\x18\x04 
\x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x32\n\x0cuint64_value\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x30\n\x0b\x66loat_value\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.FloatValue\x12\x32\n\x0c\x64ouble_value\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x32\n\x0cstring_value\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x62ytes_value\x18\t \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x37\n\x13repeated_bool_value\x18\x0b \x03(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x14repeated_int32_value\x18\x0c \x03(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x39\n\x14repeated_int64_value\x18\r \x03(\x0b\x32\x1b.google.protobuf.Int64Value\x12;\n\x15repeated_uint32_value\x18\x0e \x03(\x0b\x32\x1c.google.protobuf.UInt32Value\x12;\n\x15repeated_uint64_value\x18\x0f \x03(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x39\n\x14repeated_float_value\x18\x10 \x03(\x0b\x32\x1b.google.protobuf.FloatValue\x12;\n\x15repeated_double_value\x18\x11 \x03(\x0b\x32\x1c.google.protobuf.DoubleValue\x12;\n\x15repeated_string_value\x18\x12 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x14repeated_bytes_value\x18\x13 \x03(\x0b\x32\x1b.google.protobuf.BytesValue\"n\n\rTestTimestamp\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.Timestamp\"k\n\x0cTestDuration\x12(\n\x05value\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x31\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x19.google.protobuf.Duration\":\n\rTestFieldMask\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"e\n\nTestStruct\x12&\n\x05value\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12/\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x17.google.protobuf.Struct\"\\\n\x07TestAny\x12#\n\x05value\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any\x12,\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x14.google.protobuf.Any\"b\n\tTestValue\x12%\n\x05value\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\x12.\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Value\"n\n\rTestListValue\x12)\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.ListValue\x12\x32\n\x0erepeated_value\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.ListValue\"\x89\x01\n\rTestBoolValue\x12\x12\n\nbool_value\x18\x01 \x01(\x08\x12\x34\n\x08\x62ool_map\x18\x02 \x03(\x0b\x32\".proto3.TestBoolValue.BoolMapEntry\x1a.\n\x0c\x42oolMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"+\n\x12TestCustomJsonName\x12\x15\n\x05value\x18\x01 \x01(\x05R\x06@value\"J\n\x0eTestExtensions\x12\x38\n\nextensions\x18\x01 \x01(\x0b\x32$.protobuf_unittest.TestAllExtensions\"\x84\x01\n\rTestEnumValue\x12%\n\x0b\x65num_value1\x18\x01 \x01(\x0e\x32\x10.proto3.EnumType\x12%\n\x0b\x65num_value2\x18\x02 \x01(\x0e\x32\x10.proto3.EnumType\x12%\n\x0b\x65num_value3\x18\x03 \x01(\x0e\x32\x10.proto3.EnumType*\x1c\n\x08\x45numType\x12\x07\n\x03\x46OO\x10\x00\x12\x07\n\x03\x42\x41R\x10\x01\x42,\n\x18\x63om.google.protobuf.utilB\x10JsonFormatProto3b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.util.json_format_proto3_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\030com.google.protobuf.utilB\020JsonFormatProto3' + _TESTMAP_BOOLMAPENTRY._options = None + _TESTMAP_BOOLMAPENTRY._serialized_options = b'8\001' + 
_TESTMAP_INT32MAPENTRY._options = None + _TESTMAP_INT32MAPENTRY._serialized_options = b'8\001' + _TESTMAP_INT64MAPENTRY._options = None + _TESTMAP_INT64MAPENTRY._serialized_options = b'8\001' + _TESTMAP_UINT32MAPENTRY._options = None + _TESTMAP_UINT32MAPENTRY._serialized_options = b'8\001' + _TESTMAP_UINT64MAPENTRY._options = None + _TESTMAP_UINT64MAPENTRY._serialized_options = b'8\001' + _TESTMAP_STRINGMAPENTRY._options = None + _TESTMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_BOOLMAPENTRY._options = None + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_INT32MAPENTRY._options = None + _TESTNESTEDMAP_INT32MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_INT64MAPENTRY._options = None + _TESTNESTEDMAP_INT64MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_UINT32MAPENTRY._options = None + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_UINT64MAPENTRY._options = None + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_STRINGMAPENTRY._options = None + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTNESTEDMAP_MAPMAPENTRY._options = None + _TESTNESTEDMAP_MAPMAPENTRY._serialized_options = b'8\001' + _TESTSTRINGMAP_STRINGMAPENTRY._options = None + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_options = b'8\001' + _TESTBOOLVALUE_BOOLMAPENTRY._options = None + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_options = b'8\001' + _ENUMTYPE._serialized_start=4849 + _ENUMTYPE._serialized_end=4877 + _MESSAGETYPE._serialized_start=277 + _MESSAGETYPE._serialized_end=305 + _TESTMESSAGE._serialized_start=308 + _TESTMESSAGE._serialized_end=968 + _TESTONEOF._serialized_start=971 + _TESTONEOF._serialized_end=1239 + _TESTMAP._serialized_start=1242 + _TESTMAP._serialized_end=1851 + _TESTMAP_BOOLMAPENTRY._serialized_start=1557 + _TESTMAP_BOOLMAPENTRY._serialized_end=1603 + _TESTMAP_INT32MAPENTRY._serialized_start=1605 + _TESTMAP_INT32MAPENTRY._serialized_end=1652 + _TESTMAP_INT64MAPENTRY._serialized_start=1654 + _TESTMAP_INT64MAPENTRY._serialized_end=1701 + _TESTMAP_UINT32MAPENTRY._serialized_start=1703 + _TESTMAP_UINT32MAPENTRY._serialized_end=1751 + _TESTMAP_UINT64MAPENTRY._serialized_start=1753 + _TESTMAP_UINT64MAPENTRY._serialized_end=1801 + _TESTMAP_STRINGMAPENTRY._serialized_start=1803 + _TESTMAP_STRINGMAPENTRY._serialized_end=1851 + _TESTNESTEDMAP._serialized_start=1854 + _TESTNESTEDMAP._serialized_end=2627 + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_start=1557 + _TESTNESTEDMAP_BOOLMAPENTRY._serialized_end=1603 + _TESTNESTEDMAP_INT32MAPENTRY._serialized_start=1605 + _TESTNESTEDMAP_INT32MAPENTRY._serialized_end=1652 + _TESTNESTEDMAP_INT64MAPENTRY._serialized_start=1654 + _TESTNESTEDMAP_INT64MAPENTRY._serialized_end=1701 + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_start=1703 + _TESTNESTEDMAP_UINT32MAPENTRY._serialized_end=1751 + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_start=1753 + _TESTNESTEDMAP_UINT64MAPENTRY._serialized_end=1801 + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_start=1803 + _TESTNESTEDMAP_STRINGMAPENTRY._serialized_end=1851 + _TESTNESTEDMAP_MAPMAPENTRY._serialized_start=2559 + _TESTNESTEDMAP_MAPMAPENTRY._serialized_end=2627 + _TESTSTRINGMAP._serialized_start=2629 + _TESTSTRINGMAP._serialized_end=2752 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_start=2704 + _TESTSTRINGMAP_STRINGMAPENTRY._serialized_end=2752 + _TESTWRAPPER._serialized_start=2755 + _TESTWRAPPER._serialized_end=3761 + _TESTTIMESTAMP._serialized_start=3763 + _TESTTIMESTAMP._serialized_end=3873 + 
_TESTDURATION._serialized_start=3875 + _TESTDURATION._serialized_end=3982 + _TESTFIELDMASK._serialized_start=3984 + _TESTFIELDMASK._serialized_end=4042 + _TESTSTRUCT._serialized_start=4044 + _TESTSTRUCT._serialized_end=4145 + _TESTANY._serialized_start=4147 + _TESTANY._serialized_end=4239 + _TESTVALUE._serialized_start=4241 + _TESTVALUE._serialized_end=4339 + _TESTLISTVALUE._serialized_start=4341 + _TESTLISTVALUE._serialized_end=4451 + _TESTBOOLVALUE._serialized_start=4454 + _TESTBOOLVALUE._serialized_end=4591 + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_start=1557 + _TESTBOOLVALUE_BOOLMAPENTRY._serialized_end=1603 + _TESTCUSTOMJSONNAME._serialized_start=4593 + _TESTCUSTOMJSONNAME._serialized_end=4636 + _TESTEXTENSIONS._serialized_start=4638 + _TESTEXTENSIONS._serialized_end=4712 + _TESTENUMVALUE._serialized_start=4715 + _TESTENUMVALUE._serialized_end=4847 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/wrappers_pb2.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/wrappers_pb2.py new file mode 100644 index 00000000..e49eb4c1 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/google/protobuf/wrappers_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/wrappers.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 \x01(\x01\"\x1b\n\nFloatValue\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1b\n\nInt64Value\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1c\n\x0bUInt64Value\x12\r\n\x05value\x18\x01 \x01(\x04\"\x1b\n\nInt32Value\x12\r\n\x05value\x18\x01 \x01(\x05\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1b\n\nBytesValue\x12\r\n\x05value\x18\x01 \x01(\x0c\x42\x83\x01\n\x13\x63om.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.wrappers_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\rWrappersProtoP\001Z1google.golang.org/protobuf/types/known/wrapperspb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' + _DOUBLEVALUE._serialized_start=51 + _DOUBLEVALUE._serialized_end=79 + _FLOATVALUE._serialized_start=81 + _FLOATVALUE._serialized_end=108 + _INT64VALUE._serialized_start=110 + _INT64VALUE._serialized_end=137 + _UINT64VALUE._serialized_start=139 + _UINT64VALUE._serialized_end=167 + _INT32VALUE._serialized_start=169 + _INT32VALUE._serialized_end=196 + _UINT32VALUE._serialized_start=198 + _UINT32VALUE._serialized_end=226 + _BOOLVALUE._serialized_start=228 + _BOOLVALUE._serialized_end=254 + _STRINGVALUE._serialized_start=256 + 
_STRINGVALUE._serialized_end=284 + _BYTESVALUE._serialized_start=286 + _BYTESVALUE._serialized_end=313 +# @@protoc_insertion_point(module_scope) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/AUTHORS b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/AUTHORS new file mode 100644 index 00000000..42a5c227 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/AUTHORS @@ -0,0 +1,51 @@ +Original Authors +---------------- +* Armin Rigo +* Christian Tismer + +Contributors +------------ +* Al Stone +* Alexander Schmidt +* Alexey Borzenkov +* Andreas Schwab +* Armin Ronacher +* Bin Wang +* Bob Ippolito +* ChangBo Guo +* Christoph Gohlke +* Denis Bilenko +* Dirk Mueller +* Donovan Preston +* Fantix King +* Floris Bruynooghe +* Fredrik Fornwall +* Gerd Woetzel +* Giel van Schijndel +* Gökhan Karabulut +* Gustavo Niemeyer +* Guy Rozendorn +* Hye-Shik Chang +* Jared Kuolt +* Jason Madden +* Josh Snyder +* Kyle Ambroff +* Laszlo Boszormenyi +* Mao Han +* Marc Abramowitz +* Marc Schlaich +* Marcin Bachry +* Matt Madison +* Matt Turner +* Michael Ellerman +* Michael Matz +* Ralf Schmitt +* Robie Basak +* Ronny Pfannschmidt +* Samual M. Rushing +* Tony Bowles +* Tony Breeds +* Trevor Bowen +* Tulio Magno Quites Machado Filho +* Ulrich Weigand +* Victor Stinner diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/INSTALLER b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/LICENSE b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/LICENSE new file mode 100644 index 00000000..b73a4a10 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/LICENSE @@ -0,0 +1,30 @@ +The following files are derived from Stackless Python and are subject to the +same license as Stackless Python: + + src/greenlet/slp_platformselect.h + files in src/greenlet/platform/ directory + +See LICENSE.PSF and http://www.stackless.com/ for details. + +Unless otherwise noted, the files in greenlet have been released under the +following MIT license: + +Copyright (c) Armin Rigo, Christian Tismer and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/LICENSE.PSF b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/LICENSE.PSF new file mode 100644 index 00000000..d3b509a2 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/LICENSE.PSF @@ -0,0 +1,47 @@ +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011 Python Software Foundation; All Rights Reserved" are retained in Python +alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. 
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/METADATA b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/METADATA new file mode 100644 index 00000000..2100a3e7 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/METADATA @@ -0,0 +1,106 @@ +Metadata-Version: 2.1 +Name: greenlet +Version: 2.0.2 +Summary: Lightweight in-process concurrent programming +Home-page: https://greenlet.readthedocs.io/ +Author: Alexey Borzenkov +Author-email: snaury@gmail.com +Maintainer: Jason Madden +Maintainer-email: jason@seecoresoftware.com +License: MIT License +Project-URL: Bug Tracker, https://github.com/python-greenlet/greenlet/issues +Project-URL: Source Code, https://github.com/python-greenlet/greenlet/ +Project-URL: Documentation, https://greenlet.readthedocs.io/ +Keywords: greenlet coroutine concurrency threads cooperative +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Natural Language :: English +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.* +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: LICENSE.PSF +License-File: AUTHORS +Provides-Extra: docs +Requires-Dist: Sphinx ; extra == 'docs' +Requires-Dist: docutils (<0.18) ; (python_version < "3") and extra == 'docs' +Provides-Extra: test +Requires-Dist: objgraph ; extra == 'test' +Requires-Dist: psutil ; extra == 'test' + +.. This file is included into docs/history.rst + +.. image:: https://github.com/python-greenlet/greenlet/workflows/tests/badge.svg + :target: https://github.com/python-greenlet/greenlet/actions + +Greenlets are lightweight coroutines for in-process concurrent +programming. + +The "greenlet" package is a spin-off of `Stackless`_, a version of +CPython that supports micro-threads called "tasklets". Tasklets run +pseudo-concurrently (typically in a single or a few OS-level threads) +and are synchronized with data exchanges on "channels". + +A "greenlet", on the other hand, is a still more primitive notion of +micro-thread with no implicit scheduling; coroutines, in other words. +This is useful when you want to control exactly when your code runs. +You can build custom scheduled micro-threads on top of greenlet; +however, it seems that greenlets are useful on their own as a way to +make advanced control flow structures. For example, we can recreate +generators; the difference with Python's own generators is that our +generators can call nested functions and the nested functions can +yield values too. (Additionally, you don't need a "yield" keyword. See +the example in `test_generator.py +`_). 
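The paragraph above describes rebuilding generators on top of greenlets, with nested functions able to yield. Below is a minimal sketch of that pattern using only the public API (``greenlet``, ``switch``, ``parent``); the ``Generator`` class and ``count_up`` driver are illustrative names rather than part of the package, and the sketch assumes it is driven from the greenlet that created it::

    from greenlet import greenlet

    class Generator:
        """Runs `fn` in its own greenlet; values flow back via switch()."""
        def __init__(self, fn):
            self.fn = fn
            self.produced = None
            self.done = False
            self.glet = greenlet(self._run)

        def _run(self):
            self.fn(self)
            self.done = True

        def produce(self, value):
            # Stash the value, then hand control back to the consumer.
            self.produced = value
            self.glet.parent.switch()

        def __iter__(self):
            return self

        def __next__(self):
            self.glet.switch()  # resume fn where it last produced
            if self.done:
                raise StopIteration
            return self.produced

    def count_up(gen):
        def nested(n):
            gen.produce(n)  # a *nested* helper can "yield" too
        for i in range(3):
            nested(i)

    print(list(Generator(count_up)))  # [0, 1, 2]

Because ``produce()`` switches greenlets instead of returning, the nested helper suspends the whole call stack at once, which is what a plain ``yield`` cannot do.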
+ +Greenlets are provided as a C extension module for the regular unmodified +interpreter. + +.. _`Stackless`: http://www.stackless.com + + +Who is using Greenlet? +====================== + +There are several libraries that use Greenlet as a more flexible +alternative to Python's built in coroutine support: + + - `Concurrence`_ + - `Eventlet`_ + - `Gevent`_ + +.. _Concurrence: http://opensource.hyves.org/concurrence/ +.. _Eventlet: http://eventlet.net/ +.. _Gevent: http://www.gevent.org/ + +Getting Greenlet +================ + +The easiest way to get Greenlet is to install it with pip:: + + pip install greenlet + + +Source code archives and binary distributions are available on the +python package index at https://pypi.org/project/greenlet + +The source code repository is hosted on github: +https://github.com/python-greenlet/greenlet + +Documentation is available on readthedocs.org: +https://greenlet.readthedocs.io diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/RECORD b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/RECORD new file mode 100644 index 00000000..5700c077 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/RECORD @@ -0,0 +1,91 @@ +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/leakcheck.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_contextvars.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_cpp.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_extension_interface.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_gc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_generator.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_generator_nested.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_greenlet.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_greenlet_trash.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_leaks.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_stack_saved.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_throw.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_tracing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_version.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/linkedin/Desktop/advanced-python-working-with-databases-4365479/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_weakref.cpython-39.pyc,, +../../../include/site/python3.9/greenlet/greenlet.h,sha256=sz5pYRSQqedgOt2AMgxLZdTjO-qcr_JMvgiEJR9IAJ8,4755 +greenlet-2.0.2.dist-info/AUTHORS,sha256=swW28t2knVRxRkaEQNZtO7MP9Sgnompb7B6cNgJM8Gk,849 +greenlet-2.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +greenlet-2.0.2.dist-info/LICENSE,sha256=dpgx1uXfrywggC-sz_H6-0wgJd2PYlPfpH_K1Z1NCXk,1434 +greenlet-2.0.2.dist-info/LICENSE.PSF,sha256=5f88I8EQ5JTNfXNsEP2W1GJFe6_soxCEDbZScpjH1Gs,2424 +greenlet-2.0.2.dist-info/METADATA,sha256=KZpnNWzPIfiuyKUmIqKejP15A-764pAa--ytS9zIoMY,4094 +greenlet-2.0.2.dist-info/RECORD,, +greenlet-2.0.2.dist-info/WHEEL,sha256=Tb_t7JGjS9UNO7L5mRtd9E9oLUA9YQccQulkdRossZI,109 +greenlet-2.0.2.dist-info/top_level.txt,sha256=YSnRsCRoO61JGlP57o8iKL6rdLWDWuiyKD8ekpWUsDc,9 +greenlet/__init__.py,sha256=0uLqzJsY24W1VZgMSvKDRr7H3lIAajRNvjeDjenhwg8,1723 +greenlet/_greenlet.cpython-39-darwin.so,sha256=Jjwyekrv5MUzGJAPeRB6bwhAmJ3xPJUYpG4TpLzIS6c,337291 +greenlet/greenlet.cpp,sha256=edwdXy3KBU7RnjO7BXfQ-OUMrjSTuPEH7dk07dGjd9c,110159 +greenlet/greenlet.h,sha256=sz5pYRSQqedgOt2AMgxLZdTjO-qcr_JMvgiEJR9IAJ8,4755 +greenlet/greenlet_allocator.hpp,sha256=kxyWW4Qdwlrc7ufgdb5vd6Y7jhauQ699Kod0mqiO1iM,1582 +greenlet/greenlet_compiler_compat.hpp,sha256=c-z5dQNEyja6Wos4-umxsFQ04_MPRSneOZAGyDCwHs4,5446 +greenlet/greenlet_cpython_compat.hpp,sha256=6TYeFAhuLQQ33FS3Ndun35W7oyARqcUwYdleRsysfqg,4806 +greenlet/greenlet_exceptions.hpp,sha256=FiWMqW8Aj6RedcmNDUk7dVd5T9HIdLgL-DREOyZ0new,2307 +greenlet/greenlet_greenlet.hpp,sha256=k7Z2HNmGckPoXbpEbP9L9hvecQ32SuP7oFGDwwSdHME,41611 +greenlet/greenlet_internal.hpp,sha256=bfIjQ4gXFtk7mJt9xcPnWholXa1FxL6Q5ip-lWbrhjI,2707 +greenlet/greenlet_refs.hpp,sha256=x117iO59IolyQ_rTwP_xW66TW_Q2D_jqbQJtmD0N6H0,32906 
+greenlet/greenlet_slp_switch.hpp,sha256=dGtzhCBzsVI0b9KCHulv8X7kXjasZ7KCrKYxFB-fewY,3987 +greenlet/greenlet_thread_state.hpp,sha256=vzRx-RdlhDQIfsi_uitvd6Mudb3_3F5Mzxi0Cs7vuuU,21075 +greenlet/greenlet_thread_state_dict_cleanup.hpp,sha256=tEN0rI1pZiEsdtr7Oda24gr52fGiHnYTLyM8Vme3Gns,3831 +greenlet/greenlet_thread_support.hpp,sha256=ZI5Ye4842cGDyUo8aVKM2FQ_jWs7Pj0cR27Nxx1IY6s,4126 +greenlet/platform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +greenlet/platform/setup_switch_x64_masm.cmd,sha256=ZpClUJeU0ujEPSTWNSepP0W2f9XiYQKA8QKSoVou8EU,143 +greenlet/platform/switch_aarch64_gcc.h,sha256=J89BnU496h11g99DXl1DYnZXwdZAb2bK0g3AZ8BSQgY,2683 +greenlet/platform/switch_alpha_unix.h,sha256=T6kOBiHy3hLmy1vrmFrxbnOnRu0EJkoG_yuWy7fykZ4,689 +greenlet/platform/switch_amd64_unix.h,sha256=EcSFCBlodEBhqhKjcJqY_5Dn_jn7pKpkJlOvp7gFXLI,2748 +greenlet/platform/switch_arm32_gcc.h,sha256=wflI2cGZBfLzM_GGgYx3OrFeoOq7OTsJP53dKLsrxS0,2488 +greenlet/platform/switch_arm32_ios.h,sha256=yQZXCa0AZbyAIS9tKceyTCrRYlihpFBKDbiPCn_3im0,1901 +greenlet/platform/switch_arm64_masm.asm,sha256=4kpTtfy7rfcr8j1CpJLAK21EtZpGDAJXWRU68HEy5A8,1245 +greenlet/platform/switch_arm64_masm.obj,sha256=DmLnIB_icoEHAz1naue_pJPTZgR9ElM7-Nmztr-o9_U,746 +greenlet/platform/switch_arm64_msvc.h,sha256=RqK5MHLmXI3Q-FQ7tm32KWnbDNZKnkJdq8CR89cz640,398 +greenlet/platform/switch_csky_gcc.h,sha256=GHlaVXrzQuSkrDqgL7-Ji9YwZnprpFhjPznNyp0NnvU,1340 +greenlet/platform/switch_m68k_gcc.h,sha256=VSa6NpZhvyyvF-Q58CTIWSpEDo4FKygOyTz00whctlw,928 +greenlet/platform/switch_mips_unix.h,sha256=9ptMGEBXafee15RxOm5NrxiC2bEnwM9AkxJ7ktVatU8,1444 +greenlet/platform/switch_ppc64_aix.h,sha256=ADpifLPlr6pTdT76bt6ozcqPjHrfPsJ93lQfc1VNaug,3878 +greenlet/platform/switch_ppc64_linux.h,sha256=jqPKpTg09FzmCn59Kt6OJi2-40aoazFVJcf1YETLlwA,3833 +greenlet/platform/switch_ppc_aix.h,sha256=nClVVlsRlFAI-I3fmivSJyJK7Xzx3_8l3Wf8QNJ9FMU,2959 +greenlet/platform/switch_ppc_linux.h,sha256=J4eKMA73WbPYSaq0yAedzHB6J6ZKE8tIIzkqYxlaA2c,2777 +greenlet/platform/switch_ppc_macosx.h,sha256=bnL2MqIUm9--NHizb5NYijvSrqutvuJx4auYCdqXllM,2642 +greenlet/platform/switch_ppc_unix.h,sha256=5UW9c71NGJh6xksEbAOButBFH168QRyZ5O53yXdXGxg,2670 +greenlet/platform/switch_riscv_unix.h,sha256=jX3vC_xZXiUho8tz4J6Ai8BNQB80yLn03fxkoMztVCU,740 +greenlet/platform/switch_s390_unix.h,sha256=9oJkYnyUovPvXOAsVLXoj-Unl_Rr_DidkXYMaRXLS0w,2781 +greenlet/platform/switch_sparc_sun_gcc.h,sha256=0vHXNNCdz-1ioQsw-OtK0ridnBVIzErYWiK7bBu6OgM,2815 +greenlet/platform/switch_x32_unix.h,sha256=ie7Nxo6Cf_x4UVOSA_a3bJYPlRKZ1BvLWsclyQle_SY,1527 +greenlet/platform/switch_x64_masm.asm,sha256=nu6n2sWyXuXfpPx40d9YmLfHXUc1sHgeTvX1kUzuvEM,1841 +greenlet/platform/switch_x64_masm.obj,sha256=GNtTNxYdo7idFUYsQv-mrXWgyT5EJ93-9q90lN6svtQ,1078 +greenlet/platform/switch_x64_msvc.h,sha256=LIeasyKo_vHzspdMzMHbosRhrBfKI4BkQOh4qcTHyJw,1805 +greenlet/platform/switch_x86_msvc.h,sha256=TtGOwinbFfnn6clxMNkCz8i6OmgB6kVRrShoF5iT9to,12838 +greenlet/platform/switch_x86_unix.h,sha256=WvY2sNMFIEfoFVNVakl-osygJui3pSnlVj5jBrdaU08,3068 +greenlet/slp_platformselect.h,sha256=t4Yy9Eb0zO1Qhu9RsRj1YLoIueNAOywrHGZZbeuFz64,3280 +greenlet/tests/__init__.py,sha256=Qo3bLZpIWxq-tqpRVLql1O_tOMjK-ROknGPXhMTAt0g,4976 +greenlet/tests/_test_extension.c,sha256=py-Rg7fRdAMNwpxgoHDzf5PeT1-h3iZ_xpGBVcmzu4M,6017 +greenlet/tests/_test_extension.cpython-39-darwin.so,sha256=x-ccOuknzDKxLUIWPW0XH7ubgI8EOTE2OGBywd4jR3s,134369 +greenlet/tests/_test_extension_cpp.cpp,sha256=hUEUj8zBCbOc6jBYLYooAz_rEUtZd1U-lv6ykyd3BZY,5639 
+greenlet/tests/_test_extension_cpp.cpython-39-darwin.so,sha256=rmUNAsw3GKag72Bwb-a_O2oqFjODNblSF7MQStMOsxM,134709 +greenlet/tests/leakcheck.py,sha256=SgPOQ5_vttOiLDsCOV6wXvvXRxy6noNHqEwctTC5Vpc,11929 +greenlet/tests/test_contextvars.py,sha256=2fRW58UnSPilM6oWkpKPiQjt2Nt7GX3S1TTSlIWg1qE,10240 +greenlet/tests/test_cpp.py,sha256=kDyP_aEFWgihbGwHoYEbDckez_ceh0pfNNl0cWD5C6s,2963 +greenlet/tests/test_extension_interface.py,sha256=eJ3cwLacdK2WbsrC-4DgeyHdwLRcG4zx7rrkRtqSzC4,3829 +greenlet/tests/test_gc.py,sha256=nf4pgF0eUz8tUYQGPHRPWQZPslztN-FfxvD4EONIpmw,2916 +greenlet/tests/test_generator.py,sha256=tONXiTf98VGm347o1b-810daPiwdla5cbpFg6QI1R1g,1240 +greenlet/tests/test_generator_nested.py,sha256=gMTDwBb5Rx4UcuYYp31YufLONLXruVDaCcKlJ4UIk64,3720 +greenlet/tests/test_greenlet.py,sha256=8HV85AKGTGOEYOcEeYRodWInLamjQIVz0SY8sD0o7ZQ,37747 +greenlet/tests/test_greenlet_trash.py,sha256=e-1l_mexXRpIYpwYvOPqXjzmE9oI0BXMRpT3ywWP-Bw,7683 +greenlet/tests/test_leaks.py,sha256=yx57dXe1wLB_NMinIvIDKRnUj-g6YDytox3Vkx1LXTE,17683 +greenlet/tests/test_stack_saved.py,sha256=eyzqNY2VCGuGlxhT_In6TvZ6Okb0AXFZVyBEnK1jDwA,446 +greenlet/tests/test_throw.py,sha256=cowzx8900jpKon8-N4-UwsGH9ox5hfsqtDoVUNat84g,3734 +greenlet/tests/test_tracing.py,sha256=KjZh3t-4f9q1YG5JJ0sKxmwXUwDnpRHU1Y4x0Fi4N3E,7843 +greenlet/tests/test_version.py,sha256=O9DpAITsOFgiRcjd4odQ7ejmwx_N9Q1zQENVcbtFHIc,1339 +greenlet/tests/test_weakref.py,sha256=NWOaaJOMn83oKdXGoGzGAswb-QRHprlF2f0-4igjZMI,898 diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/WHEEL b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/WHEEL new file mode 100644 index 00000000..9db4b0cb --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: false +Tag: cp39-cp39-macosx_11_0_x86_64 + diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/top_level.txt b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/top_level.txt new file mode 100644 index 00000000..46725be4 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet-2.0.2.dist-info/top_level.txt @@ -0,0 +1 @@ +greenlet diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/__init__.py new file mode 100644 index 00000000..ada1165d --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/__init__.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" +The root of the greenlet package. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +__all__ = [ + '__version__', + '_C_API', + + 'GreenletExit', + 'error', + + 'getcurrent', + 'greenlet', + + 'gettrace', + 'settrace', +] + +# pylint:disable=no-name-in-module + +### +# Metadata +### +__version__ = '2.0.2' +from ._greenlet import _C_API # pylint:disable=no-name-in-module + +### +# Exceptions +### +from ._greenlet import GreenletExit +from ._greenlet import error + +### +# greenlets +### +from ._greenlet import getcurrent +from ._greenlet import greenlet + +### +# tracing +### +try: + from ._greenlet import gettrace + from ._greenlet import settrace +except ImportError: + # Tracing wasn't supported. + # XXX: The option to disable it was removed in 1.0, + # so this branch should be dead code. 
+ pass + +### +# Constants +# These constants aren't documented and aren't recommended. +# In 1.0, USE_GC and USE_TRACING are always true, and USE_CONTEXT_VARS +# is the same as ``sys.version_info[:2] >= 3.7`` +### +from ._greenlet import GREENLET_USE_CONTEXT_VARS # pylint:disable=unused-import +from ._greenlet import GREENLET_USE_GC # pylint:disable=unused-import +from ._greenlet import GREENLET_USE_TRACING # pylint:disable=unused-import + +# Controlling the use of the gc module. Provisional API for this greenlet +# implementation in 2.0. +from ._greenlet import CLOCKS_PER_SEC # pylint:disable=unused-import +from ._greenlet import enable_optional_cleanup # pylint:disable=unused-import +from ._greenlet import get_clocks_used_doing_optional_cleanup # pylint:disable=unused-import + +# Other APIS in the _greenlet module are for test support. diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/_greenlet.cpython-39-darwin.so b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/_greenlet.cpython-39-darwin.so new file mode 100755 index 00000000..8166e71b Binary files /dev/null and b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/_greenlet.cpython-39-darwin.so differ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet.cpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet.cpp new file mode 100644 index 00000000..1d6ddaae --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet.cpp @@ -0,0 +1,3256 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +/* Format with: + * clang-format -i --style=file src/greenlet/greenlet.c + * + * + * Fix missing braces with: + * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements" +*/ +#include +#include +#include +#include + + +#define PY_SSIZE_T_CLEAN +#include +#include "structmember.h" // PyMemberDef + +#include "greenlet_internal.hpp" +#include "greenlet_refs.hpp" +#include "greenlet_slp_switch.hpp" +#include "greenlet_thread_state.hpp" +#include "greenlet_thread_support.hpp" +#include "greenlet_greenlet.hpp" + +using greenlet::ThreadState; +using greenlet::Mutex; +using greenlet::LockGuard; +using greenlet::LockInitError; +using greenlet::PyErrOccurred; +using greenlet::Require; +using greenlet::PyFatalError; +using greenlet::ExceptionState; +using greenlet::StackState; +using greenlet::Greenlet; + + +// Helpers for reference counting. +// XXX: running the test cases for greenlet 1.1.2 under Python 3.10+pydebug +// with zope.testrunner's "report refcounts" option shows a growth of +// over 500 references when running 90 tests at a steady state (10 repeats) +// Running in verbose mode and adding objgraph to report gives us this +// info in a steady state: +// Ran 90 tests with 0 failures, 0 errors and 1 skipped in 2.120 seconds. 
+// Showing growth +// tuple 2811 +16 +// list 1733 +14 +// function 6304 +11 +// dict 3604 +9 +// cell 707 +9 +// greenlet 81 +8 +// method 103 +5 +// Genlet 40 +4 +// list_iterator 30 +3 +// getset_descriptor 916 +2 +// sum detail refcount=341678 sys refcount=379357 change=523 +// Leak details, changes in instances and refcounts by type/class: +// type/class insts refs +// ------------------------------------------------------- ----- ---- +// builtins.NoneType 0 2 +// builtins.cell 9 20 +// builtins.code 0 31 +// builtins.dict 18 91 +// builtins.frame 20 32 +// builtins.function 11 28 +// builtins.getset_descriptor 2 2 +// builtins.int 2 42 +// builtins.list 14 37 +// builtins.list_iterator 3 3 +// builtins.method 5 5 +// builtins.method_descriptor 0 9 +// builtins.str 11 76 +// builtins.traceback 1 2 +// builtins.tuple 20 42 +// builtins.type 2 28 +// builtins.weakref 2 2 +// greenlet.GreenletExit 1 1 +// greenlet.greenlet 8 26 +// greenlet.tests.test_contextvars.NoContextVarsTests 0 1 +// greenlet.tests.test_gc.object_with_finalizer 1 1 +// greenlet.tests.test_generator_nested.Genlet 4 26 +// greenlet.tests.test_greenlet.convoluted 1 2 +// ------------------------------------------------------- ----- ---- +// total 135 509 +// +// As of the commit that adds this comment, we're doing better than +// 1.1.2, but still not perfect: +// Ran 115 tests with 0 failures, 0 errors, 1 skipped in 8.623 seconds. +// tuple 21310 +23 +// dict 5428 +18 +// frame 183 +17 +// list 1760 +14 +// function 6359 +11 +// cell 698 +8 +// method 105 +5 +// int 2709 +4 +// TheGenlet 40 +4 +// list_iterator 30 +3 +// sum detail refcount=345051 sys refcount=383043 change=494 +// Leak details, changes in instances and refcounts by type/class: +// type/class insts refs +// ------------------------------------------------------- ----- ---- +// builtins.NoneType 0 12 +// builtins.bool 0 2 +// builtins.cell 8 16 +// builtins.code 0 28 +// builtins.dict 18 74 +// builtins.frame 17 28 +// builtins.function 11 28 +// builtins.getset_descriptor 2 2 +// builtins.int 4 44 +// builtins.list 14 39 +// builtins.list_iterator 3 3 +// builtins.method 5 5 +// builtins.method_descriptor 0 8 +// builtins.str -2 69 +// builtins.tuple 23 42 +// builtins.type 2 28 +// builtins.weakref 2 2 +// greenlet.greenlet 1 1 +// greenlet.main_greenlet 1 16 +// greenlet.tests.test_contextvars.NoContextVarsTests 0 1 +// greenlet.tests.test_gc.object_with_finalizer 1 1 +// greenlet.tests.test_generator_nested.TheGenlet 4 29 +// greenlet.tests.test_greenlet.convoluted 1 2 +// greenlet.tests.test_leaks.HasFinalizerTracksInstances 2 2 +// ------------------------------------------------------- ----- ---- +// total 117 482 + +using greenlet::refs::BorrowedObject; +using greenlet::refs::BorrowedGreenlet; +using greenlet::refs::BorrowedMainGreenlet; +using greenlet::refs::OwnedObject; +using greenlet::refs::PyErrFetchParam; +using greenlet::refs::PyArgParseParam; +using greenlet::refs::ImmortalString; +using greenlet::refs::ImmortalObject; +using greenlet::refs::CreatedModule; +using greenlet::refs::PyErrPieces; +using greenlet::refs::PyObjectPointer; +using greenlet::Greenlet; +using greenlet::UserGreenlet; +using greenlet::MainGreenlet; + + +// ******* Implementation of things from included files +template +greenlet::refs::_BorrowedGreenlet& greenlet::refs::_BorrowedGreenlet::operator=(const greenlet::refs::BorrowedObject& other) +{ + this->_set_raw_pointer(static_cast(other)); + return *this; +} + +template +inline 
greenlet::refs::_BorrowedGreenlet::operator Greenlet*() const G_NOEXCEPT +{ + if (!this->p) { + return nullptr; + } + return reinterpret_cast(this->p)->pimpl; +} + +template +greenlet::refs::_BorrowedGreenlet::_BorrowedGreenlet(const BorrowedObject& p) + : BorrowedReference(nullptr) +{ + + this->_set_raw_pointer(p.borrow()); +} + +template +inline greenlet::refs::_OwnedGreenlet::operator Greenlet*() const G_NOEXCEPT +{ + if (!this->p) { + return nullptr; + } + return reinterpret_cast(this->p)->pimpl; +} + + + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wmissing-field-initializers" +# pragma clang diagnostic ignored "-Wwritable-strings" +#elif defined(__GNUC__) +# pragma GCC diagnostic push +// warning: ISO C++ forbids converting a string constant to ‘char*’ +// (The python APIs aren't const correct and accept writable char*) +# pragma GCC diagnostic ignored "-Wwrite-strings" +#endif + + +/*********************************************************** + +A PyGreenlet is a range of C stack addresses that must be +saved and restored in such a way that the full range of the +stack contains valid data when we switch to it. + +Stack layout for a greenlet: + + | ^^^ | + | older data | + | | + stack_stop . |_______________| + . | | + . | greenlet data | + . | in stack | + . * |_______________| . . _____________ stack_copy + stack_saved + . | | | | + . | data | |greenlet data| + . | unrelated | | saved | + . | to | | in heap | + stack_start . | this | . . |_____________| stack_copy + | greenlet | + | | + | newer data | + | vvv | + + +Note that a greenlet's stack data is typically partly at its correct +place in the stack, and partly saved away in the heap, but always in +the above configuration: two blocks, the more recent one in the heap +and the older one still in the stack (either block may be empty). + +Greenlets are chained: each points to the previous greenlet, which is +the one that owns the data currently in the C stack above my +stack_stop. The currently running greenlet is the first element of +this chain. The main (initial) greenlet is the last one. Greenlets +whose stack is entirely in the heap can be skipped from the chain. + +The chain is not related to execution order, but only to the order +in which bits of C stack happen to belong to greenlets at a particular +point in time. + +The main greenlet doesn't have a stack_stop: it is responsible for the +complete rest of the C stack, and we don't know where it begins. We +use (char*) -1, the largest possible address. + +States: + stack_stop == NULL && stack_start == NULL: did not start yet + stack_stop != NULL && stack_start == NULL: already finished + stack_stop != NULL && stack_start != NULL: active + +The running greenlet's stack_start is undefined but not NULL. + + ***********************************************************/ + +/*** global state ***/ + +/* In the presence of multithreading, this is a bit tricky; see + greenlet_thread_state.hpp for details. 
+*/ + + +static inline OwnedObject +single_result(const OwnedObject& results) +{ + if (results + && PyTuple_Check(results.borrow()) + && PyTuple_GET_SIZE(results.borrow()) == 1) { + PyObject* result = PyTuple_GET_ITEM(results.borrow(), 0); + return OwnedObject::owning(result); + } + return results; +} + + + +class ImmortalEventName : public ImmortalString +{ +private: + G_NO_COPIES_OF_CLS(ImmortalEventName); +public: + ImmortalEventName(const char* const str) : ImmortalString(str) + {} +}; + +class ImmortalException : public ImmortalObject +{ +private: + G_NO_COPIES_OF_CLS(ImmortalException); +public: + ImmortalException(const char* const name, PyObject* base=nullptr) : + ImmortalObject(name + // Python 2.7 isn't const correct + ? Require(PyErr_NewException((char*)name, base, nullptr)) + : nullptr) + {} + + inline bool PyExceptionMatches() const + { + return PyErr_ExceptionMatches(this->p) > 0; + } + +}; + +// This encapsulates what were previously module global "constants" +// established at init time. +// This is a step towards Python3 style module state that allows +// reloading. +// We play some tricks with placement new to be able to allocate this +// object statically still, so that references to its members don't +// incur an extra pointer indirection. +class GreenletGlobals +{ +public: + const ImmortalEventName event_switch; + const ImmortalEventName event_throw; + const ImmortalException PyExc_GreenletError; + const ImmortalException PyExc_GreenletExit; + const ImmortalObject empty_tuple; + const ImmortalObject empty_dict; + const ImmortalString str_run; + Mutex* const thread_states_to_destroy_lock; + greenlet::cleanup_queue_t thread_states_to_destroy; + + GreenletGlobals(const int UNUSED(dummy)) : + event_switch(0), + event_throw(0), + PyExc_GreenletError(0), + PyExc_GreenletExit(0), + empty_tuple(0), + empty_dict(0), + str_run(0), + thread_states_to_destroy_lock(0) + {} + + GreenletGlobals() : + event_switch("switch"), + event_throw("throw"), + PyExc_GreenletError("greenlet.error"), + PyExc_GreenletExit("greenlet.GreenletExit", PyExc_BaseException), + empty_tuple(Require(PyTuple_New(0))), + empty_dict(Require(PyDict_New())), + str_run("run"), + thread_states_to_destroy_lock(new Mutex()) + {} + + ~GreenletGlobals() + { + // This object is (currently) effectively immortal, and not + // just because of those placement new tricks; if we try to + // deallocate the static object we allocated, and overwrote, + // we would be doing so at C++ teardown time, which is after + // the final Python GIL is released, and we can't use the API + // then. + // (The members will still be destructed, but they also don't + // do any deallocation.) + } + + void queue_to_destroy(ThreadState* ts) const + { + // we're currently accessed through a static const object, + // implicitly marking our members as const, so code can't just + // call push_back (or pop_back) without casting away the + // const. + // + // Do that for callers. + greenlet::cleanup_queue_t& q = const_cast(this->thread_states_to_destroy); + q.push_back(ts); + } + + ThreadState* take_next_to_destroy() const + { + greenlet::cleanup_queue_t& q = const_cast(this->thread_states_to_destroy); + ThreadState* result = q.back(); + q.pop_back(); + return result; + } +}; + +static const GreenletGlobals mod_globs(0); + +// Protected by the GIL. Incremented when we create a main greenlet, +// in a new thread, decremented when it is destroyed. 
+static Py_ssize_t total_main_greenlets;
+
+struct ThreadState_DestroyWithGIL
+{
+    ThreadState_DestroyWithGIL(ThreadState* state)
+    {
+        if (state && state->has_main_greenlet()) {
+            DestroyWithGIL(state);
+        }
+    }
+
+    static int
+    DestroyWithGIL(ThreadState* state)
+    {
+        // Holding the GIL.
+        // Passed a non-shared pointer to the actual thread state.
+        // state -> main greenlet
+        assert(state->has_main_greenlet());
+        PyGreenlet* main(state->borrow_main_greenlet());
+        // When we need to do cross-thread operations, we check this.
+        // A NULL value means the thread died some time ago.
+        // We do this here, rather than in a Python dealloc function
+        // for the greenlet, in case there's still a reference out
+        // there.
+        static_cast<MainGreenlet*>(main->pimpl)->thread_state(nullptr);
+
+        delete state; // Deleting this runs the destructor, DECREFs the main greenlet.
+        return 0;
+    }
+};
+
+#if (PY_VERSION_HEX >= 0x30800A0 && PY_VERSION_HEX < 0x3090000) && !(defined(_WIN32) || defined(WIN32))
+// XXX: From Python 3.8a3 [1] up until Python 3.9a6 [2][3],
+// ``Py_AddPendingCall`` would try to produce a Python exception if
+// the interpreter was in the beginning of shutting down when this
+// function is called. However, ``Py_AddPendingCall`` doesn't require
+// the GIL, and we are absolutely not holding it when we make that
+// call. That means that trying to create the Python exception is
+// using the C API in an undefined state; here the C API detects this
+// and aborts the process with an error ("Fatal Python error: Python
+// memory allocator called without holding the GIL": Add ->
+// PyErr_SetString -> PyUnicode_New -> PyObject_Malloc). This arises
+// (obviously) in multi-threaded programs and happens if one thread is
+// exiting and cleaning up its thread-local data while the other
+// thread is trying to shut down the interpreter. A crash on shutdown
+// is still a crash and could result in data loss (e.g., daemon
+// threads are still running, pending signal handlers may be present,
+// buffers may not be flushed, there may be __del__ that need run,
+// etc), so we have to work around it.
+//
+// Of course, we can (and do) check for whether the interpreter is
+// shutting down before calling ``Py_AddPendingCall``, but that's a
+// race condition since we don't hold the GIL, and so we may not
+// actually get the right answer. Plus, ``Py_FinalizeEx`` actually
+// calls ``_Py_FinishPendingCalls`` (which sets the pending->finishing
+// flag, which is used to gate creating the exception) *before*
+// publishing any other data that would let us detect the shutdown
+// (such as runtime->finalizing). So that point is moot.
+//
+// Our solution for those versions is to inline the same code, without
+// the problematic bit that sets the exception. Unfortunately, all of
+// the structure definitions are private/opaque, *and* we can't
+// actually count on being able to include their definitions from
+// ``internal/pycore_*``, because on some platforms those header files
+// are incomplete (i.e., on macOS with macports 3.8, the includes are
+// fine, but on Ubuntu jammy with 3.8 from ppa:deadsnakes or GitHub
+// Actions 3.8 (I think it's Ubuntu 18.04), they can't be used; at
+// least, I couldn't get them to work). So we need to define the
+// structures and _PyRuntime data member ourselves. Yet more
+// unfortunately, _PyRuntime won't link on Windows, so we can only do
+// this on other platforms.
+// +// [1] https://github.com/python/cpython/commit/842a2f07f2f08a935ef470bfdaeef40f87490cfc +// [2] https://github.com/python/cpython/commit/cfc3c2f8b34d3864717ab584c5b6c260014ba55a +// [3] https://github.com/python/cpython/issues/81308 +# define GREENLET_BROKEN_PY_ADD_PENDING 1 + +// When defining these structures, the important thing is to get +// binary compatibility, i.e., structure layout. For that, we only +// need to define fields up to the ones we use; after that they're +// irrelevant UNLESS the structure is included in another structure +// *before* the structure we're interested in --- in that case, it +// must be complete. Ellipsis indicate elided trailing members. +// Pointer types are changed to void* to keep from having to define +// more structures. + +// From "internal/pycore_atomic.h" + +// There are several different definitions of this, including the +// plain ``int`` version, a ``volatile int`` and an ``_Atomic int`` +// I don't think any of those change the size/layout. +typedef struct _Py_atomic_int { + volatile int _value; +} _Py_atomic_int; + +// This needs too much infrastructure, so we just do a regular store. +#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \ + (ATOMIC_VAL)->_value = NEW_VAL + + + +// From "internal/pycore_pymem.h" +#define NUM_GENERATIONS 3 + + +struct gc_generation { + PyGC_Head head; // We already have this defined. + int threshold; + int count; +}; +struct gc_generation_stats { + Py_ssize_t collections; + Py_ssize_t collected; + Py_ssize_t uncollectable; +}; + +struct _gc_runtime_state { + void *trash_delete_later; + int trash_delete_nesting; + int enabled; + int debug; + struct gc_generation generations[NUM_GENERATIONS]; + void *generation0; + struct gc_generation permanent_generation; + struct gc_generation_stats generation_stats[NUM_GENERATIONS]; + int collecting; + void *garbage; + void *callbacks; + Py_ssize_t long_lived_total; + Py_ssize_t long_lived_pending; +}; + +// From "internal/pycore_pystate.h" +struct _pending_calls { + int finishing; + PyThread_type_lock lock; + _Py_atomic_int calls_to_do; + int async_exc; +#define NPENDINGCALLS 32 + struct { + int (*func)(void *); + void *arg; + } calls[NPENDINGCALLS]; + int first; + int last; +}; + +struct _ceval_runtime_state { + int recursion_limit; + int tracing_possible; + _Py_atomic_int eval_breaker; + _Py_atomic_int gil_drop_request; + struct _pending_calls pending; + // ... +}; + +typedef struct pyruntimestate { + int preinitializing; + int preinitialized; + int core_initialized; + int initialized; + void *finalizing; + + struct pyinterpreters { + PyThread_type_lock mutex; + void *head; + void *main; + int64_t next_id; + } interpreters; + // XXX Remove this field once we have a tp_* slot. + struct _xidregistry { + PyThread_type_lock mutex; + void *head; + } xidregistry; + + unsigned long main_thread; + +#define NEXITFUNCS 32 + void (*exitfuncs[NEXITFUNCS])(void); + int nexitfuncs; + + struct _gc_runtime_state gc; + struct _ceval_runtime_state ceval; + // ... 
+} _PyRuntimeState;
+
+#define SIGNAL_PENDING_CALLS(ceval) \
+    do { \
+        _Py_atomic_store_relaxed(&(ceval)->pending.calls_to_do, 1); \
+        _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
+    } while (0)
+
+extern _PyRuntimeState _PyRuntime;
+
+#else
+# define GREENLET_BROKEN_PY_ADD_PENDING 0
+#endif
+
+
+struct ThreadState_DestroyNoGIL
+{
+#if GREENLET_BROKEN_PY_ADD_PENDING
+    static int _push_pending_call(struct _pending_calls *pending,
+                                  int (*func)(void *), void *arg)
+    {
+        int i = pending->last;
+        int j = (i + 1) % NPENDINGCALLS;
+        if (j == pending->first) {
+            return -1; /* Queue full */
+        }
+        pending->calls[i].func = func;
+        pending->calls[i].arg = arg;
+        pending->last = j;
+        return 0;
+    }
+
+    static int AddPendingCall(int (*func)(void *), void *arg)
+    {
+        _PyRuntimeState *runtime = &_PyRuntime;
+        if (!runtime) {
+            // obviously impossible
+            return 0;
+        }
+        struct _pending_calls *pending = &runtime->ceval.pending;
+        if (!pending->lock) {
+            return 0;
+        }
+        int result = 0;
+        PyThread_acquire_lock(pending->lock, WAIT_LOCK);
+        if (!pending->finishing) {
+            result = _push_pending_call(pending, func, arg);
+        }
+        PyThread_release_lock(pending->lock);
+        SIGNAL_PENDING_CALLS(&runtime->ceval);
+        return result;
+    }
+#else
+    // Python < 3.8 or >= 3.9
+    static int AddPendingCall(int (*func)(void*), void* arg)
+    {
+        return Py_AddPendingCall(func, arg);
+    }
+#endif
+
+    ThreadState_DestroyNoGIL(ThreadState* state)
+    {
+        // We are *NOT* holding the GIL. Our thread is in the middle
+        // of its death throes and the Python thread state is already
+        // gone so we can't use most Python APIs. One that is safe is
+        // ``Py_AddPendingCall``, unless the interpreter itself has
+        // been torn down. There is a limited number of calls that can
+        // be queued: 32 (NPENDINGCALLS) in CPython 3.10, so we
+        // coalesce these calls using our own queue.
+        if (state && state->has_main_greenlet()) {
+            // mark the thread as dead ASAP.
+            // this is racy! If we try to throw or switch to a
+            // greenlet from this thread from some other thread before
+            // we clear the state pointer, it won't realize the state
+            // is dead which can crash the process.
+            PyGreenlet* p = state->borrow_main_greenlet();
+            assert(p->pimpl->thread_state() == state || p->pimpl->thread_state() == nullptr);
+            static_cast<MainGreenlet*>(p->pimpl)->thread_state(nullptr);
+        }
+
+        // NOTE: Because we're not holding the GIL here, some other
+        // Python thread could run and call ``os.fork()``, which would
+        // be bad if that happened while we are holding the cleanup
+        // lock (it wouldn't function in the child process).
+        // Make a best effort to try to keep the duration we hold the
+        // lock short.
+        // TODO: On platforms that support it, use ``pthread_atfork`` to
+        // drop this lock.
+        LockGuard cleanup_lock(*mod_globs.thread_states_to_destroy_lock);
+
+        if (state && state->has_main_greenlet()) {
+            // Because we don't have the GIL, this is a race condition.
+            if (!PyInterpreterState_Head()) {
+                // We have to leak the thread state: if the
+                // interpreter has shut down when we're getting
+                // deallocated, we can't run the cleanup code that
+                // deleting it would imply.
+                return;
+            }
+
+            mod_globs.queue_to_destroy(state);
+            if (mod_globs.thread_states_to_destroy.size() == 1) {
+                // We added the first item to the queue. We need to schedule
+                // the cleanup.
+                int result = ThreadState_DestroyNoGIL::AddPendingCall(
+                    ThreadState_DestroyNoGIL::DestroyQueueWithGIL,
+                    NULL);
+                if (result < 0) {
+                    // Hmm, what can we do here?
+ fprintf(stderr, + "greenlet: WARNING: failed in call to Py_AddPendingCall; " + "expect a memory leak.\n"); + } + } + } + } + + static int + DestroyQueueWithGIL(void* UNUSED(arg)) + { + // We're holding the GIL here, so no Python code should be able to + // run to call ``os.fork()``. + while (1) { + ThreadState* to_destroy; + { + LockGuard cleanup_lock(*mod_globs.thread_states_to_destroy_lock); + if (mod_globs.thread_states_to_destroy.empty()) { + break; + } + to_destroy = mod_globs.take_next_to_destroy(); + } + // Drop the lock while we do the actual deletion. + ThreadState_DestroyWithGIL::DestroyWithGIL(to_destroy); + } + return 0; + } + +}; + +// The intent when GET_THREAD_STATE() is used multiple times in a function is to +// take a reference to it in a local variable, to avoid the +// thread-local indirection. On some platforms (macOS), +// accessing a thread-local involves a function call (plus an initial +// function call in each function that uses a thread local); in +// contrast, static volatile variables are at some pre-computed offset. + +#if G_USE_STANDARD_THREADING == 1 +typedef greenlet::ThreadStateCreator ThreadStateCreator; +static G_THREAD_LOCAL_VAR ThreadStateCreator g_thread_state_global; +#define GET_THREAD_STATE() g_thread_state_global +#else +// if we're not using standard threading, we're using +// the Python thread-local dictionary to perform our cleanup, +// which means we're deallocated when holding the GIL. The +// thread state is valid enough still for us to destroy +// stuff. +typedef greenlet::ThreadStateCreator ThreadStateCreator; +#define G_THREAD_STATE_DICT_CLEANUP_TYPE +#include "greenlet_thread_state_dict_cleanup.hpp" +typedef greenlet::refs::OwnedReference OwnedGreenletCleanup; +// RECALL: legacy thread-local objects (__thread on GCC, __declspec(thread) on +// MSVC) can't have constructors or destructors, they have to be +// constant. So we indirect through a pointer and a function. +static G_THREAD_LOCAL_VAR ThreadStateCreator* _g_thread_state_global_ptr = nullptr; +static ThreadStateCreator& GET_THREAD_STATE() +{ + if (!_g_thread_state_global_ptr) { + // NOTE: If any of this fails, we'll probably go on to hard + // crash the process, because we're returning a reference to a + // null pointer. we've called Py_FatalError(), but have no way + // to communicate that to the caller. Since these should + // essentially never fail unless the entire process is borked, + // a hard crash with a decent C++ backtrace from the exception + // is much more useful. 
+ _g_thread_state_global_ptr = new ThreadStateCreator(); + if (!_g_thread_state_global_ptr) { + throw PyFatalError("greenlet: Failed to create greenlet thread state."); + } + + OwnedGreenletCleanup cleanup(OwnedGreenletCleanup::consuming(PyType_GenericAlloc(&PyGreenletCleanup_Type, 0))); + if (!cleanup) { + throw PyFatalError("greenlet: Failed to create greenlet thread state cleanup."); + } + + cleanup->thread_state_creator = _g_thread_state_global_ptr; + assert(PyObject_GC_IsTracked(cleanup.borrow_o())); + + PyObject* ts_dict_w = PyThreadState_GetDict(); + if (!ts_dict_w) { + throw PyFatalError("greenlet: Failed to get Python thread state."); + } + if (PyDict_SetItemString(ts_dict_w, "__greenlet_cleanup", cleanup.borrow_o()) < 0) { + throw PyFatalError("greenlet: Failed to save cleanup key in Python thread state."); + } + } + return *_g_thread_state_global_ptr; +} +#endif + + +Greenlet::Greenlet(PyGreenlet* p) +{ + p ->pimpl = this; +} + +Greenlet::Greenlet(PyGreenlet* p, const StackState& initial_stack) + : stack_state(initial_stack) +{ + // can't use a delegating constructor because of + // MSVC for Python 2.7 + p->pimpl = this; +} + +UserGreenlet::UserGreenlet(PyGreenlet* p,BorrowedGreenlet the_parent) + : Greenlet(p), _parent(the_parent) +{ + this->_self = p; +} + + +MainGreenlet::MainGreenlet(PyGreenlet* p, ThreadState* state) + : Greenlet(p, StackState::make_main()), + _self(p), + _thread_state(state) +{ + total_main_greenlets++; +} + +ThreadState* +MainGreenlet::thread_state() const G_NOEXCEPT +{ + return this->_thread_state; +} + +void +MainGreenlet::thread_state(ThreadState* t) G_NOEXCEPT +{ + assert(!t); + this->_thread_state = t; +} + +BorrowedGreenlet +UserGreenlet::self() const G_NOEXCEPT +{ + return this->_self; +} + +BorrowedGreenlet +MainGreenlet::self() const G_NOEXCEPT +{ + return BorrowedGreenlet(this->_self.borrow()); +} + +const BorrowedMainGreenlet +UserGreenlet::main_greenlet() const +{ + return this->_main_greenlet; +} + +const BorrowedMainGreenlet +MainGreenlet::main_greenlet() const +{ + return this->_self; +} + +static PyGreenlet* +green_create_main(ThreadState* state) +{ + PyGreenlet* gmain; + + /* create the main greenlet for this thread */ + gmain = (PyGreenlet*)PyType_GenericAlloc(&PyGreenlet_Type, 0); + if (gmain == NULL) { + Py_FatalError("green_create_main failed to alloc"); + return NULL; + } + new MainGreenlet(gmain, state); + + assert(Py_REFCNT(gmain) == 1); + return gmain; +} + + +BorrowedMainGreenlet +UserGreenlet::find_main_greenlet_in_lineage() const +{ + if (this->started()) { + assert(this->_main_greenlet); + return BorrowedMainGreenlet(this->_main_greenlet); + } + + if (!this->_parent) { + /* garbage collected greenlet in chain */ + // XXX: WHAT? 
+ return BorrowedMainGreenlet(nullptr); + } + + return this->_parent->find_main_greenlet_in_lineage(); +} + + +BorrowedMainGreenlet +MainGreenlet::find_main_greenlet_in_lineage() const +{ + return BorrowedMainGreenlet(this->_self); +} + +/***********************************************************/ + +/* Some functions must not be inlined: + * slp_restore_state, when inlined into slp_switch might cause + it to restore stack over its own local variables + * slp_save_state, when inlined would add its own local + variables to the saved stack, wasting space + * slp_switch, cannot be inlined for obvious reasons + * g_initialstub, when inlined would receive a pointer into its + own stack frame, leading to incomplete stack save/restore + +g_initialstub is a member function and declared virtual so that the +compiler always calls it through a vtable. + +slp_save_state and slp_restore_state are also member functions. They +are called from trampoline functions that themselves are declared as +not eligible for inlining. +*/ + + + +/* add forward declarations */ + + +static void +g_calltrace(const OwnedObject& tracefunc, + const ImmortalEventName& event, + const BorrowedGreenlet& origin, + const BorrowedGreenlet& target); + +static OwnedObject +g_handle_exit(const OwnedObject& greenlet_result); + + + + + +/** + * CAUTION: May invoke arbitrary Python code. + * + * Figure out what the result of ``greenlet.switch(arg, kwargs)`` + * should be and transfers ownership of it to the left-hand-side. + * + * If switch() was just passed an arg tuple, then we'll just return that. + * If only keyword arguments were passed, then we'll pass the keyword + * argument dict. Otherwise, we'll create a tuple of (args, kwargs) and + * return both. + */ +OwnedObject& operator<<=(OwnedObject& lhs, greenlet::SwitchingArgs& rhs) G_NOEXCEPT +{ + // Because this may invoke arbitrary Python code, which could + // result in switching back to us, we need to get the + // arguments locally on the stack. + assert(rhs); + OwnedObject args = rhs.args(); + OwnedObject kwargs = rhs.kwargs(); + rhs.CLEAR(); + // We shouldn't be called twice for the same switch. + assert(args || kwargs); + assert(!rhs); + + if (!kwargs) { + lhs = args; + } + else if (!PyDict_Size(kwargs.borrow())) { + lhs = args; + } + else if (!PySequence_Length(args.borrow())) { + lhs = kwargs; + } + else { + lhs = OwnedObject::consuming(PyTuple_Pack(2, args.borrow(), kwargs.borrow())); + } + return lhs; +} + + + +void Greenlet::release_args() +{ + this->switch_args.CLEAR(); +} + + +void* UserGreenlet::operator new(size_t UNUSED(count)) +{ + return allocator.allocate(1); +} + + +void UserGreenlet::operator delete(void* ptr) +{ + return allocator.deallocate(static_cast(ptr), + 1); +} + +void* MainGreenlet::operator new(size_t UNUSED(count)) +{ + return allocator.allocate(1); +} + + +void MainGreenlet::operator delete(void* ptr) +{ + return allocator.deallocate(static_cast(ptr), + 1); +} + + +OwnedObject +Greenlet::throw_GreenletExit_during_dealloc(const ThreadState& UNUSED(current_thread_state)) +{ + // If we're killed because we lost all references in the + // middle of a switch, that's ok. Don't reset the args/kwargs, + // we still want to pass them to the parent. 
+ PyErr_SetString(mod_globs.PyExc_GreenletExit, + "Killing the greenlet because all references have vanished."); + // To get here it had to have run before + return this->g_switch(); +} + +OwnedObject +UserGreenlet::throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state) +{ + /* The dying greenlet cannot be a parent of ts_current + because the 'parent' field chain would hold a + reference */ + UserGreenlet::ParentIsCurrentGuard with_current_parent(this, current_thread_state); + + // We don't care about the return value, only whether an + // exception happened. Whether or not an exception happens, + // we need to restore the parent in case the greenlet gets + // resurrected. + return Greenlet::throw_GreenletExit_during_dealloc(current_thread_state); +} + +ThreadState* +UserGreenlet::thread_state() const G_NOEXCEPT +{ + // TODO: maybe make this throw, if the thread state isn't there? + // if (!this->main_greenlet) { + // throw std::runtime_error("No thread state"); // TODO: Better exception + // } + if (!this->_main_greenlet) { + return nullptr; + } + return this->_main_greenlet->thread_state(); +} + + + +bool +UserGreenlet::was_running_in_dead_thread() const G_NOEXCEPT +{ + return this->_main_greenlet && !this->thread_state(); +} + +bool +MainGreenlet::was_running_in_dead_thread() const G_NOEXCEPT +{ + return !this->_thread_state; +} + +inline void +Greenlet::slp_restore_state() G_NOEXCEPT +{ +#ifdef SLP_BEFORE_RESTORE_STATE + SLP_BEFORE_RESTORE_STATE(); +#endif + this->stack_state.copy_heap_to_stack( + this->thread_state()->borrow_current()->stack_state); +} + + +inline int +Greenlet::slp_save_state(char *const stackref) G_NOEXCEPT +{ + // XXX: This used to happen in the middle, before saving, but + // after finding the next owner. Does that matter? This is + // only defined for Sparc/GCC where it flushes register + // windows to the stack (I think) +#ifdef SLP_BEFORE_SAVE_STATE + SLP_BEFORE_SAVE_STATE(); +#endif + return this->stack_state.copy_stack_to_heap(stackref, + this->thread_state()->borrow_current()->stack_state); +} + + +OwnedObject +UserGreenlet::g_switch() +{ + try { + this->check_switch_allowed(); + } + catch(const PyErrOccurred&) { + this->release_args(); + throw; + } + + // Switching greenlets used to attempt to clean out ones that need + // deleted *if* we detected a thread switch. Should it still do + // that? + // An issue is that if we delete a greenlet from another thread, + // it gets queued to this thread, and ``kill_greenlet()`` switches + // back into the greenlet + + /* find the real target by ignoring dead greenlets, + and if necessary starting a greenlet. */ + switchstack_result_t err; + Greenlet* target = this; + // TODO: probably cleaner to handle the case where we do + // switch to ourself separately from the other cases. + // This can probably even further be simplified if we keep + // track of the switching_state we're going for and just call + // into g_switch() if it's not ourself. The main problem with that + // is that we would be using more stack space. + bool target_was_me = true; + while (target) { + + if (target->active()) { + if (!target_was_me) { + target->args() <<= this->switch_args; + assert(!this->switch_args); + } + err = target->g_switchstack(); + break; + } + if (!target->started()) { + // We never encounter a main greenlet that's not started. 
+            assert(!target->main());
+            UserGreenlet* real_target = static_cast<UserGreenlet*>(target);
+            assert(real_target);
+            void* dummymarker;
+
+            if (!target_was_me) {
+                target->args() <<= this->switch_args;
+                assert(!this->switch_args);
+            }
+
+            try {
+                // This can only throw back to us while we're
+                // still in this greenlet. Once the new greenlet
+                // is bootstrapped, it has its own exception state.
+                err = real_target->g_initialstub(&dummymarker);
+            }
+            catch (const PyErrOccurred&) {
+                this->release_args();
+                throw;
+            }
+            catch (const GreenletStartedWhileInPython&) {
+                // The greenlet was started sometime before this
+                // greenlet actually switched to it, i.e.,
+                // "concurrent" calls to switch() or throw().
+                // We need to retry the switch.
+                // Note that the current greenlet has been reset
+                // to this one (or we wouldn't be running!)
+                continue;
+            }
+            break;
+        }
+
+        target = target->parent();
+        target_was_me = false;
+    }
+    // The this pointer and all other stack or register based
+    // variables are invalid now, at least where things succeed
+    // above.
+    // But this one, probably not so much? It's not clear if it's
+    // safe to throw an exception at this point.
+
+    if (err.status < 0) {
+        // XXX: This code path is untested.
+        assert(PyErr_Occurred());
+        assert(!err.the_state_that_switched);
+        assert(!err.origin_greenlet);
+        return OwnedObject();
+    }
+
+    return err.the_state_that_switched->g_switch_finish(err);
+}
+
+OwnedObject
+MainGreenlet::g_switch()
+{
+    try {
+        this->check_switch_allowed();
+    }
+    catch(const PyErrOccurred&) {
+        this->release_args();
+        throw;
+    }
+
+    switchstack_result_t err = this->g_switchstack();
+    if (err.status < 0) {
+        // XXX: This code path is untested.
+        assert(PyErr_Occurred());
+        assert(!err.the_state_that_switched);
+        assert(!err.origin_greenlet);
+        return OwnedObject();
+    }
+
+    return err.the_state_that_switched->g_switch_finish(err);
+}
+
+
+OwnedGreenlet
+Greenlet::g_switchstack_success() G_NOEXCEPT
+{
+    PyThreadState* tstate = PyThreadState_GET();
+    // restore the saved state
+    this->python_state >> tstate;
+    this->exception_state >> tstate;
+
+    // The thread state hasn't been changed yet.
+    ThreadState* thread_state = this->thread_state();
+    OwnedGreenlet result(thread_state->get_current());
+    thread_state->set_current(this->self());
+    //assert(thread_state->borrow_current().borrow() == this->_self);
+    return result;
+}
+
+
+Greenlet::switchstack_result_t
+UserGreenlet::g_initialstub(void* mark)
+{
+    OwnedObject run;
+
+    // We need to grab a reference to the current switch arguments
+    // in case we're entered concurrently during the call to
+    // GetAttr() and have to try again.
+    // We'll restore them when we return in that case.
+    // Scope them tightly to avoid ref leaks.
+    {
+        SwitchingArgs args(this->switch_args);
+
+        /* save exception in case getattr clears it */
+        PyErrPieces saved;
+
+        /*
+          self.run is the object to call in the new greenlet.
+          This could run arbitrary python code and switch greenlets!
+        */
+        run = this->_self.PyRequireAttr(mod_globs.str_run);
+
+        /* restore saved exception */
+        saved.PyErrRestore();
+
+
+        /* recheck that it's safe to switch in case greenlet reparented anywhere above */
+        this->check_switch_allowed();
+
+        /* by the time we got here another start could happen elsewhere,
+         * that means it should now be a regular switch.
+ * This can happen if the Python code is a subclass that implements + * __getattribute__ or __getattr__, or makes ``run`` a descriptor; + * all of those can run arbitrary code that switches back into + * this greenlet. + */ + if (this->stack_state.started()) { + // the successful switch cleared these out, we need to + // restore our version. + assert(!this->switch_args); + this->switch_args <<= args; + + throw GreenletStartedWhileInPython(); + } + } + + // Sweet, if we got here, we have the go-ahead and will switch + // greenlets. + // Nothing we do from here on out should allow for a thread or + // greenlet switch: No arbitrary calls to Python, including + // decref'ing + +#if GREENLET_USE_CFRAME + /* OK, we need it, we're about to switch greenlets, save the state. */ + /* + See green_new(). This is a stack-allocated variable used + while *self* is in PyObject_Call(). + We want to defer copying the state info until we're sure + we need it and are in a stable place to do so. + */ + _PyCFrame trace_info; + + this->python_state.set_new_cframe(trace_info); +#endif + /* start the greenlet */ + ThreadState& thread_state = GET_THREAD_STATE().state(); + this->stack_state = StackState(mark, + thread_state.borrow_current()->stack_state); + this->python_state.set_initial_state(PyThreadState_GET()); + this->exception_state.clear(); + this->_main_greenlet = thread_state.get_main_greenlet(); + + /* perform the initial switch */ + switchstack_result_t err = this->g_switchstack(); + /* returns twice! + The 1st time with ``err == 1``: we are in the new greenlet. + This one owns a greenlet that used to be current. + The 2nd time with ``err <= 0``: back in the caller's + greenlet; this happens if the child finishes or switches + explicitly to us. Either way, the ``err`` variable is + created twice at the same memory location, but possibly + having different ``origin`` values. Note that it's not + constructed for the second time until the switch actually happens. + */ + if (err.status == 1) { + // This never returns! Calling inner_bootstrap steals + // the contents of our run object within this stack frame, so + // it is not valid to do anything with it. + this->inner_bootstrap(err.origin_greenlet, run); + Py_FatalError("greenlet: inner_bootstrap returned\n"); + } + // The child will take care of decrefing this. + run.relinquish_ownership(); + + // In contrast, notice that we're keeping the origin greenlet + // around as an owned reference; we need it to call the trace + // function for the switch back into the parent. It was only + // captured at the time the switch actually happened, though, + // so we haven't been keeping an extra reference around this + // whole time. + + /* back in the parent */ + if (err.status < 0) { + /* start failed badly, restore greenlet state */ + // XXX: This code path is not tested. + this->stack_state = StackState(); + this->_main_greenlet.CLEAR(); + fprintf(stderr, "greenlet: g_initialstub: starting child failed.\n"); + } + return err; +} + + +void +UserGreenlet::inner_bootstrap(OwnedGreenlet& origin_greenlet, OwnedObject& _run) G_NOEXCEPT_WIN32 +{ + // The arguments here would be another great place for move. + // As it is, we take them as a reference so that when we clear + // them we clear what's on the stack above us. Do that NOW, and + // without using a C++ RAII object, + // so there's no way that exiting the parent frame can clear it, + // or we clear it unexpectedly. This arises in the context of the + // interpreter shutting down. 
See https://github.com/python-greenlet/greenlet/issues/325
+    PyObject* run = _run.relinquish_ownership();
+
+    /* in the new greenlet */
+    assert(this->thread_state()->borrow_current() == this->_self);
+    // C++ exceptions cannot propagate to the parent greenlet from
+    // here. (TODO: Do we need a catch(...) clause, perhaps on the
+    // function itself? All we could do is terminate the program.)
+    // NOTE: On 32-bit Windows, the call chain is extremely
+    // important here in ways that are subtle, having to do with
+    // the depth of the SEH list. The call to restore it MUST NOT
+    // add a new SEH handler to the list, or we'll restore it to
+    // the wrong thing.
+    this->thread_state()->restore_exception_state();
+    /* stack variables from above are no good and also will not unwind! */
+    // EXCEPT: That can't be true, we access run, among others, here.
+
+    this->stack_state.set_active(); /* running */
+
+    // XXX: We could clear this much earlier, right?
+    // Or would that introduce the possibility of running Python
+    // code when we don't want to?
+    this->_run_callable.CLEAR();
+
+
+    // We're about to possibly run Python code again, which
+    // could switch back to us, so we need to grab the
+    // arguments locally.
+    SwitchingArgs args;
+    args <<= this->switch_args;
+    assert(!this->switch_args);
+
+    // On the first switch, we need to manually call the trace
+    // function here instead of in g_switch_finish, because we
+    // never return there.
+
+    if (OwnedObject tracefunc = this->thread_state()->get_tracefunc()) {
+        try {
+            g_calltrace(tracefunc,
+                        args ? mod_globs.event_switch : mod_globs.event_throw,
+                        origin_greenlet,
+                        this->_self);
+        }
+        catch (const PyErrOccurred&) {
+            /* Turn trace errors into switch throws */
+            args.CLEAR();
+        }
+    }
+
+    // We no longer need the origin, it was only here for
+    // tracing.
+    // We may never actually exit this stack frame so we need
+    // to explicitly clear it.
+    // This could run Python code and switch.
+    origin_greenlet.CLEAR();
+
+    OwnedObject result;
+    if (!args) {
+        /* pending exception */
+        result = NULL;
+    }
+    else {
+        /* call g.run(*args, **kwargs) */
+        // This could result in further switches
+        try {
+            //result = run.PyCall(args.args(), args.kwargs());
+            result = OwnedObject::consuming(PyObject_Call(run, args.args().borrow(), args.kwargs().borrow()));
+        }
+        catch(...) {
+            // Unhandled C++ exception!
+
+            // Since we declare ourselves as noexcept, if we don't catch
+            // this here, most platforms will just abort() the
+            // process. But on 64-bit Windows with older versions of
+            // the C runtime, this can actually corrupt memory and
+            // just return. We see this when compiling with the
+            // Windows 7.0 SDK targeting Windows Server 2008, but not
+            // when using the Appveyor Visual Studio 2019 image. So
+            // this currently only affects Python 2.7 on Windows 64.
+            // That is, the tests pass and the runtime aborts
+            // everywhere else.
+            //
+            // However, if we catch it and try to continue with a
+            // Python error, then all Windows 64 bit platforms corrupt
+            // memory. So all we can do is manually abort, hopefully
+            // with a good error message. (Note that the above was
+            // tested WITHOUT the `/EHr` switch being used at compile
+            // time, so MSVC may have "optimized" out important
+            // checking. Using that switch, we may be in a better
+            // place in terms of memory corruption.) But sometimes it
+            // can't be caught here at all, which is confusing but not
+            // terribly surprising; so again, the G_NOEXCEPT_WIN32
+            // plus "/EHr".
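+            // (Clarifying note, not from the upstream sources: MSVC's
+            // ``/EHr`` option forces runtime termination checks for
+            // noexcept functions, which is why it is relevant here.)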
+ // + // Hopefully the basic C stdlib is still functional enough + // for us to at least print an error. + // + // It gets more complicated than that, though, on some + // platforms, specifically at least Linux/gcc/libstdc++. They use + // an exception to unwind the stack when a background + // thread exits. (See comments about G_NOEXCEPT.) So this + // may not actually represent anything untoward. On those + // platforms we allow throws of this to propagate, or + // attempt to anyway. +# if defined(WIN32) || defined(_WIN32) + Py_FatalError( + "greenlet: Unhandled C++ exception from a greenlet run function. " + "Because memory is likely corrupted, terminating process."); + std::abort(); +#else + throw; +#endif + } + } + args.CLEAR(); + Py_CLEAR(run); + + if (!result + && mod_globs.PyExc_GreenletExit.PyExceptionMatches() + && (this->switch_args)) { + // This can happen, for example, if our only reference + // goes away after we switch back to the parent. + // See test_dealloc_switch_args_not_lost + PyErrPieces clear_error; + result <<= this->switch_args; + result = single_result(result); + } + this->release_args(); + this->python_state.did_finish(PyThreadState_GET()); + + result = g_handle_exit(result); + assert(this->thread_state()->borrow_current() == this->_self); + + /* jump back to parent */ + this->stack_state.set_inactive(); /* dead */ + + + // TODO: Can we decref some things here? Release our main greenlet + // and maybe parent? + for (Greenlet* parent = this->_parent; + parent; + parent = parent->parent()) { + // We need to somewhere consume a reference to + // the result; in most cases we'll never have control + // back in this stack frame again. Calling + // green_switch actually adds another reference! + // This would probably be clearer with a specific API + // to hand results to the parent. + parent->args() <<= result; + assert(!result); + // The parent greenlet now owns the result; in the + // typical case we'll never get back here to assign to + // result and thus release the reference. + try { + result = parent->g_switch(); + } + catch (const PyErrOccurred&) { + // Ignore. + } + + /* Return here means switch to parent failed, + * in which case we throw *current* exception + * to the next parent in chain. + */ + assert(!result); + } + /* We ran out of parents, cannot continue */ + PyErr_WriteUnraisable(this->self().borrow_o()); + Py_FatalError("greenlet: ran out of parent greenlets while propagating exception; " + "cannot continue"); + std::abort(); +} + + +Greenlet::switchstack_result_t +Greenlet::g_switchstack(void) +{ + { /* save state */ + if (this->thread_state()->is_current(this->self())) { + // Hmm, nothing to do. + // TODO: Does this bypass trace events that are + // important? + return switchstack_result_t(0, + this, this->thread_state()->borrow_current()); + } + BorrowedGreenlet current = this->thread_state()->borrow_current(); + PyThreadState* tstate = PyThreadState_GET(); + current->python_state << tstate; + current->exception_state << tstate; + this->python_state.will_switch_from(tstate); + switching_thread_state = this; + } + // If this is the first switch into a greenlet, this will + // return twice, once with 1 in the new greenlet, once with 0 + // in the origin. + int err = slp_switch(); + + if (err < 0) { /* error */ + // XXX: This code path is not tested. + BorrowedGreenlet current(GET_THREAD_STATE().state().borrow_current()); + //current->top_frame = NULL; // This probably leaks? 
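+        // (Clarifying note, not from the upstream sources: on this failure
+        // path the switch never happened, so presumably we are still on the
+        // original greenlet's stack and only need to undo the bookkeeping
+        // performed before ``slp_switch()`` above.)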
+        current->exception_state.clear();
+
+        switching_thread_state = nullptr;
+        //GET_THREAD_STATE().state().wref_target(NULL);
+        this->release_args();
+        // It's important to make sure not to actually return an
+        // owned greenlet here, no telling how long before it
+        // could be cleaned up.
+        // TODO: Can this be a throw? How stable is the stack in
+        // an error case like this?
+        return switchstack_result_t(err);
+    }
+
+    // No stack-based variables are valid anymore.
+
+    // But the global is volatile so we can reload it without the
+    // compiler caching it from earlier.
+    Greenlet* after_switch = switching_thread_state;
+    OwnedGreenlet origin = after_switch->g_switchstack_success();
+    switching_thread_state = nullptr;
+    return switchstack_result_t(err, after_switch, origin);
+}
+
+
+inline void
+Greenlet::check_switch_allowed() const
+{
+    // TODO: Make this take a parameter of the current greenlet,
+    // or current main greenlet, to make the check for
+    // cross-thread switching cheaper. Surely somewhere up the
+    // call stack we've already accessed the thread local variable.
+
+    // We expect to always have a main greenlet now; accessing the thread state
+    // created it. However, if we get here and cleanup has already
+    // begun because we're a greenlet that was running in a
+    // (now dead) thread, these invariants will not hold true. In
+    // fact, accessing `this->thread_state` may not even be possible.
+
+    // If the thread this greenlet was running in is dead,
+    // we'll still have a reference to a main greenlet, but the
+    // thread state pointer we have is bogus.
+    // TODO: Give the objects an API to determine if they belong
+    // to a dead thread.
+
+    const BorrowedMainGreenlet main_greenlet = this->find_main_greenlet_in_lineage();
+
+    if (!main_greenlet) {
+        throw PyErrOccurred(mod_globs.PyExc_GreenletError,
+                            "cannot switch to a garbage collected greenlet");
+    }
+
+    if (!main_greenlet->thread_state()) {
+        throw PyErrOccurred(mod_globs.PyExc_GreenletError,
+                            "cannot switch to a different thread (which happens to have exited)");
+    }
+
+    // The main greenlet we found was from the .parent lineage.
+    // That may or may not have any relationship to the main
+    // greenlet of the running thread. We can't actually access
+    // our this->thread_state members to try to check that,
+    // because it could be in the process of getting destroyed,
+    // but setting the main_greenlet->thread_state member to NULL
+    // may not be visible yet. So we need to check against the
+    // current thread state (once the cheaper checks are out of
+    // the way)
+    const BorrowedMainGreenlet current_main_greenlet = GET_THREAD_STATE().state().borrow_main_greenlet();
+    if (
+        // lineage main greenlet is not this thread's greenlet
+        current_main_greenlet != main_greenlet
+        || (
+            // attached to some thread
+            this->main_greenlet()
+            // XXX: Same condition as above. Was this supposed to be
+            // this->main_greenlet()?
+            && current_main_greenlet != main_greenlet)
+        // switching into a known dead thread (XXX: which, if we get here,
+        // is bad, because we just accessed the thread state, which is
+        // gone!)
+        || (!current_main_greenlet->thread_state())) {
+        throw PyErrOccurred(mod_globs.PyExc_GreenletError,
+                            "cannot switch to a different thread");
+    }
+}
+
+
+OwnedObject
+Greenlet::g_switch_finish(const switchstack_result_t& err)
+{
+
+    ThreadState& state = *this->thread_state();
+    try {
+        // Our only caller handles the bad error case
+        assert(err.status >= 0);
+        assert(state.borrow_current() == this->self());
+
+        if (OwnedObject tracefunc = state.get_tracefunc()) {
+            g_calltrace(tracefunc,
+                        this->args() ? mod_globs.event_switch : mod_globs.event_throw,
+                        err.origin_greenlet,
+                        this->self());
+        }
+        // The above could have invoked arbitrary Python code, but
+        // it couldn't switch back to this object and *also*
+        // throw an exception, so the args won't have changed.
+
+        if (PyErr_Occurred()) {
+            // We get here if we fell off the end of the run() function
+            // raising an exception. The switch itself was
+            // successful, but the function raised.
+            // valgrind reports that memory allocated here can still
+            // be reached after a test run.
+            throw PyErrOccurred();
+        }
+
+        OwnedObject result;
+        result <<= this->switch_args;
+        assert(!this->switch_args);
+        return result;
+    }
+    catch (const PyErrOccurred&) {
+        /* Turn switch errors into switch throws */
+        /* Turn trace errors into switch throws */
+        this->release_args();
+        throw;
+    }
+}
+
+
+greenlet::PythonAllocator<UserGreenlet> UserGreenlet::allocator;
+greenlet::PythonAllocator<MainGreenlet> MainGreenlet::allocator;
+
+
+extern "C" {
+static int GREENLET_NOINLINE(slp_save_state_trampoline)(char* stackref)
+{
+    return switching_thread_state->slp_save_state(stackref);
+}
+static void GREENLET_NOINLINE(slp_restore_state_trampoline)()
+{
+    switching_thread_state->slp_restore_state();
+}
+}
+
+
+
+/***********************************************************/
+
+class TracingGuard
+{
+private:
+    PyThreadState* tstate;
+public:
+    TracingGuard()
+        : tstate(PyThreadState_GET())
+    {
+        PyThreadState_EnterTracing(this->tstate);
+    }
+
+    ~TracingGuard()
+    {
+        PyThreadState_LeaveTracing(this->tstate);
+        this->tstate = nullptr;
+    }
+
+    inline void CallTraceFunction(const OwnedObject& tracefunc,
+                                  const ImmortalEventName& event,
+                                  const BorrowedGreenlet& origin,
+                                  const BorrowedGreenlet& target)
+    {
+        // TODO: This calls tracefunc(event, (origin, target)). Add a shortcut
+        // function for that that's specialized to avoid the Py_BuildValue
+        // string parsing, or start with just using "ON" format with PyTuple_Pack(2,
+        // origin, target). That seems like what the N format is meant
+        // for.
+        // XXX: Why does event not automatically cast back to a PyObject?
+        // It tries to call the "deleted constructor ImmortalEventName
+        // const" instead.
+        assert(tracefunc);
+        assert(event);
+        assert(origin);
+        assert(target);
+        NewReference retval(PyObject_CallFunction(tracefunc.borrow(),
+                                                  "O(OO)",
+                                                  event.borrow(),
+                                                  origin.borrow(),
+                                                  target.borrow()));
+        if (!retval) {
+            throw PyErrOccurred();
+        }
+    }
+};
+
+static void
+g_calltrace(const OwnedObject& tracefunc,
+            const ImmortalEventName& event,
+            const BorrowedGreenlet& origin,
+            const BorrowedGreenlet& target)
+{
+    PyErrPieces saved_exc;
+    try {
+        TracingGuard tracing_guard;
+        tracing_guard.CallTraceFunction(tracefunc, event, origin, target);
+    }
+    catch (const PyErrOccurred&) {
+        // In case of exceptions, the trace function is removed,
+        // and any existing exception is replaced with the tracing
+        // exception.
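+        // (Illustrative note, not from the upstream sources: a callback
+        // installed with ``greenlet.settrace()`` is invoked as
+        // ``tracefunc(event, (origin, target))``, with ``event`` being
+        // "switch" or "throw"; an exception it raises lands here and
+        // uninstalls the callback.)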
+ GET_THREAD_STATE().state().set_tracefunc(Py_None); + throw; + } + + saved_exc.PyErrRestore(); +} + + + +static OwnedObject +g_handle_exit(const OwnedObject& greenlet_result) +{ + if (!greenlet_result && mod_globs.PyExc_GreenletExit.PyExceptionMatches()) { + /* catch and ignore GreenletExit */ + PyErrFetchParam val; + PyErr_Fetch(PyErrFetchParam(), val, PyErrFetchParam()); + if (!val) { + return OwnedObject::None(); + } + return OwnedObject(val); + } + + if (greenlet_result) { + // package the result into a 1-tuple + // PyTuple_Pack increments the reference of its arguments, + // so we always need to decref the greenlet result; + // the owner will do that. + return OwnedObject::consuming(PyTuple_Pack(1, greenlet_result.borrow())); + } + + return OwnedObject(); +} + + + +/***********************************************************/ + +static PyGreenlet* +green_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds)) +{ + PyGreenlet* o = + (PyGreenlet*)PyBaseObject_Type.tp_new(type, mod_globs.empty_tuple, mod_globs.empty_dict); + if (o) { + new UserGreenlet(o, GET_THREAD_STATE().state().borrow_current()); + assert(Py_REFCNT(o) == 1); + } + return o; +} + +static int +green_setrun(BorrowedGreenlet self, BorrowedObject nrun, void* c); +static int +green_setparent(BorrowedGreenlet self, BorrowedObject nparent, void* c); + +static int +green_init(BorrowedGreenlet self, BorrowedObject args, BorrowedObject kwargs) +{ + PyArgParseParam run; + PyArgParseParam nparent; + static const char* const kwlist[] = { + "run", + "parent", + NULL + }; + + // recall: The O specifier does NOT increase the reference count. + if (!PyArg_ParseTupleAndKeywords( + args, kwargs, "|OO:green", (char**)kwlist, &run, &nparent)) { + return -1; + } + + if (run) { + if (green_setrun(self, run, NULL)) { + return -1; + } + } + if (nparent && !nparent.is_None()) { + return green_setparent(self, nparent, NULL); + } + return 0; +} + + +UserGreenlet::ParentIsCurrentGuard::ParentIsCurrentGuard(UserGreenlet* p, + const ThreadState& thread_state) + : oldparent(p->_parent), + greenlet(p) +{ + p->_parent = thread_state.get_current(); +} + +UserGreenlet::ParentIsCurrentGuard::~ParentIsCurrentGuard() +{ + this->greenlet->_parent = oldparent; + oldparent.CLEAR(); +} + + +void +Greenlet::murder_in_place() +{ + if (this->active()) { + assert(!this->is_currently_running_in_some_thread()); + this->deactivate_and_free(); + } +} + +void +UserGreenlet::murder_in_place() +{ + this->_main_greenlet.CLEAR(); + Greenlet::murder_in_place(); +} + +inline void +Greenlet::deactivate_and_free() +{ + if (!this->active()) { + return; + } + // Throw away any saved stack. + this->stack_state = StackState(); + assert(!this->stack_state.active()); + // Throw away any Python references. + // We're holding a borrowed reference to the last + // frame we executed. Since we borrowed it, the + // normal traversal, clear, and dealloc functions + // ignore it, meaning it leaks. (The thread state + // object can't find it to clear it when that's + // deallocated either, because by definition if we + // got an object on this list, it wasn't + // running and the thread state doesn't have + // this frame.) + // So here, we *do* clear it. + this->python_state.tp_clear(true); +} + +bool +Greenlet::belongs_to_thread(const ThreadState* thread_state) const +{ + if (!this->thread_state() // not running anywhere, or thread + // exited + || !thread_state) { // same, or there is no thread state. 
+ return false; + } + return true; +} + +bool +UserGreenlet::belongs_to_thread(const ThreadState* thread_state) const +{ + return Greenlet::belongs_to_thread(thread_state) && this->_main_greenlet == thread_state->borrow_main_greenlet(); +} + +void +Greenlet::deallocing_greenlet_in_thread(const ThreadState* current_thread_state) +{ + /* Cannot raise an exception to kill the greenlet if + it is not running in the same thread! */ + if (this->belongs_to_thread(current_thread_state)) { + assert(current_thread_state); + // To get here it had to have run before + /* Send the greenlet a GreenletExit exception. */ + + // We don't care about the return value, only whether an + // exception happened. + this->throw_GreenletExit_during_dealloc(*current_thread_state); + return; + } + + // Not the same thread! Temporarily save the greenlet + // into its thread's deleteme list, *if* it exists. + // If that thread has already exited, and processed its pending + // cleanup, we'll never be able to clean everything up: we won't + // be able to raise an exception. + // That's mostly OK! Since we can't add it to a list, our refcount + // won't increase, and we'll go ahead with the DECREFs later. + ThreadState *const thread_state = this->thread_state(); + if (thread_state) { + thread_state->delete_when_thread_running(this->self()); + } + else { + // The thread is dead, we can't raise an exception. + // We need to make it look non-active, though, so that dealloc + // finishes killing it. + this->deactivate_and_free(); + } + return; +} + + +int +Greenlet::tp_traverse(visitproc visit, void* arg) +{ + + int result; + if ((result = this->exception_state.tp_traverse(visit, arg)) != 0) { + return result; + } + //XXX: This is ugly. But so is handling everything having to do + //with the top frame. + bool visit_top_frame = this->was_running_in_dead_thread(); + // When true, the thread is dead. Our implicit weak reference to the + // frame is now all that's left; we consider ourselves to + // strongly own it now. + if ((result = this->python_state.tp_traverse(visit, arg, visit_top_frame)) != 0) { + return result; + } + return 0; +} + +int +UserGreenlet::tp_traverse(visitproc visit, void* arg) +{ + Py_VISIT(this->_parent.borrow_o()); + Py_VISIT(this->_main_greenlet.borrow_o()); + Py_VISIT(this->_run_callable.borrow_o()); + + return Greenlet::tp_traverse(visit, arg); +} + +int +MainGreenlet::tp_traverse(visitproc visit, void* arg) +{ + if (this->_thread_state) { + // we've already traversed main, (self), don't do it again. + int result = this->_thread_state->tp_traverse(visit, arg, false); + if (result) { + return result; + } + } + return Greenlet::tp_traverse(visit, arg); +} + +static int +green_traverse(PyGreenlet* self, visitproc visit, void* arg) +{ + // We must only visit referenced objects, i.e. only objects + // Py_INCREF'ed by this greenlet (directly or indirectly): + // + // - stack_prev is not visited: holds previous stack pointer, but it's not + // referenced + // - frames are not visited as we don't strongly reference them; + // alive greenlets are not garbage collected + // anyway. This can be a problem, however, if this greenlet is + // never allowed to finish, and is referenced from the frame: we + // have an uncollectible cycle in that case. Note that the + // frame object itself is also frequently not even tracked by the GC + // starting with Python 3.7 (frames are allocated by the + // interpreter untracked, and only become tracked when their + // evaluation is finished if they have a refcount > 1). 
All of
+    // this is to say that we should probably strongly reference
+    // the frame object. Doing so, while always allowing GC on a
+    // greenlet, solves several leaks for us.
+
+    Py_VISIT(self->dict);
+    if (!self->pimpl) {
+        // Hmm. I have seen this at interpreter shutdown time,
+        // I think. That's very odd because this doesn't go away until
+        // we're ``green_dealloc()``, at which point we shouldn't be
+        // traversed anymore.
+        return 0;
+    }
+
+    return self->pimpl->tp_traverse(visit, arg);
+}
+
+static int
+green_is_gc(BorrowedGreenlet self)
+{
+    int result = 0;
+    /* Main greenlet can be garbage collected since it can only
+       become unreachable if the underlying thread exited.
+       Active greenlets --- including those that are suspended ---
+       cannot be garbage collected, however.
+    */
+    if (self->main() || !self->active()) {
+        result = 1;
+    }
+    // The main greenlet pointer will eventually go away after the thread dies.
+    if (self->was_running_in_dead_thread()) {
+        // Our thread is dead! We can never run again. Might as well
+        // GC us. Note that if a tuple containing only us and other
+        // immutable objects had been scanned before this, when we
+        // would have returned 0, the tuple will take itself out of GC
+        // tracking and never be investigated again. So that could
+        // result in both us and the tuple leaking due to an
+        // unreachable/uncollectible reference. The same goes for
+        // dictionaries.
+        //
+        // It's not a great idea to be changing our GC state on the
+        // fly.
+        result = 1;
+    }
+    return result;
+}
+
+
+int
+Greenlet::tp_clear()
+{
+    bool own_top_frame = this->was_running_in_dead_thread();
+    this->exception_state.tp_clear();
+    this->python_state.tp_clear(own_top_frame);
+    return 0;
+}
+
+int
+UserGreenlet::tp_clear()
+{
+    Greenlet::tp_clear();
+    this->_parent.CLEAR();
+    this->_main_greenlet.CLEAR();
+    this->_run_callable.CLEAR();
+    return 0;
+}
+
+
+static int
+green_clear(PyGreenlet* self)
+{
+    /* Greenlet is only cleared if it is about to be collected.
+       Since active greenlets are not garbage collectable, we can
+       be sure that, even if they are deallocated during clear,
+       nothing they reference is in unreachable or finalizers,
+       so even if it switches we are relatively safe. */
+    // XXX: Are we responsible for clearing weakrefs here?
+    Py_CLEAR(self->dict);
+    return self->pimpl->tp_clear();
+}
+
+/**
+ * Returns 0 on failure (the object was resurrected) or 1 on success.
+ **/
+static int
+_green_dealloc_kill_started_non_main_greenlet(BorrowedGreenlet self)
+{
+    /* Hacks hacks hacks copied from instance_dealloc() */
+    /* Temporarily resurrect the greenlet. */
+    assert(self.REFCNT() == 0);
+    Py_SET_REFCNT(self.borrow(), 1);
+    /* Save the current exception, if any. */
+    PyErrPieces saved_err;
+    try {
+        // BY THE TIME WE GET HERE, the state may actually be going
+        // away
+        // if we're shutting down the interpreter and freeing thread
+        // entries,
+        // this could result in freeing greenlets that were leaked. So
+        // we can't try to read the state.
+        self->deallocing_greenlet_in_thread(
+            self->thread_state()
+            ? static_cast<ThreadState*>(GET_THREAD_STATE())
+            : nullptr);
+    }
+    catch (const PyErrOccurred&) {
+        PyErr_WriteUnraisable(self.borrow_o());
+        /* XXX what else should we do? */
+    }
+    /* Check for no resurrection must be done while we keep
+     * our internal reference, otherwise PyFile_WriteObject
+     * causes recursion if using Py_INCREF/Py_DECREF
+     */
+    if (self.REFCNT() == 1 && self->active()) {
+        /* Not resurrected, but still not dead!
+           XXX what else should we do? we complain. 
*/ + PyObject* f = PySys_GetObject("stderr"); + Py_INCREF(self.borrow_o()); /* leak! */ + if (f != NULL) { + PyFile_WriteString("GreenletExit did not kill ", f); + PyFile_WriteObject(self.borrow_o(), f, 0); + PyFile_WriteString("\n", f); + } + } + /* Restore the saved exception. */ + saved_err.PyErrRestore(); + /* Undo the temporary resurrection; can't use DECREF here, + * it would cause a recursive call. + */ + assert(self.REFCNT() > 0); + + Py_ssize_t refcnt = self.REFCNT() - 1; + Py_SET_REFCNT(self.borrow_o(), refcnt); + if (refcnt != 0) { + /* Resurrected! */ + _Py_NewReference(self.borrow_o()); + Py_SET_REFCNT(self.borrow_o(), refcnt); + /* Better to use tp_finalizer slot (PEP 442) + * and call ``PyObject_CallFinalizerFromDealloc``, + * but that's only supported in Python 3.4+; see + * Modules/_io/iobase.c for an example. + * + * The following approach is copied from iobase.c in CPython 2.7. + * (along with much of this function in general). Here's their + * comment: + * + * When called from a heap type's dealloc, the type will be + * decref'ed on return (see e.g. subtype_dealloc in typeobject.c). */ + if (PyType_HasFeature(self.TYPE(), Py_TPFLAGS_HEAPTYPE)) { + Py_INCREF(self.TYPE()); + } + + PyObject_GC_Track((PyObject*)self); + + _Py_DEC_REFTOTAL; +#ifdef COUNT_ALLOCS + --Py_TYPE(self)->tp_frees; + --Py_TYPE(self)->tp_allocs; +#endif /* COUNT_ALLOCS */ + return 0; + } + return 1; +} + + +Greenlet::~Greenlet() +{ + // XXX: Can't do this. tp_clear is a virtual function, and by the + // time we're here, we've sliced off our child classes. + //this->tp_clear(); +} + +UserGreenlet::~UserGreenlet() +{ + // Python 3.11: If we don't clear out the raw frame datastack + // when deleting an unfinished greenlet, + // TestLeaks.test_untracked_memory_doesnt_increase_unfinished_thread_dealloc_in_main fails. + this->python_state.did_finish(nullptr); + this->tp_clear(); +} + +MainGreenlet::~MainGreenlet() +{ + total_main_greenlets--; + this->tp_clear(); +} + +static void +green_dealloc(PyGreenlet* self) +{ + PyObject_GC_UnTrack(self); + BorrowedGreenlet me(self); + if (me->active() + && me->started() + && !me->main()) { + if (!_green_dealloc_kill_started_non_main_greenlet(me)) { + return; + } + } + + if (self->weakreflist != NULL) { + PyObject_ClearWeakRefs((PyObject*)self); + } + Py_CLEAR(self->dict); + + if (self->pimpl) { + // In case deleting this, which frees some memory, + // somehow winds up calling back into us. That's usually a + //bug in our code. + Greenlet* p = self->pimpl; + self->pimpl = nullptr; + delete p; + } + // and finally we're done. self is now invalid. 
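+    // (Clarifying note, not from the upstream sources: for the base type,
+    // tp_free is PyObject_GC_Del, as configured in PyGreenlet_Type below;
+    // heap subtypes may substitute their own.)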
+ Py_TYPE(self)->tp_free((PyObject*)self); +} + + + +static OwnedObject +throw_greenlet(BorrowedGreenlet self, PyErrPieces& err_pieces) +{ + PyObject* result = nullptr; + err_pieces.PyErrRestore(); + assert(PyErr_Occurred()); + if (self->started() && !self->active()) { + /* dead greenlet: turn GreenletExit into a regular return */ + result = g_handle_exit(OwnedObject()).relinquish_ownership(); + } + + self->args() <<= result; + + return single_result(self->g_switch()); +} + + + +PyDoc_STRVAR( + green_switch_doc, + "switch(*args, **kwargs)\n" + "\n" + "Switch execution to this greenlet.\n" + "\n" + "If this greenlet has never been run, then this greenlet\n" + "will be switched to using the body of ``self.run(*args, **kwargs)``.\n" + "\n" + "If the greenlet is active (has been run, but was switch()'ed\n" + "out before leaving its run function), then this greenlet will\n" + "be resumed and the return value to its switch call will be\n" + "None if no arguments are given, the given argument if one\n" + "argument is given, or the args tuple and keyword args dict if\n" + "multiple arguments are given.\n" + "\n" + "If the greenlet is dead, or is the current greenlet then this\n" + "function will simply return the arguments using the same rules as\n" + "above.\n"); + +static PyObject* +green_switch(PyGreenlet* self, PyObject* args, PyObject* kwargs) +{ + using greenlet::SwitchingArgs; + SwitchingArgs switch_args(OwnedObject::owning(args), OwnedObject::owning(kwargs)); + self->pimpl->args() <<= switch_args; + + + // If we're switching out of a greenlet, and that switch is the + // last thing the greenlet does, the greenlet ought to be able to + // go ahead and die at that point. Currently, someone else must + // manually switch back to the greenlet so that we "fall off the + // end" and can perform cleanup. You'd think we'd be able to + // figure out that this is happening using the frame's ``f_lasti`` + // member, which is supposed to be an index into + // ``frame->f_code->co_code``, the bytecode string. However, in + // recent interpreters, ``f_lasti`` tends not to be updated thanks + // to things like the PREDICT() macros in ceval.c. So it doesn't + // really work to do that in many cases. For example, the Python + // code: + // def run(): + // greenlet.getcurrent().parent.switch() + // produces bytecode of len 16, with the actual call to switch() + // being at index 10 (in Python 3.10). However, the reported + // ``f_lasti`` we actually see is...5! (Which happens to be the + // second byte of the CALL_METHOD op for ``getcurrent()``). + + try { + OwnedObject result = single_result(self->pimpl->g_switch()); +#ifndef NDEBUG + // Note that the current greenlet isn't necessarily self. If self + // finished, we went to one of its parents. + assert(!self->pimpl->args()); + + const BorrowedGreenlet& current = GET_THREAD_STATE().state().borrow_current(); + // It's possible it's never been switched to. + assert(!current->args()); +#endif + return result.relinquish_ownership(); + } + catch(const PyErrOccurred&) { + return nullptr; + } +} + +PyDoc_STRVAR( + green_throw_doc, + "Switches execution to this greenlet, but immediately raises the\n" + "given exception in this greenlet. If no argument is provided, the " + "exception\n" + "defaults to `greenlet.GreenletExit`. The normal exception\n" + "propagation rules apply, as described for `switch`. 
Note that calling " + "this\n" + "method is almost equivalent to the following::\n" + "\n" + " def raiser():\n" + " raise typ, val, tb\n" + " g_raiser = greenlet(raiser, parent=g)\n" + " g_raiser.switch()\n" + "\n" + "except that this trick does not work for the\n" + "`greenlet.GreenletExit` exception, which would not propagate\n" + "from ``g_raiser`` to ``g``.\n"); + +static PyObject* +green_throw(PyGreenlet* self, PyObject* args) +{ + PyArgParseParam typ(mod_globs.PyExc_GreenletExit); + PyArgParseParam val; + PyArgParseParam tb; + + if (!PyArg_ParseTuple(args, "|OOO:throw", &typ, &val, &tb)) { + return NULL; + } + + try { + // Both normalizing the error and the actual throw_greenlet + // could throw PyErrOccurred. + PyErrPieces err_pieces(typ.borrow(), val.borrow(), tb.borrow()); + + return throw_greenlet(self, err_pieces).relinquish_ownership(); + } + catch (const PyErrOccurred&) { + return nullptr; + } +} + +static int +green_bool(PyGreenlet* self) +{ + return self->pimpl->active(); +} + +static PyObject* +green_getdict(PyGreenlet* self, void* UNUSED(context)) +{ + if (self->dict == NULL) { + self->dict = PyDict_New(); + if (self->dict == NULL) { + return NULL; + } + } + Py_INCREF(self->dict); + return self->dict; +} + +static int +green_setdict(PyGreenlet* self, PyObject* val, void* UNUSED(context)) +{ + PyObject* tmp; + + if (val == NULL) { + PyErr_SetString(PyExc_TypeError, "__dict__ may not be deleted"); + return -1; + } + if (!PyDict_Check(val)) { + PyErr_SetString(PyExc_TypeError, "__dict__ must be a dictionary"); + return -1; + } + tmp = self->dict; + Py_INCREF(val); + self->dict = val; + Py_XDECREF(tmp); + return 0; +} + +static bool +_green_not_dead(BorrowedGreenlet self) +{ + // XXX: Where else should we do this? + // Probably on entry to most Python-facing functions? 
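+    // (Illustrative note, not from the upstream sources: a greenlet whose
+    // OS thread has exited can never run again, so it is lazily demoted
+    // here the first time anyone asks; e.g. a greenlet left suspended in a
+    // finished thread reports ``g.dead == True`` even though its run()
+    // never returned.)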
+ if (self->was_running_in_dead_thread()) { + self->deactivate_and_free(); + return false; + } + return self->active() || !self->started(); +} + + +static PyObject* +green_getdead(BorrowedGreenlet self, void* UNUSED(context)) +{ + if (_green_not_dead(self)) { + Py_RETURN_FALSE; + } + else { + Py_RETURN_TRUE; + } +} + +static PyObject* +green_get_stack_saved(PyGreenlet* self, void* UNUSED(context)) +{ + return PyLong_FromSsize_t(self->pimpl->stack_saved()); +} + + +static PyObject* +green_getrun(BorrowedGreenlet self, void* UNUSED(context)) +{ + try { + OwnedObject result(self->run()); + return result.relinquish_ownership(); + } + catch(const PyErrOccurred&) { + return nullptr; + } +} + +void +UserGreenlet::run(const BorrowedObject nrun) +{ + if (this->started()) { + throw AttributeError( + "run cannot be set " + "after the start of the greenlet"); + } + this->_run_callable = nrun; +} + +const OwnedObject& +MainGreenlet::run() const +{ + throw AttributeError("Main greenlets do not have a run attribute."); +} + +void +MainGreenlet::run(const BorrowedObject UNUSED(nrun)) +{ + throw AttributeError("Main greenlets do not have a run attribute."); +} + +static int +green_setrun(BorrowedGreenlet self, BorrowedObject nrun, void* UNUSED(context)) +{ + try { + self->run(nrun); + return 0; + } + catch(const PyErrOccurred&) { + return -1; + } +} + +static PyObject* +green_getparent(BorrowedGreenlet self, void* UNUSED(context)) +{ + return self->parent().acquire_or_None(); +} + +using greenlet::AttributeError; + +const OwnedGreenlet +UserGreenlet::parent() const +{ + return this->_parent; +} + +void +UserGreenlet::parent(const BorrowedObject raw_new_parent) +{ + if (!raw_new_parent) { + throw AttributeError("can't delete attribute"); + } + + BorrowedMainGreenlet main_greenlet_of_new_parent; + BorrowedGreenlet new_parent(raw_new_parent.borrow()); // could + // throw + // TypeError! + for (BorrowedGreenlet p = new_parent; p; p = p->parent()) { + if (p == this->_self) { + throw ValueError("cyclic parent chain"); + } + main_greenlet_of_new_parent = p->main_greenlet(); + } + + if (!main_greenlet_of_new_parent) { + throw ValueError("parent must not be garbage collected"); + } + + if (this->started() + && this->_main_greenlet != main_greenlet_of_new_parent) { + throw ValueError("parent cannot be on a different thread"); + } + + this->_parent = new_parent; +} + +void +MainGreenlet::parent(const BorrowedObject raw_new_parent) +{ + if (!raw_new_parent) { + throw AttributeError("can't delete attribute"); + } + throw AttributeError("cannot set the parent of a main greenlet"); +} + +const OwnedGreenlet +MainGreenlet::parent() const +{ + return OwnedGreenlet(); // null becomes None +} + +static int +green_setparent(BorrowedGreenlet self, BorrowedObject nparent, void* UNUSED(context)) +{ + try { + self->parent(nparent); + } + catch(const PyErrOccurred&) { + return -1; + } + return 0; +} + +#ifdef Py_CONTEXT_H +# define GREENLET_NO_CONTEXTVARS_REASON "This build of greenlet" +#else +# define GREENLET_NO_CONTEXTVARS_REASON "This Python interpreter" +#endif + +namespace greenlet +{ + +template<> +const OwnedObject +Greenlet::context(GREENLET_WHEN_PY37::Yes) const +{ + using greenlet::PythonStateContext; + OwnedObject result; + + if (this->is_currently_running_in_some_thread()) { + /* Currently running greenlet: context is stored in the thread state, + not the greenlet object. 
*/
+        if (GET_THREAD_STATE().state().is_current(this->self())) {
+            result = PythonStateContext::context(PyThreadState_GET());
+        }
+        else {
+            throw ValueError(
+                "cannot get context of a "
+                "greenlet that is running in a different thread");
+        }
+    }
+    else {
+        /* Greenlet is not running: just return context. */
+        result = this->python_state.context();
+    }
+    if (!result) {
+        result = OwnedObject::None();
+    }
+    return result;
+}
+
+template<>
+const OwnedObject
+Greenlet::context(GREENLET_WHEN_NOT_PY37::No) const
+{
+    throw AttributeError(
+        GREENLET_NO_CONTEXTVARS_REASON
+        " does not support context variables"
+    );
+}
+
+template<>
+void Greenlet::context(BorrowedObject given, GREENLET_WHEN_PY37::Yes)
+{
+    using greenlet::PythonStateContext;
+    if (!given) {
+        throw AttributeError("can't delete context attribute");
+    }
+    if (given.is_None()) {
+        /* "Empty context" is stored as NULL, not None. */
+        given = nullptr;
+    }
+
+    //checks type, incrs refcnt
+    greenlet::refs::OwnedContext context(given);
+    PyThreadState* tstate = PyThreadState_GET();
+
+    if (this->is_currently_running_in_some_thread()) {
+        if (!GET_THREAD_STATE().state().is_current(this->self())) {
+            throw ValueError("cannot set context of a greenlet"
+                             " that is running in a different thread");
+        }
+
+        /* Currently running greenlet: context is stored in the thread state,
+           not the greenlet object. */
+        OwnedObject octx = OwnedObject::consuming(PythonStateContext::context(tstate));
+        PythonStateContext::context(tstate, context.relinquish_ownership());
+    }
+    else {
+        /* Greenlet is not running: just set context. Note that the
+           greenlet may be dead.*/
+        this->python_state.context() = context;
+    }
+}
+
+template<>
+void
+Greenlet::context(BorrowedObject UNUSED(given), GREENLET_WHEN_NOT_PY37::No)
+{
+    throw AttributeError(
+        GREENLET_NO_CONTEXTVARS_REASON
+        " does not support context variables"
+    );
+}
+
+};
+
+static PyObject*
+green_getcontext(const PyGreenlet* self, void* UNUSED(context))
+{
+    const Greenlet *const g = self->pimpl;
+    try {
+        OwnedObject result(g->context());
+        return result.relinquish_ownership();
+    }
+    catch(const PyErrOccurred&) {
+        return nullptr;
+    }
+}
+
+static int
+green_setcontext(BorrowedGreenlet self, PyObject* nctx, void* UNUSED(context))
+{
+    try {
+        self->context(nctx, G_IS_PY37::IsIt());
+        return 0;
+    }
+    catch(const PyErrOccurred&) {
+        return -1;
+    }
+}
+
+#undef GREENLET_NO_CONTEXTVARS_REASON
+
+static PyObject*
+green_getframe(BorrowedGreenlet self, void* UNUSED(context))
+{
+    const PythonState::OwnedFrame& top_frame = self->top_frame();
+    return top_frame.acquire_or_None();
+}
+
+static PyObject*
+green_getstate(PyGreenlet* self)
+{
+    PyErr_Format(PyExc_TypeError,
+                 "cannot serialize '%s' object",
+                 Py_TYPE(self)->tp_name);
+    return nullptr;
+}
+
+static PyObject*
+green_repr(BorrowedGreenlet self)
+{
+    /*
+      Return a string like
+      <greenlet.greenlet object at 0x... (otid=0x...) current active started main>
+
+      The handling of greenlets across threads is not super good.
+      We mostly use the internal definitions of these terms, but they
+      generally should make sense to users as well.
+    */
+    PyObject* result;
+    int never_started = !self->started() && !self->active();
+
+    const char* const tp_name = Py_TYPE(self)->tp_name;
+
+    if (_green_not_dead(self)) {
+        /* XXX: The otid= is almost useless because you can't correlate it to
+           any thread identifier exposed to Python. We could use
+           PyThreadState_GET()->thread_id, but we'd need to save that in the
+           greenlet, or save the whole PyThreadState object itself. 
+
+           As it stands, it's only useful for identifying greenlets from the same thread.
+        */
+        const char* state_in_thread;
+        if (self->was_running_in_dead_thread()) {
+            // The thread it was running in is dead!
+            // This can happen, especially at interpreter shut down.
+            // It complicates debugging output because it may be
+            // impossible to access the current thread state at that
+            // time. Thus, don't access the current thread state.
+            state_in_thread = " (thread exited)";
+        }
+        else {
+            state_in_thread = GET_THREAD_STATE().state().is_current(self)
+                ? " current"
+                : (self->started() ? " suspended" : "");
+        }
+        result = GNative_FromFormat(
+            "<%s object at %p (otid=%p)%s%s%s%s>",
+            tp_name,
+            self.borrow_o(),
+            self->thread_state(),
+            state_in_thread,
+            self->active() ? " active" : "",
+            never_started ? " pending" : " started",
+            self->main() ? " main" : ""
+        );
+    }
+    else {
+        result = GNative_FromFormat(
+            "<%s object at %p (otid=%p) %sdead>",
+            tp_name,
+            self.borrow_o(),
+            self->thread_state(),
+            self->was_running_in_dead_thread()
+            ? "(thread exited) "
+            : ""
+        );
+    }
+
+    return result;
+}
+
+/*****************************************************************************
+ * C interface
+ *
+ * These are exported using the CObject API
+ */
+extern "C" {
+static PyGreenlet*
+PyGreenlet_GetCurrent(void)
+{
+    return GET_THREAD_STATE().state().get_current().relinquish_ownership();
+}
+
+static int
+PyGreenlet_SetParent(PyGreenlet* g, PyGreenlet* nparent)
+{
+    return green_setparent((PyGreenlet*)g, (PyObject*)nparent, NULL);
+}
+
+static PyGreenlet*
+PyGreenlet_New(PyObject* run, PyGreenlet* parent)
+{
+    using greenlet::refs::NewDictReference;
+    // In the past, we didn't use green_new and green_init, but that
+    // was a maintenance issue because we duplicated code. This way is
+    // much safer, but slightly slower. If that's a problem, we could
+    // refactor green_init to separate argument parsing from initialization.
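+    // (Clarifying note, not from the upstream sources: this is the C-level
+    // counterpart of calling ``greenlet.greenlet(run, parent=parent)`` from
+    // Python; the kwargs dict assembled below is fed to green_init.)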
+ OwnedGreenlet g = OwnedGreenlet::consuming(green_new(&PyGreenlet_Type, nullptr, nullptr)); + if (!g) { + return NULL; + } + + try { + NewDictReference kwargs; + if (run) { + kwargs.SetItem(mod_globs.str_run, run); + } + if (parent) { + kwargs.SetItem("parent", (PyObject*)parent); + } + + Require(green_init(g, mod_globs.empty_tuple, kwargs)); + } + catch (const PyErrOccurred&) { + return nullptr; + } + + return g.relinquish_ownership(); +} + +static PyObject* +PyGreenlet_Switch(PyGreenlet* g, PyObject* args, PyObject* kwargs) +{ + PyGreenlet* self = (PyGreenlet*)g; + + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return NULL; + } + + if (args == NULL) { + args = mod_globs.empty_tuple; + } + + if (kwargs == NULL || !PyDict_Check(kwargs)) { + kwargs = NULL; + } + + return green_switch(g, args, kwargs); +} + +static PyObject* +PyGreenlet_Throw(PyGreenlet* self, PyObject* typ, PyObject* val, PyObject* tb) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return nullptr; + } + try { + PyErrPieces err_pieces(typ, val, tb); + return throw_greenlet(self, err_pieces).relinquish_ownership(); + } + catch (const PyErrOccurred&) { + return nullptr; + } +} + +static int +Extern_PyGreenlet_MAIN(PyGreenlet* self) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return -1; + } + return self->pimpl->main(); +} + +static int +Extern_PyGreenlet_ACTIVE(PyGreenlet* self) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return -1; + } + return self->pimpl->active(); +} + +static int +Extern_PyGreenlet_STARTED(PyGreenlet* self) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return -1; + } + return self->pimpl->started(); +} + +static PyGreenlet* +Extern_PyGreenlet_GET_PARENT(PyGreenlet* self) +{ + if (!PyGreenlet_Check(self)) { + PyErr_BadArgument(); + return NULL; + } + // This can return NULL even if there is no exception + return self->pimpl->parent().acquire(); +} +} // extern C. 
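+
+/* Illustrative sketch, not part of the upstream sources: how another C
+   extension might drive the capsule-based API exported above. It assumes
+   the consumer includes greenlet.h and has called PyGreenlet_Import()
+   first; it is wrapped in ``#if 0`` so it serves as documentation only. */
+#if 0
+static PyObject*
+example_run_in_child(PyObject* run_callable)
+{
+    /* Child greenlet whose parent defaults to the current greenlet. */
+    PyGreenlet* child = PyGreenlet_New(run_callable, NULL);
+    if (!child) {
+        return NULL;
+    }
+    /* The first switch starts the child; the result is whatever the
+       child returns or switches back to us. */
+    PyObject* result = PyGreenlet_Switch(child, NULL, NULL);
+    Py_DECREF(child);
+    return result; /* NULL with a Python error set on failure */
+}
+#endif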
+/** End C API ****************************************************************/
+
+static PyMethodDef green_methods[] = {
+    {"switch",
+     reinterpret_cast<PyCFunction>(green_switch),
+     METH_VARARGS | METH_KEYWORDS,
+     green_switch_doc},
+    {"throw", (PyCFunction)green_throw, METH_VARARGS, green_throw_doc},
+    {"__getstate__", (PyCFunction)green_getstate, METH_NOARGS, NULL},
+    {NULL, NULL} /* sentinel */
+};
+
+static PyGetSetDef green_getsets[] = {
+    {"__dict__", (getter)green_getdict, (setter)green_setdict, /*XXX*/ NULL},
+    {"run", (getter)green_getrun, (setter)green_setrun, /*XXX*/ NULL},
+    {"parent", (getter)green_getparent, (setter)green_setparent, /*XXX*/ NULL},
+    {"gr_frame", (getter)green_getframe, NULL, /*XXX*/ NULL},
+    {"gr_context",
+     (getter)green_getcontext,
+     (setter)green_setcontext,
+     /*XXX*/ NULL},
+    {"dead", (getter)green_getdead, NULL, /*XXX*/ NULL},
+    {"_stack_saved", (getter)green_get_stack_saved, NULL, /*XXX*/ NULL},
+    {NULL}};
+
+static PyMemberDef green_members[] = {
+    {NULL}
+};
+
+static PyNumberMethods green_as_number = {
+    NULL, /* nb_add */
+    NULL, /* nb_subtract */
+    NULL, /* nb_multiply */
+#if PY_MAJOR_VERSION < 3
+    NULL, /* nb_divide */
+#endif
+    NULL,                /* nb_remainder */
+    NULL,                /* nb_divmod */
+    NULL,                /* nb_power */
+    NULL,                /* nb_negative */
+    NULL,                /* nb_positive */
+    NULL,                /* nb_absolute */
+    (inquiry)green_bool, /* nb_bool */
+};
+
+
+PyTypeObject PyGreenlet_Type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "greenlet.greenlet", /* tp_name */
+    sizeof(PyGreenlet),  /* tp_basicsize */
+    0,                   /* tp_itemsize */
+    /* methods */
+    (destructor)green_dealloc, /* tp_dealloc */
+    0,                         /* tp_print */
+    0,                         /* tp_getattr */
+    0,                         /* tp_setattr */
+    0,                         /* tp_compare */
+    (reprfunc)green_repr,      /* tp_repr */
+    &green_as_number,          /* tp_as_number */
+    0,                         /* tp_as_sequence */
+    0,                         /* tp_as_mapping */
+    0,                         /* tp_hash */
+    0,                         /* tp_call */
+    0,                         /* tp_str */
+    0,                         /* tp_getattro */
+    0,                         /* tp_setattro */
+    0,                         /* tp_as_buffer */
+    G_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+    "greenlet(run=None, parent=None) -> greenlet\n\n"
+    "Creates a new greenlet object (without running it).\n\n"
+    " - *run* -- The callable to invoke.\n"
+    " - *parent* -- The parent greenlet. The default is the current "
+    "greenlet.",                       /* tp_doc */
+    (traverseproc)green_traverse,      /* tp_traverse */
+    (inquiry)green_clear,              /* tp_clear */
+    0,                                 /* tp_richcompare */
+    offsetof(PyGreenlet, weakreflist), /* tp_weaklistoffset */
+    0,                                 /* tp_iter */
+    0,                                 /* tp_iternext */
+    green_methods,                     /* tp_methods */
+    green_members,                     /* tp_members */
+    green_getsets,                     /* tp_getset */
+    0,                                 /* tp_base */
+    0,                                 /* tp_dict */
+    0,                                 /* tp_descr_get */
+    0,                                 /* tp_descr_set */
+    offsetof(PyGreenlet, dict),        /* tp_dictoffset */
+    (initproc)green_init,              /* tp_init */
+    PyType_GenericAlloc,               /* tp_alloc */
+    (newfunc)green_new,                /* tp_new */
+    PyObject_GC_Del,                   /* tp_free */
+    (inquiry)green_is_gc,              /* tp_is_gc */
+};
+
+
+
+PyDoc_STRVAR(mod_getcurrent_doc,
+             "getcurrent() -> greenlet\n"
+             "\n"
+             "Returns the current greenlet (i.e. the one which called this "
+             "function).\n");
+
+static PyObject*
+mod_getcurrent(PyObject* UNUSED(module))
+{
+    return GET_THREAD_STATE().state().get_current().relinquish_ownership_o();
+}
+
+PyDoc_STRVAR(mod_settrace_doc,
+             "settrace(callback) -> object\n"
+             "\n"
+             "Sets a new tracing function and returns the previous one.\n");
+static PyObject*
+mod_settrace(PyObject* UNUSED(module), PyObject* args)
+{
+    PyArgParseParam tracefunc;
+    if (!PyArg_ParseTuple(args, "O", &tracefunc)) {
+        return NULL;
+    }
+    ThreadState& state = GET_THREAD_STATE();
+    OwnedObject previous = state.get_tracefunc();
+    if (!previous) {
+        previous = Py_None;
+    }
+
+    state.set_tracefunc(tracefunc);
+
+    return previous.relinquish_ownership();
+}
+
+PyDoc_STRVAR(mod_gettrace_doc,
+             "gettrace() -> object\n"
+             "\n"
+             "Returns the currently set tracing function, or None.\n");
+
+static PyObject*
+mod_gettrace(PyObject* UNUSED(module))
+{
+    OwnedObject tracefunc = GET_THREAD_STATE().state().get_tracefunc();
+    if (!tracefunc) {
+        tracefunc = Py_None;
+    }
+    return tracefunc.relinquish_ownership();
+}
+
+PyDoc_STRVAR(mod_set_thread_local_doc,
+             "set_thread_local(key, value) -> None\n"
+             "\n"
+             "Set a value in the current thread-local dictionary. Debugging only.\n");
+
+static PyObject*
+mod_set_thread_local(PyObject* UNUSED(module), PyObject* args)
+{
+    PyArgParseParam key;
+    PyArgParseParam value;
+    PyObject* result = NULL;
+
+    if (PyArg_UnpackTuple(args, "set_thread_local", 2, 2, &key, &value)) {
+        if(PyDict_SetItem(
+                          PyThreadState_GetDict(), // borrow
+                          key,
+                          value) == 0 ) {
+            // success
+            Py_INCREF(Py_None);
+            result = Py_None;
+        }
+    }
+    return result;
+}
+
+PyDoc_STRVAR(mod_get_pending_cleanup_count_doc,
+             "get_pending_cleanup_count() -> Integer\n"
+             "\n"
+             "Get the number of greenlet cleanup operations pending. Testing only.\n");
+
+
+static PyObject*
+mod_get_pending_cleanup_count(PyObject* UNUSED(module))
+{
+    LockGuard cleanup_lock(*mod_globs.thread_states_to_destroy_lock);
+    return PyLong_FromSize_t(mod_globs.thread_states_to_destroy.size());
+}
+
+PyDoc_STRVAR(mod_get_total_main_greenlets_doc,
+             "get_total_main_greenlets() -> Integer\n"
+             "\n"
+             "Quickly return the number of main greenlets that exist. Testing only.\n");
+
+static PyObject*
+mod_get_total_main_greenlets(PyObject* UNUSED(module))
+{
+    return PyLong_FromSize_t(total_main_greenlets);
+}
+
+PyDoc_STRVAR(mod_get_clocks_used_doing_optional_cleanup_doc,
+             "get_clocks_used_doing_optional_cleanup() -> Integer\n"
+             "\n"
+             "Get the number of clock ticks the program has used doing optional "
+             "greenlet cleanup.\n"
+             "Beginning in greenlet 2.0, greenlet tries to find and dispose of greenlets\n"
+             "that leaked after a thread exited. This requires invoking Python's garbage collector,\n"
+             "which may have a performance cost proportional to the number of live objects.\n"
+             "This function returns the amount of processor time\n"
+             "greenlet has used to do this. In programs that run with very large amounts of live\n"
+             "objects, this metric can be used to decide whether the cost of doing this cleanup\n"
+             "is worth the memory leak being corrected. If not, you can disable the cleanup\n"
+             "using ``enable_optional_cleanup(False)``.\n"
+             "The units are arbitrary and can only be compared to themselves (similarly to ``time.clock()``);\n"
+             "for example, to see how it scales with your heap. You can attempt to convert them into seconds\n"
+             "by dividing by the value of CLOCKS_PER_SEC.\n"
+             "If cleanup has been disabled, returns None."
+ "\n" + "This is an implementation specific, provisional API. It may be changed or removed\n" + "in the future.\n" + ".. versionadded:: 2.0" + ); +static PyObject* +mod_get_clocks_used_doing_optional_cleanup(PyObject* UNUSED(module)) +{ + std::clock_t& clocks = ThreadState::clocks_used_doing_gc(); + + if (clocks == std::clock_t(-1)) { + Py_RETURN_NONE; + } + // This might not actually work on some implementations; clock_t + // is an opaque type. + return PyLong_FromSsize_t(clocks); +} + +PyDoc_STRVAR(mod_enable_optional_cleanup_doc, + "mod_enable_optional_cleanup(bool) -> None\n" + "\n" + "Enable or disable optional cleanup operations.\n" + "See ``get_clocks_used_doing_optional_cleanup()`` for details.\n" + ); +static PyObject* +mod_enable_optional_cleanup(PyObject* UNUSED(module), PyObject* flag) +{ + int is_true = PyObject_IsTrue(flag); + if (is_true == -1) { + return nullptr; + } + + std::clock_t& clocks = ThreadState::clocks_used_doing_gc(); + if (is_true) { + // If we already have a value, we don't want to lose it. + if (clocks == std::clock_t(-1)) { + clocks = 0; + } + } + else { + clocks = std::clock_t(-1); + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(mod_get_tstate_trash_delete_nesting_doc, + "get_tstate_trash_delete_nesting() -> Integer\n" + "\n" + "Return the 'trash can' nesting level. Testing only.\n"); +static PyObject* +mod_get_tstate_trash_delete_nesting(PyObject* UNUSED(module)) +{ + PyThreadState* tstate = PyThreadState_GET(); + return PyLong_FromLong(tstate->trash_delete_nesting); +} + +static PyMethodDef GreenMethods[] = { + {"getcurrent", + (PyCFunction)mod_getcurrent, + METH_NOARGS, + mod_getcurrent_doc}, + {"settrace", (PyCFunction)mod_settrace, METH_VARARGS, mod_settrace_doc}, + {"gettrace", (PyCFunction)mod_gettrace, METH_NOARGS, mod_gettrace_doc}, + {"set_thread_local", (PyCFunction)mod_set_thread_local, METH_VARARGS, mod_set_thread_local_doc}, + {"get_pending_cleanup_count", (PyCFunction)mod_get_pending_cleanup_count, METH_NOARGS, mod_get_pending_cleanup_count_doc}, + {"get_total_main_greenlets", (PyCFunction)mod_get_total_main_greenlets, METH_NOARGS, mod_get_total_main_greenlets_doc}, + {"get_clocks_used_doing_optional_cleanup", (PyCFunction)mod_get_clocks_used_doing_optional_cleanup, METH_NOARGS, mod_get_clocks_used_doing_optional_cleanup_doc}, + {"enable_optional_cleanup", (PyCFunction)mod_enable_optional_cleanup, METH_O, mod_enable_optional_cleanup_doc}, + {"get_tstate_trash_delete_nesting", (PyCFunction)mod_get_tstate_trash_delete_nesting, METH_NOARGS, mod_get_tstate_trash_delete_nesting_doc}, + {NULL, NULL} /* Sentinel */ +}; + +static const char* const copy_on_greentype[] = { + "getcurrent", + "error", + "GreenletExit", + "settrace", + "gettrace", + NULL +}; + +static struct PyModuleDef greenlet_module_def = { + PyModuleDef_HEAD_INIT, + "greenlet._greenlet", + NULL, + -1, + GreenMethods, +}; + + + +static PyObject* +greenlet_internal_mod_init() G_NOEXCEPT +{ + static void* _PyGreenlet_API[PyGreenlet_API_pointers]; + GREENLET_NOINLINE_INIT(); + + try { + CreatedModule m(greenlet_module_def); + + Require(PyType_Ready(&PyGreenlet_Type)); + +#if G_USE_STANDARD_THREADING == 0 + Require(PyType_Ready(&PyGreenletCleanup_Type)); +#endif + + new((void*)&mod_globs) GreenletGlobals; + ThreadState::init(); + + m.PyAddObject("greenlet", PyGreenlet_Type); + m.PyAddObject("error", mod_globs.PyExc_GreenletError); + m.PyAddObject("GreenletExit", mod_globs.PyExc_GreenletExit); + + m.PyAddObject("GREENLET_USE_GC", 1); + m.PyAddObject("GREENLET_USE_TRACING", 1); + // The macros are 
either 0 or 1; the 0 case can be interpreted
+        // the same as NULL, which is ambiguous with a pointer.
+        m.PyAddObject("GREENLET_USE_CONTEXT_VARS", (long)GREENLET_PY37);
+        m.PyAddObject("GREENLET_USE_STANDARD_THREADING", (long)G_USE_STANDARD_THREADING);
+
+        OwnedObject clocks_per_sec = OwnedObject::consuming(PyLong_FromSsize_t(CLOCKS_PER_SEC));
+        m.PyAddObject("CLOCKS_PER_SEC", clocks_per_sec);
+
+        /* also publish module-level data as attributes of the greentype. */
+        // XXX: This is weird, and enables a strange pattern of
+        // confusing the class greenlet with the module greenlet; with
+        // the exception of (possibly) ``getcurrent()``, this
+        // shouldn't be encouraged so don't add new items here.
+        for (const char* const* p = copy_on_greentype; *p; p++) {
+            OwnedObject o = m.PyRequireAttr(*p);
+            PyDict_SetItemString(PyGreenlet_Type.tp_dict, *p, o.borrow());
+        }
+
+        /*
+         * Expose C API
+         */
+
+        /* types */
+        _PyGreenlet_API[PyGreenlet_Type_NUM] = (void*)&PyGreenlet_Type;
+
+        /* exceptions */
+        _PyGreenlet_API[PyExc_GreenletError_NUM] = (void*)mod_globs.PyExc_GreenletError;
+        _PyGreenlet_API[PyExc_GreenletExit_NUM] = (void*)mod_globs.PyExc_GreenletExit;
+
+        /* methods */
+        _PyGreenlet_API[PyGreenlet_New_NUM] = (void*)PyGreenlet_New;
+        _PyGreenlet_API[PyGreenlet_GetCurrent_NUM] = (void*)PyGreenlet_GetCurrent;
+        _PyGreenlet_API[PyGreenlet_Throw_NUM] = (void*)PyGreenlet_Throw;
+        _PyGreenlet_API[PyGreenlet_Switch_NUM] = (void*)PyGreenlet_Switch;
+        _PyGreenlet_API[PyGreenlet_SetParent_NUM] = (void*)PyGreenlet_SetParent;
+
+        /* Previously macros, but now need to be functions externally. */
+        _PyGreenlet_API[PyGreenlet_MAIN_NUM] = (void*)Extern_PyGreenlet_MAIN;
+        _PyGreenlet_API[PyGreenlet_STARTED_NUM] = (void*)Extern_PyGreenlet_STARTED;
+        _PyGreenlet_API[PyGreenlet_ACTIVE_NUM] = (void*)Extern_PyGreenlet_ACTIVE;
+        _PyGreenlet_API[PyGreenlet_GET_PARENT_NUM] = (void*)Extern_PyGreenlet_GET_PARENT;
+
+        /* XXX: Note that our module name is ``greenlet._greenlet``, but for
+           backwards compatibility with existing C code, we need the _C_API to
+           be directly in greenlet.
+        */
+        const NewReference c_api_object(Require(
+                                            PyCapsule_New(
+                                                (void*)_PyGreenlet_API,
+                                                "greenlet._C_API",
+                                                NULL)));
+        m.PyAddObject("_C_API", c_api_object);
+        assert(c_api_object.REFCNT() == 2);
+
+        // cerr << "Sizes:"
+        //      << "\n\tGreenlet       : " << sizeof(Greenlet)
+        //      << "\n\tUserGreenlet   : " << sizeof(UserGreenlet)
+        //      << "\n\tMainGreenlet   : " << sizeof(MainGreenlet)
+        //      << "\n\tExceptionState : " << sizeof(greenlet::ExceptionState)
+        //      << "\n\tPythonState    : " << sizeof(greenlet::PythonState)
+        //      << "\n\tStackState     : " << sizeof(greenlet::StackState)
+        //      << "\n\tSwitchingArgs  : " << sizeof(greenlet::SwitchingArgs)
+        //      << "\n\tOwnedObject    : " << sizeof(greenlet::refs::OwnedObject)
+        //      << "\n\tBorrowedObject : " << sizeof(greenlet::refs::BorrowedObject)
+        //      << "\n\tPyGreenlet     : " << sizeof(PyGreenlet)
+        //      << endl;
+
+        return m.borrow(); // But really it's the main reference.
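+
+        // An illustrative sketch (not part of this file) of how a
+        // third-party extension consumes the ``greenlet._C_API`` capsule
+        // published above. ``PyGreenlet_Import`` is the macro from
+        // greenlet.h below; the module name ``demo`` is hypothetical:
+        //
+        //     static struct PyModuleDef demo_def = {
+        //         PyModuleDef_HEAD_INIT, "demo", NULL, -1, NULL,
+        //     };
+        //     PyMODINIT_FUNC
+        //     PyInit_demo(void)
+        //     {
+        //         PyGreenlet_Import(); // resolves "greenlet._C_API" via PyCapsule_Import
+        //         if (_PyGreenlet_API == NULL) {
+        //             return NULL; // ImportError is already set
+        //         }
+        //         return PyModule_Create(&demo_def);
+        //     }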
+ } + catch (const LockInitError& e) { + PyErr_SetString(PyExc_MemoryError, e.what()); + return NULL; + } + catch (const PyErrOccurred&) { + return NULL; + } + +} + +extern "C" { +#if PY_MAJOR_VERSION >= 3 +PyMODINIT_FUNC +PyInit__greenlet(void) +{ + return greenlet_internal_mod_init(); +} +#else +PyMODINIT_FUNC +init_greenlet(void) +{ + greenlet_internal_mod_init(); +} +#endif +}; + +#ifdef __clang__ +# pragma clang diagnostic pop +#elif defined(__GNUC__) +# pragma GCC diagnostic pop +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet.h new file mode 100644 index 00000000..d02a16e4 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet.h @@ -0,0 +1,164 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ + +/* Greenlet object interface */ + +#ifndef Py_GREENLETOBJECT_H +#define Py_GREENLETOBJECT_H + + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is deprecated and undocumented. It does not change. */ +#define GREENLET_VERSION "1.0.0" + +#ifndef GREENLET_MODULE +#define implementation_ptr_t void* +#endif + +typedef struct _greenlet { + PyObject_HEAD + PyObject* weakreflist; + PyObject* dict; + implementation_ptr_t pimpl; +} PyGreenlet; + +#define PyGreenlet_Check(op) (op && PyObject_TypeCheck(op, &PyGreenlet_Type)) + + +/* C API functions */ + +/* Total number of symbols that are exported */ +#define PyGreenlet_API_pointers 12 + +#define PyGreenlet_Type_NUM 0 +#define PyExc_GreenletError_NUM 1 +#define PyExc_GreenletExit_NUM 2 + +#define PyGreenlet_New_NUM 3 +#define PyGreenlet_GetCurrent_NUM 4 +#define PyGreenlet_Throw_NUM 5 +#define PyGreenlet_Switch_NUM 6 +#define PyGreenlet_SetParent_NUM 7 + +#define PyGreenlet_MAIN_NUM 8 +#define PyGreenlet_STARTED_NUM 9 +#define PyGreenlet_ACTIVE_NUM 10 +#define PyGreenlet_GET_PARENT_NUM 11 + +#ifndef GREENLET_MODULE +/* This section is used by modules that uses the greenlet C API */ +static void** _PyGreenlet_API = NULL; + +# define PyGreenlet_Type \ + (*(PyTypeObject*)_PyGreenlet_API[PyGreenlet_Type_NUM]) + +# define PyExc_GreenletError \ + ((PyObject*)_PyGreenlet_API[PyExc_GreenletError_NUM]) + +# define PyExc_GreenletExit \ + ((PyObject*)_PyGreenlet_API[PyExc_GreenletExit_NUM]) + +/* + * PyGreenlet_New(PyObject *args) + * + * greenlet.greenlet(run, parent=None) + */ +# define PyGreenlet_New \ + (*(PyGreenlet * (*)(PyObject * run, PyGreenlet * parent)) \ + _PyGreenlet_API[PyGreenlet_New_NUM]) + +/* + * PyGreenlet_GetCurrent(void) + * + * greenlet.getcurrent() + */ +# define PyGreenlet_GetCurrent \ + (*(PyGreenlet * (*)(void)) _PyGreenlet_API[PyGreenlet_GetCurrent_NUM]) + +/* + * PyGreenlet_Throw( + * PyGreenlet *greenlet, + * PyObject *typ, + * PyObject *val, + * PyObject *tb) + * + * g.throw(...) 
+ */ +# define PyGreenlet_Throw \ + (*(PyObject * (*)(PyGreenlet * self, \ + PyObject * typ, \ + PyObject * val, \ + PyObject * tb)) \ + _PyGreenlet_API[PyGreenlet_Throw_NUM]) + +/* + * PyGreenlet_Switch(PyGreenlet *greenlet, PyObject *args) + * + * g.switch(*args, **kwargs) + */ +# define PyGreenlet_Switch \ + (*(PyObject * \ + (*)(PyGreenlet * greenlet, PyObject * args, PyObject * kwargs)) \ + _PyGreenlet_API[PyGreenlet_Switch_NUM]) + +/* + * PyGreenlet_SetParent(PyObject *greenlet, PyObject *new_parent) + * + * g.parent = new_parent + */ +# define PyGreenlet_SetParent \ + (*(int (*)(PyGreenlet * greenlet, PyGreenlet * nparent)) \ + _PyGreenlet_API[PyGreenlet_SetParent_NUM]) + +/* + * PyGreenlet_GetParent(PyObject* greenlet) + * + * return greenlet.parent; + * + * This could return NULL even if there is no exception active. + * If it does not return NULL, you are responsible for decrementing the + * reference count. + */ +# define PyGreenlet_GetParent \ + (*(PyGreenlet* (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_GET_PARENT_NUM]) + +/* + * deprecated, undocumented alias. + */ +# define PyGreenlet_GET_PARENT PyGreenlet_GetParent + +# define PyGreenlet_MAIN \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_MAIN_NUM]) + +# define PyGreenlet_STARTED \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_STARTED_NUM]) + +# define PyGreenlet_ACTIVE \ + (*(int (*)(PyGreenlet*)) \ + _PyGreenlet_API[PyGreenlet_ACTIVE_NUM]) + + + + +/* Macro that imports greenlet and initializes C API */ +/* NOTE: This has actually moved to ``greenlet._greenlet._C_API``, but we + keep the older definition to be sure older code that might have a copy of + the header still works. */ +# define PyGreenlet_Import() \ + { \ + _PyGreenlet_API = (void**)PyCapsule_Import("greenlet._C_API", 0); \ + } + +#endif /* GREENLET_MODULE */ + +#ifdef __cplusplus +} +#endif +#endif /* !Py_GREENLETOBJECT_H */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_allocator.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_allocator.hpp new file mode 100644 index 00000000..b452f544 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_allocator.hpp @@ -0,0 +1,63 @@ +#ifndef GREENLET_ALLOCATOR_HPP +#define GREENLET_ALLOCATOR_HPP + +#define PY_SSIZE_T_CLEAN +#include +#include +#include "greenlet_compiler_compat.hpp" + + +namespace greenlet +{ + // This allocator is stateless; all instances are identical. + // It can *ONLY* be used when we're sure we're holding the GIL + // (Python's allocators require the GIL). 
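+    // For instance (an illustrative sketch, not part of the original
+    // file), an STL container can be pointed at Python's heap; greenlet
+    // itself uses this allocator to back the custom ``operator new`` of
+    // its UserGreenlet and MainGreenlet classes (greenlet_greenlet.hpp):
+    //
+    //     std::vector<int, greenlet::PythonAllocator<int> > v; // GIL must be held
+    //     v.push_back(42); // storage comes from PyObject_Malloc/PyMem_Malloc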
+    template <class T>
+    struct PythonAllocator : public std::allocator<T> {
+
+        PythonAllocator(const PythonAllocator& UNUSED(other))
+            : std::allocator<T>()
+        {
+        }
+
+        PythonAllocator(const std::allocator<T> other)
+            : std::allocator<T>(other)
+        {}
+
+        template <class U>
+        PythonAllocator(const std::allocator<U>& other)
+            : std::allocator<T>(other)
+        {
+        }
+
+        PythonAllocator() : std::allocator<T>() {}
+
+        T* allocate(size_t number_objects, const void* UNUSED(hint)=0)
+        {
+            void* p;
+            if (number_objects == 1)
+                p = PyObject_Malloc(sizeof(T));
+            else
+                p = PyMem_Malloc(sizeof(T) * number_objects);
+            return static_cast<T*>(p);
+        }
+
+        void deallocate(T* t, size_t n)
+        {
+            void* p = t;
+            if (n == 1) {
+                PyObject_Free(p);
+            }
+            else
+                PyMem_Free(p);
+        }
+        // This member is deprecated in C++17 and removed in C++20
+        template< class U >
+        struct rebind {
+            typedef PythonAllocator<U> other;
+        };
+
+    };
+}
+
+#endif
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_compiler_compat.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_compiler_compat.hpp
new file mode 100644
index 00000000..ecaeb326
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_compiler_compat.hpp
@@ -0,0 +1,132 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+#ifndef GREENLET_COMPILER_COMPAT_HPP
+#define GREENLET_COMPILER_COMPAT_HPP
+
+/**
+ * Definitions to aid with compatibility with different compilers.
+ *
+ * .. caution:: Use extreme care with G_NOEXCEPT.
+ *    Some compilers and runtimes, specifically gcc/libgcc/libstdc++ on
+ *    Linux, implement stack unwinding by throwing an uncatchable
+ *    exception, one that specifically does not appear to be an active
+ *    exception to the rest of the runtime. If this happens while we're
+ *    in a G_NOEXCEPT function, we have violated our dynamic exception
+ *    contract, and so the runtime will call std::terminate(), which
+ *    kills the process with the unhelpful message "terminate called
+ *    without an active exception".
+ *
+ *    This has happened in this scenario: A background thread is running
+ *    a greenlet that has made a native call and released the GIL.
+ *    Meanwhile, the main thread finishes and starts shutting down the
+ *    interpreter. When the background thread is scheduled again and
+ *    attempts to obtain the GIL, it notices that the interpreter is
+ *    exiting and calls ``pthread_exit()``. This in turn starts to unwind
+ *    the stack by throwing that exception. But we had the ``PyCall``
+ *    functions annotated as G_NOEXCEPT, so the runtime terminated us.
+ *
+ *    #2  0x00007fab26fec2b7 in std::terminate() () from /lib/x86_64-linux-gnu/libstdc++.so.6
+ *    #3  0x00007fab26febb3c in __gxx_personality_v0 () from /lib/x86_64-linux-gnu/libstdc++.so.6
+ *    #4  0x00007fab26f34de6 in ?? () from /lib/x86_64-linux-gnu/libgcc_s.so.1
+ *    #6  0x00007fab276a34c6 in __GI___pthread_unwind at ./nptl/unwind.c:130
+ *    #7  0x00007fab2769bd3a in __do_cancel () at ../sysdeps/nptl/pthreadP.h:280
+ *    #8  __GI___pthread_exit (value=value@entry=0x0) at ./nptl/pthread_exit.c:36
+ *    #9  0x000000000052e567 in PyThread_exit_thread () at ../Python/thread_pthread.h:370
+ *    #10 0x00000000004d60b5 in take_gil at ../Python/ceval_gil.h:224
+ *    #11 0x00000000004d65f9 in PyEval_RestoreThread at ../Python/ceval.c:467
+ *    #12 0x000000000060cce3 in setipaddr at ../Modules/socketmodule.c:1203
+ *    #13 0x00000000006101cd in socket_gethostbyname
+ */
+
+
+/* The compiler used for Python 2.7 on Windows doesn't include
+   either stdint.h or cstdint.h.
Nor does it understand nullptr or have + std::shared_ptr. = delete, etc Sigh. */ +#if defined(_MSC_VER) && _MSC_VER <= 1500 +typedef unsigned long long uint64_t; +typedef signed long long int64_t; +typedef unsigned int uint32_t; +// C++ defines NULL to be 0, which is ambiguous +// with an integer in certain cases, and won't autoconvert to a +// pointer in other cases. +#define nullptr NULL +#define G_HAS_METHOD_DELETE 0 +// Use G_EXPLICIT_OP as the prefix for operator methods +// that should be explicit. Old MSVC doesn't support explicit operator +// methods. +#define G_EXPLICIT_OP +#define G_NOEXCEPT throw() +// This version doesn't support "objects with internal linkage" +// in non-type template arguments. Translation: function pointer +// template arguments cannot be for static functions. +#define G_FP_TMPL_STATIC +#else +// Newer, reasonable compilers implementing C++11 or so. +#include +#define G_HAS_METHOD_DELETE 1 +#define G_EXPLICIT_OP explicit +#define G_NOEXCEPT noexcept +# if defined(__clang__) +# define G_FP_TMPL_STATIC static +# else +// GCC has no problem allowing static function pointers, but emits +// tons of warnings about "whose type uses the anonymous namespace [-Wsubobject-linkage]" +# define G_FP_TMPL_STATIC +# endif + +#endif + +#if G_HAS_METHOD_DELETE == 1 +# define G_NO_COPIES_OF_CLS(Cls) private: \ + Cls(const Cls& other) = delete; \ + Cls& operator=(const Cls& other) = delete + +# define G_NO_ASSIGNMENT_OF_CLS(Cls) private: \ + Cls& operator=(const Cls& other) = delete + +# define G_NO_COPY_CONSTRUCTOR_OF_CLS(Cls) private: \ + Cls(const Cls& other) = delete; +#else +# define G_NO_COPIES_OF_CLS(Cls) private: \ + Cls(const Cls& other); \ + Cls& operator=(const Cls& other) + +# define G_NO_ASSIGNMENT_OF_CLS(Cls) private: \ + Cls& operator=(const Cls& other) + +# define G_NO_COPY_CONSTRUCTOR_OF_CLS(Cls) private: \ + Cls(const Cls& other); +#endif + +// CAUTION: MSVC is stupidly picky: +// +// "The compiler ignores, without warning, any __declspec keywords +// placed after * or & and in front of the variable identifier in a +// declaration." +// (https://docs.microsoft.com/en-us/cpp/cpp/declspec?view=msvc-160) +// +// So pointer return types must be handled differently (because of the +// trailing *), or you get inscrutable compiler warnings like "error +// C2059: syntax error: ''" + +#if defined(__GNUC__) || defined(__clang__) +/* We used to check for GCC 4+ or 3.4+, but those compilers are + laughably out of date. Just assume they support it. */ +# define GREENLET_NOINLINE_SUPPORTED +# define GREENLET_NOINLINE(name) __attribute__((noinline)) name +# define GREENLET_NOINLINE_P(rtype, name) rtype __attribute__((noinline)) name +# define UNUSED(x) UNUSED_ ## x __attribute__((__unused__)) +#elif defined(_MSC_VER) +/* We used to check for && (_MSC_VER >= 1300) but that's also out of date. 
*/ +# define GREENLET_NOINLINE_SUPPORTED +# define GREENLET_NOINLINE(name) __declspec(noinline) name +# define GREENLET_NOINLINE_P(rtype, name) __declspec(noinline) rtype name +# define UNUSED(x) UNUSED_ ## x +#endif + +#if defined(_MSC_VER) +# define G_NOEXCEPT_WIN32 G_NOEXCEPT +#else +# define G_NOEXCEPT_WIN32 +#endif + + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_cpython_compat.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_cpython_compat.hpp new file mode 100644 index 00000000..3fd13ac2 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_cpython_compat.hpp @@ -0,0 +1,165 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +#ifndef GREENLET_CPYTHON_COMPAT_H +#define GREENLET_CPYTHON_COMPAT_H + +/** + * Helpers for compatibility with multiple versions of CPython. + */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" + +// These enable writing template functions or classes specialized +// based on the Python version. Write both versions of the function, +// one with the WHEN version, one with the WHEN_NOT version. +// Instantiate the template using the G_IS_PY37 macro. +struct GREENLET_WHEN_PY37 +{ + typedef GREENLET_WHEN_PY37* Yes; + // We really just want an alias, `using Yes = IsIt`, + // but old MSVC for Py27 doesn't support that. + typedef GREENLET_WHEN_PY37* IsIt; +}; + +struct GREENLET_WHEN_NOT_PY37 +{ + typedef GREENLET_WHEN_NOT_PY37* No; + typedef GREENLET_WHEN_NOT_PY37* IsIt; +}; + + +#if PY_VERSION_HEX >= 0x030700A3 +# define GREENLET_PY37 1 +typedef GREENLET_WHEN_PY37 G_IS_PY37; +#else +# define GREENLET_PY37 0 +typedef GREENLET_WHEN_NOT_PY37 G_IS_PY37; +#endif + + +#if PY_VERSION_HEX >= 0x30A00B1 +/* +Python 3.10 beta 1 changed tstate->use_tracing to a nested cframe member. +See https://github.com/python/cpython/pull/25276 +We have to save and restore this as well. +*/ +# define GREENLET_USE_CFRAME 1 +#else +# define GREENLET_USE_CFRAME 0 +#endif + +#if PY_VERSION_HEX >= 0x30B00A4 +/* +Greenlet won't compile on anything older than Python 3.11 alpha 4 (see +https://bugs.python.org/issue46090). Summary of breaking internal changes: +- Python 3.11 alpha 1 changed how frame objects are represented internally. + - https://github.com/python/cpython/pull/30122 +- Python 3.11 alpha 3 changed how recursion limits are stored. + - https://github.com/python/cpython/pull/29524 +- Python 3.11 alpha 4 changed how exception state is stored. It also includes a + change to help greenlet save and restore the interpreter frame "data stack". + - https://github.com/python/cpython/pull/30122 + - https://github.com/python/cpython/pull/30234 +*/ +# define GREENLET_PY311 1 +#else +# define GREENLET_PY311 0 +#endif + +#ifndef Py_SET_REFCNT +/* Py_REFCNT and Py_SIZE macros are converted to functions +https://bugs.python.org/issue39573 */ +# define Py_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) +#endif + +#ifndef _Py_DEC_REFTOTAL +/* _Py_DEC_REFTOTAL macro has been removed from Python 3.9 by: + https://github.com/python/cpython/commit/49932fec62c616ec88da52642339d83ae719e924 +*/ +# ifdef Py_REF_DEBUG +# define _Py_DEC_REFTOTAL _Py_RefTotal-- +# else +# define _Py_DEC_REFTOTAL +# endif +#endif +// Define these flags like Cython does if we're on an old version. 
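+// (Before the flag defines announced above, a brief aside: the
+// GREENLET_WHEN_PY37 helpers defined earlier in this file are meant to
+// be used roughly like this illustrative sketch; the function name
+// ``save_context`` is hypothetical. Declare one overload keyed on
+// ``Yes`` and one keyed on ``No``, then instantiate with G_IS_PY37:
+//
+//     template <typename IsPy37>
+//     void save_context(PyThreadState* t, typename IsPy37::Yes = nullptr);
+//     template <typename IsPy37>
+//     void save_context(PyThreadState* t, typename IsPy37::No = nullptr);
+//
+//     save_context<G_IS_PY37>(tstate); // the Yes overload on 3.7+,
+//                                      // the No overload otherwise.)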
+#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef Py_TPFLAGS_HAVE_VERSION_TAG + #define Py_TPFLAGS_HAVE_VERSION_TAG 0 +#endif + +#define G_TPFLAGS_DEFAULT Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_VERSION_TAG | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_HAVE_NEWBUFFER | Py_TPFLAGS_HAVE_GC + +#if PY_MAJOR_VERSION >= 3 +# define GNative_FromFormat PyUnicode_FromFormat +#else +# define GNative_FromFormat PyString_FromFormat +#endif + +#if PY_MAJOR_VERSION >= 3 +# define Greenlet_Intern PyUnicode_InternFromString +#else +# define Greenlet_Intern PyString_InternFromString +#endif + +#if PY_VERSION_HEX < 0x03090000 +// The official version only became available in 3.9 +# define PyObject_GC_IsTracked(o) _PyObject_GC_IS_TRACKED(o) +#endif + +#if PY_MAJOR_VERSION < 3 +struct PyModuleDef { + int unused; + const char* const m_name; + const char* m_doc; + Py_ssize_t m_size; + PyMethodDef* m_methods; + // Then several more fields we're not currently using. +}; +#define PyModuleDef_HEAD_INIT 1 +PyObject* PyModule_Create(PyModuleDef* m) +{ + return Py_InitModule(m->m_name, m->m_methods); +} +#endif + +// bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) +static inline void PyThreadState_EnterTracing(PyThreadState *tstate) +{ + tstate->tracing++; +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = 0; +#else + tstate->use_tracing = 0; +#endif +} +#endif + +// bpo-43760 added PyThreadState_LeaveTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) +static inline void PyThreadState_LeaveTracing(PyThreadState *tstate) +{ + tstate->tracing--; + int use_tracing = (tstate->c_tracefunc != NULL + || tstate->c_profilefunc != NULL); +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = use_tracing; +#else + tstate->use_tracing = use_tracing; +#endif +} +#endif + +#endif /* GREENLET_CPYTHON_COMPAT_H */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_exceptions.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_exceptions.hpp new file mode 100644 index 00000000..697df002 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_exceptions.hpp @@ -0,0 +1,106 @@ +#ifndef GREENLET_EXCEPTIONS_HPP +#define GREENLET_EXCEPTIONS_HPP + +#define PY_SSIZE_T_CLEAN +#include +#include +#include + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wunused-function" +#endif + +namespace greenlet { + + class PyErrOccurred : public std::runtime_error + { + public: + PyErrOccurred() : std::runtime_error("") + { + assert(PyErr_Occurred()); + } + + PyErrOccurred(PyObject* exc_kind, const char* const msg) + : std::runtime_error(msg) + { + PyErr_SetString(exc_kind, msg); + } + PyErrOccurred(PyObject* exc_kind, const std::string msg) + : std::runtime_error(msg) + { + // This copies the c_str, so we don't have any lifetime + // issues to worry about. 
+ PyErr_SetString(exc_kind, msg.c_str()); + } + }; + + class TypeError : public PyErrOccurred + { + public: + TypeError(const char* const what) + : PyErrOccurred(PyExc_TypeError, what) + { + } + TypeError(const std::string what) + : PyErrOccurred(PyExc_TypeError, what) + { + } + }; + + class ValueError : public PyErrOccurred + { + public: + ValueError(const char* const what) + : PyErrOccurred(PyExc_ValueError, what) + { + } + }; + + class AttributeError : public PyErrOccurred + { + public: + AttributeError(const char* const what) + : PyErrOccurred(PyExc_AttributeError, what) + { + } + }; + + /** + * Calls `Py_FatalError` when constructed, so you can't actually + * throw this. It just makes static analysis easier. + */ + class PyFatalError : public std::runtime_error + { + public: + PyFatalError(const char* const msg) + : std::runtime_error(msg) + { + Py_FatalError(msg); + } + }; + + static inline PyObject* + Require(PyObject* p) + { + if (!p) { + throw PyErrOccurred(); + } + return p; + }; + + static inline void + Require(const int retval) + { + if (retval < 0) { + throw PyErrOccurred(); + } + }; + + +}; +#ifdef __clang__ +# pragma clang diagnostic pop +#endif + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_greenlet.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_greenlet.hpp new file mode 100644 index 00000000..cc02c5c5 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_greenlet.hpp @@ -0,0 +1,1272 @@ +#ifndef GREENLET_GREENLET_HPP +#define GREENLET_GREENLET_HPP +/* + * Declarations of the core data structures. +*/ + +#define PY_SSIZE_T_CLEAN +#include + +#include "greenlet_compiler_compat.hpp" +#include "greenlet_refs.hpp" +#include "greenlet_cpython_compat.hpp" +#include "greenlet_allocator.hpp" + +using greenlet::refs::OwnedObject; +using greenlet::refs::OwnedGreenlet; +using greenlet::refs::OwnedMainGreenlet; +using greenlet::refs::BorrowedGreenlet; + +#if PY_VERSION_HEX < 0x30B00A6 +# define _PyCFrame CFrame +# define _PyInterpreterFrame _interpreter_frame +#endif + +// XXX: TODO: Work to remove all virtual functions +// for speed of calling and size of objects (no vtable). +// One pattern is the Curiously Recurring Template +namespace greenlet +{ + class ExceptionState + { + private: + G_NO_COPIES_OF_CLS(ExceptionState); + +#if PY_VERSION_HEX >= 0x030700A3 + // Even though these are borrowed objects, we actually own + // them, when they're not null. + // XXX: Express that in the API. 
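+        // Usage sketch (illustrative; the operators are declared just
+        // below and defined later in this header). A greenlet switch
+        // saves and restores the thread's exception state with the
+        // stream-style operators:
+        //
+        //     exception_state << tstate;  // capture from the thread (switch out)
+        //     exception_state >> tstate;  // restore into the thread, then clear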
+ private: + _PyErr_StackItem* exc_info; + _PyErr_StackItem exc_state; +#else + OwnedObject exc_value; +#if !GREENLET_PY311 + OwnedObject exc_type; + OwnedObject exc_traceback; +#endif +#endif + public: + ExceptionState(); + void operator<<(const PyThreadState *const tstate) G_NOEXCEPT; + void operator>>(PyThreadState* tstate) G_NOEXCEPT; + void clear() G_NOEXCEPT; + + int tp_traverse(visitproc visit, void* arg) G_NOEXCEPT; + void tp_clear() G_NOEXCEPT; + }; + + template + void operator<<(const PyThreadState *const tstate, T& exc); + + template + class PythonStateContext + {}; + + template<> + class PythonStateContext + { + protected: + greenlet::refs::OwnedContext _context; + public: + inline const greenlet::refs::OwnedContext& context() const + { + return this->_context; + } + inline greenlet::refs::OwnedContext& context() + { + return this->_context; + } + + inline void tp_clear() + { + this->_context.CLEAR(); + } + + template + inline static PyObject* context(T* tstate) + { + return tstate->context; + } + + template + inline static void context(T* tstate, PyObject* new_context) + { + tstate->context = new_context; + tstate->context_ver++; + } + }; + + + template<> + class PythonStateContext + { + public: + inline const greenlet::refs::OwnedContext& context() const + { + throw AttributeError("no context"); + } + + inline greenlet::refs::OwnedContext& context() + { + throw AttributeError("no context"); + } + + inline void tp_clear(){}; + + template + inline static PyObject* context(T* UNUSED(tstate)) + { + throw PyFatalError("This should never be called."); + } + + template + inline static void context(T* UNUSED(tstate), PyObject* UNUSED(new_context)) + { + throw PyFatalError("This should never be called."); + } + }; + + class PythonState : public PythonStateContext + { + public: + typedef greenlet::refs::OwnedReference OwnedFrame; + private: + G_NO_COPIES_OF_CLS(PythonState); + // We own this if we're suspended (although currently we don't + // tp_traverse into it; that's a TODO). If we're running, it's + // empty. If we get deallocated and *still* have a frame, it + // won't be reachable from the place that normally decref's + // it, so we need to do it (hence owning it). + OwnedFrame _top_frame; +#if GREENLET_USE_CFRAME + _PyCFrame* cframe; + int use_tracing; +#endif + int recursion_depth; + int trash_delete_nesting; +#if GREENLET_PY311 + _PyInterpreterFrame* current_frame; + _PyStackChunk* datastack_chunk; + PyObject** datastack_top; + PyObject** datastack_limit; +#endif + + public: + PythonState(); + // You can use this for testing whether we have a frame + // or not. It returns const so they can't modify it. + const OwnedFrame& top_frame() const G_NOEXCEPT; + + + void operator<<(const PyThreadState *const tstate) G_NOEXCEPT; + void operator>>(PyThreadState* tstate) G_NOEXCEPT; + void clear() G_NOEXCEPT; + + int tp_traverse(visitproc visit, void* arg, bool visit_top_frame) G_NOEXCEPT; + void tp_clear(bool own_top_frame) G_NOEXCEPT; + void set_initial_state(const PyThreadState* const tstate) G_NOEXCEPT; +#if GREENLET_USE_CFRAME + void set_new_cframe(_PyCFrame& frame) G_NOEXCEPT; +#endif + void will_switch_from(PyThreadState *const origin_tstate) G_NOEXCEPT; + void did_finish(PyThreadState* tstate) G_NOEXCEPT; + }; + + class StackState + { + // By having only plain C (POD) members, no virtual functions + // or bases, we get a trivial assignment operator generated + // for us. However, that's not safe since we do manage memory. 
+ // So we declare an assignment operator that only works if we + // don't have any memory allocated. (We don't use + // std::shared_ptr for reference counting just to keep this + // object small) + private: + char* _stack_start; + char* stack_stop; + char* stack_copy; + intptr_t _stack_saved; + StackState* stack_prev; + inline int copy_stack_to_heap_up_to(const char* const stop) G_NOEXCEPT; + inline void free_stack_copy() G_NOEXCEPT; + + public: + /** + * Creates a started, but inactive, state, using *current* + * as the previous. + */ + StackState(void* mark, StackState& current); + /** + * Creates an inactive, unstarted, state. + */ + StackState(); + ~StackState(); + StackState(const StackState& other); + StackState& operator=(const StackState& other); + inline void copy_heap_to_stack(const StackState& current) G_NOEXCEPT; + inline int copy_stack_to_heap(char* const stackref, const StackState& current) G_NOEXCEPT; + inline bool started() const G_NOEXCEPT; + inline bool main() const G_NOEXCEPT; + inline bool active() const G_NOEXCEPT; + inline void set_active() G_NOEXCEPT; + inline void set_inactive() G_NOEXCEPT; + inline intptr_t stack_saved() const G_NOEXCEPT; + inline char* stack_start() const G_NOEXCEPT; + static inline StackState make_main() G_NOEXCEPT; +#ifdef GREENLET_USE_STDIO + friend std::ostream& operator<<(std::ostream& os, const StackState& s); +#endif + }; +#ifdef GREENLET_USE_STDIO + std::ostream& operator<<(std::ostream& os, const StackState& s); +#endif + + class SwitchingArgs + { + private: + G_NO_ASSIGNMENT_OF_CLS(SwitchingArgs); + // If args and kwargs are both false (NULL), this is a *throw*, not a + // switch. PyErr_... must have been called already. + OwnedObject _args; + OwnedObject _kwargs; + public: + + SwitchingArgs() + {} + + SwitchingArgs(const OwnedObject& args, const OwnedObject& kwargs) + : _args(args), + _kwargs(kwargs) + {} + + SwitchingArgs(const SwitchingArgs& other) + : _args(other._args), + _kwargs(other._kwargs) + {} + + OwnedObject& args() + { + return this->_args; + } + + OwnedObject& kwargs() + { + return this->_kwargs; + } + + /** + * Moves ownership from the argument to this object. + */ + SwitchingArgs& operator<<=(SwitchingArgs& other) + { + if (this != &other) { + this->_args = other._args; + this->_kwargs = other._kwargs; + other.CLEAR(); + } + return *this; + } + + /** + * Acquires ownership of the argument (consumes the reference). + */ + SwitchingArgs& operator<<=(PyObject* args) + { + this->_args = OwnedObject::consuming(args); + this->_kwargs.CLEAR(); + return *this; + } + + /** + * Acquires ownership of the argument. + * + * Sets the args to be the given value; clears the kwargs. + */ + SwitchingArgs& operator<<=(OwnedObject& args) + { + assert(&args != &this->_args); + this->_args = args; + this->_kwargs.CLEAR(); + args.CLEAR(); + + return *this; + } + + G_EXPLICIT_OP operator bool() const G_NOEXCEPT + { + return this->_args || this->_kwargs; + } + + inline void CLEAR() + { + this->_args.CLEAR(); + this->_kwargs.CLEAR(); + } + }; + + class ThreadState; + + class UserGreenlet; + class MainGreenlet; + + class Greenlet + { + private: + G_NO_COPIES_OF_CLS(Greenlet); + private: + // XXX: Work to remove these. 
+ friend class ThreadState; + friend class UserGreenlet; + friend class MainGreenlet; + protected: + ExceptionState exception_state; + SwitchingArgs switch_args; + StackState stack_state; + PythonState python_state; + Greenlet(PyGreenlet* p, const StackState& initial_state); + public: + Greenlet(PyGreenlet* p); + virtual ~Greenlet(); + + template // maybe we can use a value here? + const OwnedObject context(const typename IsPy37::IsIt=nullptr) const; + + template + inline void context(refs::BorrowedObject new_context, typename IsPy37::IsIt=nullptr); + + inline SwitchingArgs& args() + { + return this->switch_args; + } + + virtual const refs::BorrowedMainGreenlet main_greenlet() const = 0; + + inline intptr_t stack_saved() const G_NOEXCEPT + { + return this->stack_state.stack_saved(); + } + + // This is used by the macro SLP_SAVE_STATE to compute the + // difference in stack sizes. It might be nice to handle the + // computation ourself, but the type of the result + // varies by platform, so doing it in the macro is the + // simplest way. + inline const char* stack_start() const G_NOEXCEPT + { + return this->stack_state.stack_start(); + } + + virtual OwnedObject throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state); + virtual OwnedObject g_switch() = 0; + /** + * Force the greenlet to appear dead. Used when it's not + * possible to throw an exception into a greenlet anymore. + * + * This losses access to the thread state and the main greenlet. + */ + virtual void murder_in_place(); + + /** + * Called when somebody notices we were running in a dead + * thread to allow cleaning up resources (because we can't + * raise GreenletExit into it anymore). + * This is very similar to ``murder_in_place()``, except that + * it DOES NOT lose the main greenlet or thread state. + */ + inline void deactivate_and_free(); + + + // Called when some thread wants to deallocate a greenlet + // object. + // The thread may or may not be the same thread the greenlet + // was running in. + // The thread state will be null if the thread the greenlet + // was running in was known to have exited. + void deallocing_greenlet_in_thread(const ThreadState* current_state); + + // TODO: Figure out how to make these non-public. + inline void slp_restore_state() G_NOEXCEPT; + inline int slp_save_state(char *const stackref) G_NOEXCEPT; + + inline bool is_currently_running_in_some_thread() const; + virtual bool belongs_to_thread(const ThreadState* state) const; + + inline bool started() const + { + return this->stack_state.started(); + } + inline bool active() const + { + return this->stack_state.active(); + } + inline bool main() const + { + return this->stack_state.main(); + } + virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const = 0; + + virtual const OwnedGreenlet parent() const = 0; + virtual void parent(const refs::BorrowedObject new_parent) = 0; + + inline const PythonState::OwnedFrame& top_frame() + { + return this->python_state.top_frame(); + } + + virtual const OwnedObject& run() const = 0; + virtual void run(const refs::BorrowedObject nrun) = 0; + + + virtual int tp_traverse(visitproc visit, void* arg); + virtual int tp_clear(); + + + // Return the thread state that the greenlet is running in, or + // null if the greenlet is not running or the thread is known + // to have exited. + virtual ThreadState* thread_state() const G_NOEXCEPT = 0; + + // Return true if the greenlet is known to have been running + // (active) in a thread that has now exited. 
+ virtual bool was_running_in_dead_thread() const G_NOEXCEPT = 0; + + // Return a borrowed greenlet that is the Python object + // this object represents. + virtual BorrowedGreenlet self() const G_NOEXCEPT = 0; + + protected: + inline void release_args(); + + // The functions that must not be inlined are declared virtual. + // We also mark them as protected, not private, so that the + // compiler is forced to call them through a function pointer. + // (A sufficiently smart compiler could directly call a private + // virtual function since it can never be overridden in a + // subclass). + + // Also TODO: Switch away from integer error codes and to enums, + // or throw exceptions when possible. + struct switchstack_result_t + { + int status; + Greenlet* the_state_that_switched; + OwnedGreenlet origin_greenlet; + + switchstack_result_t() + : status(0), + the_state_that_switched(nullptr) + {} + + switchstack_result_t(int err) + : status(err), + the_state_that_switched(nullptr) + {} + + switchstack_result_t(int err, Greenlet* state, OwnedGreenlet& origin) + : status(err), + the_state_that_switched(state), + origin_greenlet(origin) + { + } + + switchstack_result_t(int err, Greenlet* state, const BorrowedGreenlet& origin) + : status(err), + the_state_that_switched(state), + origin_greenlet(origin) + { + } + + switchstack_result_t& operator=(const switchstack_result_t& other) + { + this->status = other.status; + this->the_state_that_switched = other.the_state_that_switched; + this->origin_greenlet = other.origin_greenlet; + return *this; + } + }; + + // Returns the previous greenlet we just switched away from. + virtual OwnedGreenlet g_switchstack_success() G_NOEXCEPT; + + + // Check the preconditions for switching to this greenlet; if they + // aren't met, throws PyErrOccurred. Most callers will want to + // catch this and clear the arguments + inline void check_switch_allowed() const; + class GreenletStartedWhileInPython : public std::runtime_error + { + public: + GreenletStartedWhileInPython() : std::runtime_error("") + {} + }; + + protected: + + + /** + Perform a stack switch into this greenlet. + + This temporarily sets the global variable + ``switching_thread_state`` to this greenlet; as soon as the + call to ``slp_switch`` completes, this is reset to NULL. + Consequently, this depends on the GIL. + + TODO: Adopt the stackman model and pass ``slp_switch`` a + callback function and context pointer; this eliminates the + need for global variables altogether. + + Because the stack switch happens in this function, this + function can't use its own stack (local) variables, set + before the switch, and then accessed after the switch. + + Further, you con't even access ``g_thread_state_global`` + before and after the switch from the global variable. + Because it is thread local some compilers cache it in a + register/on the stack, notably new versions of MSVC; this + breaks with strange crashes sometime later, because writing + to anything in ``g_thread_state_global`` after the switch + is actually writing to random memory. For this reason, we + call a non-inlined function to finish the operation. (XXX: + The ``/GT`` MSVC compiler argument probably fixes that.) + + It is very important that stack switch is 'atomic', i.e. no + calls into other Python code allowed (except very few that + are safe), because global variables are very fragile. (This + should no longer be the case with thread-local variables.) 
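+
+      To make that concrete (an illustrative sketch, not code from
+      this file): a pattern such as
+
+          PyThreadState* before = PyThreadState_GET();
+          this->g_switchstack();
+          do_something(before); // WRONG: local read after the switch
+
+      is unsafe; anything needed on the other side of the switch must
+      be re-fetched there, or handled in the separate non-inlined
+      function mentioned above.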
+ + */ + switchstack_result_t g_switchstack(void); + private: + OwnedObject g_switch_finish(const switchstack_result_t& err); + + }; + + class UserGreenlet : public Greenlet + { + private: + static greenlet::PythonAllocator allocator; + BorrowedGreenlet _self; + OwnedMainGreenlet _main_greenlet; + OwnedObject _run_callable; + OwnedGreenlet _parent; + public: + static void* operator new(size_t UNUSED(count)); + static void operator delete(void* ptr); + + UserGreenlet(PyGreenlet* p, BorrowedGreenlet the_parent); + virtual ~UserGreenlet(); + + virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const; + virtual bool was_running_in_dead_thread() const G_NOEXCEPT; + virtual ThreadState* thread_state() const G_NOEXCEPT; + virtual OwnedObject g_switch(); + virtual const OwnedObject& run() const + { + if (this->started() || !this->_run_callable) { + throw AttributeError("run"); + } + return this->_run_callable; + } + virtual void run(const refs::BorrowedObject nrun); + + virtual const OwnedGreenlet parent() const; + virtual void parent(const refs::BorrowedObject new_parent); + + virtual const refs::BorrowedMainGreenlet main_greenlet() const; + + virtual BorrowedGreenlet self() const G_NOEXCEPT; + virtual void murder_in_place(); + virtual bool belongs_to_thread(const ThreadState* state) const; + virtual int tp_traverse(visitproc visit, void* arg); + virtual int tp_clear(); + class ParentIsCurrentGuard + { + private: + OwnedGreenlet oldparent; + UserGreenlet* greenlet; + G_NO_COPIES_OF_CLS(ParentIsCurrentGuard); + public: + ParentIsCurrentGuard(UserGreenlet* p, const ThreadState& thread_state); + ~ParentIsCurrentGuard(); + }; + virtual OwnedObject throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state); + protected: + virtual switchstack_result_t g_initialstub(void* mark); + private: + void inner_bootstrap(OwnedGreenlet& origin_greenlet, OwnedObject& run) G_NOEXCEPT_WIN32; + }; + + class MainGreenlet : public Greenlet + { + private: + static greenlet::PythonAllocator allocator; + refs::BorrowedMainGreenlet _self; + ThreadState* _thread_state; + G_NO_COPIES_OF_CLS(MainGreenlet); + public: + static void* operator new(size_t UNUSED(count)); + static void operator delete(void* ptr); + + MainGreenlet(refs::BorrowedMainGreenlet::PyType*, ThreadState*); + virtual ~MainGreenlet(); + + + virtual const OwnedObject& run() const; + virtual void run(const refs::BorrowedObject nrun); + + virtual const OwnedGreenlet parent() const; + virtual void parent(const refs::BorrowedObject new_parent); + + virtual const refs::BorrowedMainGreenlet main_greenlet() const; + + virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const; + virtual bool was_running_in_dead_thread() const G_NOEXCEPT; + virtual ThreadState* thread_state() const G_NOEXCEPT; + void thread_state(ThreadState*) G_NOEXCEPT; + virtual OwnedObject g_switch(); + virtual BorrowedGreenlet self() const G_NOEXCEPT; + virtual int tp_traverse(visitproc visit, void* arg); + }; + +}; + +template +void greenlet::operator<<(const PyThreadState *const lhs, T& rhs) +{ + rhs.operator<<(lhs); +} + +using greenlet::ExceptionState; + +ExceptionState::ExceptionState() +{ + this->clear(); +} + +#if PY_VERSION_HEX >= 0x030700A3 +// ******** Python 3.7 and above ********* +void ExceptionState::operator<<(const PyThreadState *const tstate) G_NOEXCEPT +{ + this->exc_info = tstate->exc_info; + this->exc_state = tstate->exc_state; +} + +void ExceptionState::operator>>(PyThreadState *const tstate) G_NOEXCEPT +{ + tstate->exc_state = 
this->exc_state; + tstate->exc_info = + this->exc_info ? this->exc_info : &tstate->exc_state; + this->clear(); +} + +void ExceptionState::clear() G_NOEXCEPT +{ + this->exc_info = nullptr; + this->exc_state.exc_value = nullptr; +#if !GREENLET_PY311 + this->exc_state.exc_type = nullptr; + this->exc_state.exc_traceback = nullptr; +#endif + this->exc_state.previous_item = nullptr; +} + +int ExceptionState::tp_traverse(visitproc visit, void* arg) G_NOEXCEPT +{ + Py_VISIT(this->exc_state.exc_value); +#if !GREENLET_PY311 + Py_VISIT(this->exc_state.exc_type); + Py_VISIT(this->exc_state.exc_traceback); +#endif + return 0; +} + +void ExceptionState::tp_clear() G_NOEXCEPT +{ + Py_CLEAR(this->exc_state.exc_value); +#if !GREENLET_PY311 + Py_CLEAR(this->exc_state.exc_type); + Py_CLEAR(this->exc_state.exc_traceback); +#endif +} +#else +// ********** Python 3.6 and below ******** +void ExceptionState::operator<<(const PyThreadState *const tstate) G_NOEXCEPT +{ + this->exc_value.steal(tstate->exc_value); +#if !GREENLET_PY311 + this->exc_type.steal(tstate->exc_type); + this->exc_traceback.steal(tstate->exc_traceback); +#endif +} + +void ExceptionState::operator>>(PyThreadState *const tstate) G_NOEXCEPT +{ + tstate->exc_value <<= this->exc_value; +#if !GREENLET_PY311 + tstate->exc_type <<= this->exc_type; + tstate->exc_traceback <<= this->exc_traceback; +#endif + this->clear(); +} + +void ExceptionState::clear() G_NOEXCEPT +{ + this->exc_value = nullptr; +#if !GREENLET_PY311 + this->exc_type = nullptr; + this->exc_traceback = nullptr; +#endif +} + +int ExceptionState::tp_traverse(visitproc visit, void* arg) G_NOEXCEPT +{ + Py_VISIT(this->exc_value.borrow()); +#if !GREENLET_PY311 + Py_VISIT(this->exc_type.borrow()); + Py_VISIT(this->exc_traceback.borrow()); +#endif + return 0; +} + +void ExceptionState::tp_clear() G_NOEXCEPT +{ + this->exc_value.CLEAR(); +#if !GREENLET_PY311 + this->exc_type.CLEAR(); + this->exc_traceback.CLEAR(); +#endif +} +#endif + + +using greenlet::PythonState; + +PythonState::PythonState() + : _top_frame() +#if GREENLET_USE_CFRAME + ,cframe(nullptr) + ,use_tracing(0) +#endif + ,recursion_depth(0) + ,trash_delete_nesting(0) +#if GREENLET_PY311 + ,current_frame(nullptr) + ,datastack_chunk(nullptr) + ,datastack_top(nullptr) + ,datastack_limit(nullptr) +#endif +{ +#if GREENLET_USE_CFRAME + /* + The PyThreadState->cframe pointer usually points to memory on + the stack, alloceted in a call into PyEval_EvalFrameDefault. + + Initially, before any evaluation begins, it points to the + initial PyThreadState object's ``root_cframe`` object, which is + statically allocated for the lifetime of the thread. + + A greenlet can last for longer than a call to + PyEval_EvalFrameDefault, so we can't set its ``cframe`` pointer + to be the current ``PyThreadState->cframe``; nor could we use + one from the greenlet parent for the same reason. Yet a further + no: we can't allocate one scoped to the greenlet and then + destroy it when the greenlet is deallocated, because inside the + interpreter the _PyCFrame objects form a linked list, and that too + can result in accessing memory beyond its dynamic lifetime (if + the greenlet doesn't actually finish before it dies, its entry + could still be in the list). + + Using the ``root_cframe`` is problematic, though, because its + members are never modified by the interpreter and are set to 0, + meaning that its ``use_tracing`` flag is never updated. 
We don't + want to modify that value in the ``root_cframe`` ourself: it + *shouldn't* matter much because we should probably never get + back to the point where that's the only cframe on the stack; + even if it did matter, the major consequence of an incorrect + value for ``use_tracing`` is that if its true the interpreter + does some extra work --- however, it's just good code hygiene. + + Our solution: before a greenlet runs, after its initial + creation, it uses the ``root_cframe`` just to have something to + put there. However, once the greenlet is actually switched to + for the first time, ``g_initialstub`` (which doesn't actually + "return" while the greenlet is running) stores a new _PyCFrame on + its local stack, and copies the appropriate values from the + currently running _PyCFrame; this is then made the _PyCFrame for the + newly-minted greenlet. ``g_initialstub`` then proceeds to call + ``glet.run()``, which results in ``PyEval_...`` adding the + _PyCFrame to the list. Switches continue as normal. Finally, when + the greenlet finishes, the call to ``glet.run()`` returns and + the _PyCFrame is taken out of the linked list and the stack value + is now unused and free to expire. + + XXX: I think we can do better. If we're deallocing in the same + thread, can't we traverse the list and unlink our frame? + Can we just keep a reference to the thread state in case we + dealloc in another thread? (Is that even possible if we're still + running and haven't returned from g_initialstub?) + */ + this->cframe = &PyThreadState_GET()->root_cframe; +#endif +} + +void PythonState::operator<<(const PyThreadState *const tstate) G_NOEXCEPT +{ +#if GREENLET_PY37 + this->_context.steal(tstate->context); +#endif +#if GREENLET_USE_CFRAME + /* + IMPORTANT: ``cframe`` is a pointer into the STACK. Thus, because + the call to ``slp_switch()`` changes the contents of the stack, + you cannot read from ``ts_current->cframe`` after that call and + necessarily get the same values you get from reading it here. + Anything you need to restore from now to then must be saved in a + global/threadlocal variable (because we can't use stack + variables here either). For things that need to persist across + the switch, use `will_switch_from`. + */ + this->cframe = tstate->cframe; + this->use_tracing = tstate->cframe->use_tracing; +#endif +#if GREENLET_PY311 + this->recursion_depth = tstate->recursion_limit - tstate->recursion_remaining; + this->current_frame = tstate->cframe->current_frame; + this->datastack_chunk = tstate->datastack_chunk; + this->datastack_top = tstate->datastack_top; + this->datastack_limit = tstate->datastack_limit; + PyFrameObject *frame = PyThreadState_GetFrame((PyThreadState *)tstate); + Py_XDECREF(frame); // PyThreadState_GetFrame gives us a new reference. + this->_top_frame.steal(frame); +#else + this->recursion_depth = tstate->recursion_depth; + this->_top_frame.steal(tstate->frame); +#endif + + // All versions of Python. + this->trash_delete_nesting = tstate->trash_delete_nesting; +} + +void PythonState::operator>>(PyThreadState *const tstate) G_NOEXCEPT +{ +#if GREENLET_PY37 + tstate->context = this->_context.relinquish_ownership(); + /* Incrementing this value invalidates the contextvars cache, + which would otherwise remain valid across switches */ + tstate->context_ver++; +#endif +#if GREENLET_USE_CFRAME + tstate->cframe = this->cframe; + /* + If we were tracing, we need to keep tracing. + There should never be the possibility of hitting the + root_cframe here. 
See note above about why we can't + just copy this from ``origin->cframe->use_tracing``. + */ + tstate->cframe->use_tracing = this->use_tracing; +#endif +#if GREENLET_PY311 + tstate->recursion_remaining = tstate->recursion_limit - this->recursion_depth; + tstate->cframe->current_frame = this->current_frame; + tstate->datastack_chunk = this->datastack_chunk; + tstate->datastack_top = this->datastack_top; + tstate->datastack_limit = this->datastack_limit; + this->_top_frame.relinquish_ownership(); +#else + tstate->frame = this->_top_frame.relinquish_ownership(); + tstate->recursion_depth = this->recursion_depth; +#endif + // All versions of Python. + tstate->trash_delete_nesting = this->trash_delete_nesting; +} + +void PythonState::will_switch_from(PyThreadState *const origin_tstate) G_NOEXCEPT +{ +#if GREENLET_USE_CFRAME + // The weird thing is, we don't actually save this for an + // effect on the current greenlet, it's saved for an + // effect on the target greenlet. That is, we want + // continuity of this setting across the greenlet switch. + this->use_tracing = origin_tstate->cframe->use_tracing; +#endif +} + +void PythonState::set_initial_state(const PyThreadState* const tstate) G_NOEXCEPT +{ + this->_top_frame = nullptr; +#if GREENLET_PY311 + this->recursion_depth = tstate->recursion_limit - tstate->recursion_remaining; +#else + this->recursion_depth = tstate->recursion_depth; +#endif +} +// TODO: Better state management about when we own the top frame. +int PythonState::tp_traverse(visitproc visit, void* arg, bool own_top_frame) G_NOEXCEPT +{ +#if GREENLET_PY37 + Py_VISIT(this->_context.borrow()); +#endif + if (own_top_frame) { + Py_VISIT(this->_top_frame.borrow()); + } + return 0; +} + +void PythonState::tp_clear(bool own_top_frame) G_NOEXCEPT +{ + PythonStateContext::tp_clear(); + // If we get here owning a frame, + // we got dealloc'd without being finished. We may or may not be + // in the same thread. + if (own_top_frame) { + this->_top_frame.CLEAR(); + } +} + +#if GREENLET_USE_CFRAME +void PythonState::set_new_cframe(_PyCFrame& frame) G_NOEXCEPT +{ + frame = *PyThreadState_GET()->cframe; + /* Make the target greenlet refer to the stack value. */ + this->cframe = &frame; + /* + And restore the link to the previous frame so this one gets + unliked appropriately. + */ + this->cframe->previous = &PyThreadState_GET()->root_cframe; +} +#endif + +const PythonState::OwnedFrame& PythonState::top_frame() const G_NOEXCEPT +{ + return this->_top_frame; +} + +void PythonState::did_finish(PyThreadState* tstate) G_NOEXCEPT +{ +#if GREENLET_PY311 + // See https://github.com/gevent/gevent/issues/1924 and + // https://github.com/python-greenlet/greenlet/issues/328. In + // short, Python 3.11 allocates memory for frames as a sort of + // linked list that's kept as part of PyThreadState in the + // ``datastack_chunk`` member and friends. These are saved and + // restored as part of switching greenlets. + // + // When we initially switch to a greenlet, we set those to NULL. + // That causes the frame management code to treat this like a + // brand new thread and start a fresh list of chunks, beginning + // with a new "root" chunk. As we make calls in this greenlet, + // those chunks get added, and as calls return, they get popped. + // But the frame code (pystate.c) is careful to make sure that the + // root chunk never gets popped. + // + // Thus, when a greenlet exits for the last time, there will be at + // least a single root chunk that we must be responsible for + // deallocating. 
+ // + // The complex part is that these chunks are allocated and freed + // using ``_PyObject_VirtualAlloc``/``Free``. Those aren't public + // functions, and they aren't exported for linking. It so happens + // that we know they are just thin wrappers around the Arena + // allocator, so we can use that directly to deallocate in a + // compatible way. + // + // CAUTION: Check this implementation detail on every major version. + // + // It might be nice to be able to do this in our destructor, but + // can we be sure that no one else is using that memory? Plus, as + // described below, our pointers may not even be valid anymore. As + // a special case, there is one time that we know we can do this, + // and that's from the destructor of the associated UserGreenlet + // (NOT main greenlet) + PyObjectArenaAllocator alloc; + _PyStackChunk* chunk = nullptr; + if (tstate) { + // We really did finish, we can never be switched to again. + chunk = tstate->datastack_chunk; + // Unfortunately, we can't do much sanity checking. Our + // this->datastack_chunk pointer is out of date (evaluation may + // have popped down through it already) so we can't verify that + // we deallocate it. I don't think we can even check datastack_top + // for the same reason. + + PyObject_GetArenaAllocator(&alloc); + tstate->datastack_chunk = nullptr; + tstate->datastack_limit = nullptr; + tstate->datastack_top = nullptr; + + } + else if (this->datastack_chunk) { + // The UserGreenlet (NOT the main greenlet!) is being deallocated. If we're + // still holding a stack chunk, it's garbage because we know + // we can never switch back to let cPython clean it up. + // Because the last time we got switched away from, and we + // haven't run since then, we know our chain is valid and can + // be dealloced. + chunk = this->datastack_chunk; + PyObject_GetArenaAllocator(&alloc); + } + + if (alloc.free && chunk) { + // In case the arena mechanism has been torn down already. + while (chunk) { + _PyStackChunk *prev = chunk->previous; + chunk->previous = nullptr; + alloc.free(alloc.ctx, chunk, chunk->size); + chunk = prev; + } + } + + this->datastack_chunk = nullptr; + this->datastack_limit = nullptr; + this->datastack_top = nullptr; +#endif +} + + + + +using greenlet::StackState; + +#ifdef GREENLET_USE_STDIO +#include +using std::cerr; +using std::endl; + +std::ostream& greenlet::operator<<(std::ostream& os, const StackState& s) +{ + os << "StackState(stack_start=" << (void*)s._stack_start + << ", stack_stop=" << (void*)s.stack_stop + << ", stack_copy=" << (void*)s.stack_copy + << ", stack_saved=" << s._stack_saved + << ", stack_prev=" << s.stack_prev + << ", addr=" << &s + << ")"; + return os; +} +#endif + +StackState::StackState(void* mark, StackState& current) + : _stack_start(nullptr), + stack_stop((char*)mark), + stack_copy(nullptr), + _stack_saved(0), + /* Skip a dying greenlet */ + stack_prev(current._stack_start + ? 
&current
+            : current.stack_prev)
+{
+}
+
+StackState::StackState()
+    : _stack_start(nullptr),
+      stack_stop(nullptr),
+      stack_copy(nullptr),
+      _stack_saved(0),
+      stack_prev(nullptr)
+{
+}
+
+StackState::StackState(const StackState& other)
+// can't use a delegating constructor because of
+// MSVC for Python 2.7
+    : _stack_start(nullptr),
+      stack_stop(nullptr),
+      stack_copy(nullptr),
+      _stack_saved(0),
+      stack_prev(nullptr)
+{
+    this->operator=(other);
+}
+
+StackState& StackState::operator=(const StackState& other)
+{
+    if (&other == this) {
+        return *this;
+    }
+    if (other._stack_saved) {
+        throw std::runtime_error("Refusing to steal memory.");
+    }
+
+    // If we have memory allocated, dispose of it
+    this->free_stack_copy();
+
+    this->_stack_start = other._stack_start;
+    this->stack_stop = other.stack_stop;
+    this->stack_copy = other.stack_copy;
+    this->_stack_saved = other._stack_saved;
+    this->stack_prev = other.stack_prev;
+    return *this;
+}
+
+inline void StackState::free_stack_copy() G_NOEXCEPT
+{
+    PyMem_Free(this->stack_copy);
+    this->stack_copy = nullptr;
+    this->_stack_saved = 0;
+}
+
+inline void StackState::copy_heap_to_stack(const StackState& current) G_NOEXCEPT
+{
+    // cerr << "copy_heap_to_stack" << endl
+    //      << "\tFrom   : " << *this << endl
+    //      << "\tCurrent:" << current
+    //      << endl;
+    /* Restore the heap copy back into the C stack */
+    if (this->_stack_saved != 0) {
+        memcpy(this->_stack_start, this->stack_copy, this->_stack_saved);
+        this->free_stack_copy();
+    }
+    StackState* owner = const_cast<StackState*>(&current);
+    if (!owner->_stack_start) {
+        owner = owner->stack_prev; /* greenlet is dying, skip it */
+    }
+    while (owner && owner->stack_stop <= this->stack_stop) {
+        // cerr << "\tOwner: " << owner << endl;
+        owner = owner->stack_prev; /* find greenlet with more stack */
+    }
+    this->stack_prev = owner;
+    // cerr << "\tFinished with: " << *this << endl;
+}
+
+inline int StackState::copy_stack_to_heap_up_to(const char* const stop) G_NOEXCEPT
+{
+    /* Save more of g's stack into the heap -- at least up to 'stop'
+       g->stack_stop |________|
+                     |        |
+                     |    __ stop       . . . . .
+                     |        |    ==>  .       .
+                     |________|          _______
+                     |        |         |       |
+                     |        |         |       |
+      g->stack_start |        |         |_______| g->stack_copy
+     */
+    intptr_t sz1 = this->_stack_saved;
+    intptr_t sz2 = stop - this->_stack_start;
+    assert(this->_stack_start);
+    if (sz2 > sz1) {
+        char* c = (char*)PyMem_Realloc(this->stack_copy, sz2);
+        if (!c) {
+            PyErr_NoMemory();
+            return -1;
+        }
+        memcpy(c + sz1, this->_stack_start + sz1, sz2 - sz1);
+        this->stack_copy = c;
+        this->_stack_saved = sz2;
+    }
+    return 0;
+}
+
+inline int StackState::copy_stack_to_heap(char* const stackref,
+                                          const StackState& current) G_NOEXCEPT
+{
+    // cerr << "copy_stack_to_heap: " << endl
+    //      << "\tstackref: " << (void*)stackref << endl
+    //      << "\tthis: " << *this << endl
+    //      << "\tcurrent: " << current
+    //      << endl;
+    /* must free all the C stack up to target_stop */
+    const char* const target_stop = this->stack_stop;
+
+    StackState* owner = const_cast<StackState*>(&current);
+    assert(owner->_stack_saved == 0); // everything is present on the stack
+    if (!owner->_stack_start) {
+        // cerr << "\tcurrent is dead; using: " << owner->stack_prev << endl;
+        owner = owner->stack_prev; /* not saved if dying */
+    }
+    else {
+        owner->_stack_start = stackref;
+    }
+
+    while (owner->stack_stop < target_stop) {
+        // cerr << "\tCopying from " << *owner << endl;
+        /* ts_current is entirely within the area to free */
+        if (owner->copy_stack_to_heap_up_to(owner->stack_stop)) {
+            return -1; /* XXX */
+        }
+        owner = owner->stack_prev;
+    }
+    if (owner != this) {
+        if (owner->copy_stack_to_heap_up_to(target_stop)) {
+            return -1; /* XXX */
+        }
+    }
+    return 0;
+}
+
+inline bool StackState::started() const G_NOEXCEPT
+{
+    return this->stack_stop != nullptr;
+}
+
+inline bool StackState::main() const G_NOEXCEPT
+{
+    return this->stack_stop == (char*)-1;
+}
+
+inline bool StackState::active() const G_NOEXCEPT
+{
+    return this->_stack_start != nullptr;
+}
+
+inline void StackState::set_active() G_NOEXCEPT
+{
+    assert(this->_stack_start == nullptr);
+    this->_stack_start = (char*)1;
+}
+
+inline void StackState::set_inactive() G_NOEXCEPT
+{
+    this->_stack_start = nullptr;
+    // XXX: What if we still have memory out there?
+    // That case is actually triggered by
+    // test_issue251_issue252_explicit_reference_not_collectable (greenlet.tests.test_leaks.TestLeaks)
+    // and
+    // test_issue251_issue252_need_to_collect_in_background
+    // (greenlet.tests.test_leaks.TestLeaks)
+    //
+    // Those objects never get deallocated, so the destructor never
+    // runs.
+    // It *seems* safe to clean up the memory here?
+ if (this->_stack_saved) { + this->free_stack_copy(); + } +} + +inline intptr_t StackState::stack_saved() const G_NOEXCEPT +{ + return this->_stack_saved; +} + +inline char* StackState::stack_start() const G_NOEXCEPT +{ + return this->_stack_start; +} + + +inline StackState StackState::make_main() G_NOEXCEPT +{ + StackState s; + s._stack_start = (char*)1; + s.stack_stop = (char*)-1; + return s; +} + +StackState::~StackState() +{ + if (this->_stack_saved != 0) { + this->free_stack_copy(); + } +} + +using greenlet::Greenlet; + +bool Greenlet::is_currently_running_in_some_thread() const +{ + return this->stack_state.active() && !this->python_state.top_frame(); +} + + + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_internal.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_internal.hpp new file mode 100644 index 00000000..4f7fe6bb --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_internal.hpp @@ -0,0 +1,106 @@ +/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */ +#ifndef GREENLET_INTERNAL_H +#define GREENLET_INTERNAL_H +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wunused-function" +# pragma clang diagnostic ignored "-Wmissing-field-initializers" +# pragma clang diagnostic ignored "-Wunused-variable" +#endif + +/** + * Implementation helpers. + * + * C++ templates and inline functions should go here. + */ +#define PY_SSIZE_T_CLEAN +#include "greenlet_compiler_compat.hpp" +#include "greenlet_cpython_compat.hpp" +#include "greenlet_exceptions.hpp" +#include "greenlet_greenlet.hpp" +#include "greenlet_allocator.hpp" + +#include +#include + +#define GREENLET_MODULE +struct _greenlet; +typedef struct _greenlet PyGreenlet; +namespace greenlet { + + class ThreadState; + +}; + + +#define implementation_ptr_t greenlet::Greenlet* + + +#include "greenlet.h" + +G_FP_TMPL_STATIC inline void +greenlet::refs::MainGreenletExactChecker(void *p) +{ + if (!p) { + return; + } + // We control the class of the main greenlet exactly. + if (Py_TYPE(p) != &PyGreenlet_Type) { + std::string err("MainGreenlet: Expected exactly a greenlet, not a "); + err += Py_TYPE(p)->tp_name; + throw greenlet::TypeError(err); + } + + // Greenlets from dead threads no longer respond to main() with a + // true value; so in that case we need to perform an additional + // check. + Greenlet* g = ((PyGreenlet*)p)->pimpl; + if (g->main()) { + return; + } + if (!dynamic_cast(g)) { + std::string err("MainGreenlet: Expected exactly a main greenlet, not a "); + err += Py_TYPE(p)->tp_name; + throw greenlet::TypeError(err); + } +} + + + +template +inline greenlet::Greenlet* greenlet::refs::_OwnedGreenlet::operator->() const G_NOEXCEPT +{ + return reinterpret_cast(this->p)->pimpl; +} + +template +inline greenlet::Greenlet* greenlet::refs::_BorrowedGreenlet::operator->() const G_NOEXCEPT +{ + return reinterpret_cast(this->p)->pimpl; +} + +#include +#include + + +extern PyTypeObject PyGreenlet_Type; + + + +/** + * Forward declarations needed in multiple files. 
+ */ +static PyGreenlet* green_create_main(greenlet::ThreadState*); +static PyObject* green_switch(PyGreenlet* self, PyObject* args, PyObject* kwargs); +static int green_is_gc(BorrowedGreenlet self); + +#ifdef __clang__ +# pragma clang diagnostic pop +#endif + + +#endif + +// Local Variables: +// flycheck-clang-include-path: ("../../include" "/opt/local/Library/Frameworks/Python.framework/Versions/3.10/include/python3.10") +// End: diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_refs.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_refs.hpp new file mode 100644 index 00000000..ed1ef195 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_refs.hpp @@ -0,0 +1,1062 @@ +#ifndef GREENLET_REFS_HPP +#define GREENLET_REFS_HPP + +#define PY_SSIZE_T_CLEAN +#include +//#include "greenlet_internal.hpp" +#include "greenlet_compiler_compat.hpp" +#include "greenlet_cpython_compat.hpp" +#include "greenlet_exceptions.hpp" + +struct _greenlet; +struct _PyMainGreenlet; + +typedef struct _greenlet PyGreenlet; +extern PyTypeObject PyGreenlet_Type; + + +#ifdef GREENLET_USE_STDIO +#include +using std::cerr; +using std::endl; +#endif + +namespace greenlet +{ + class Greenlet; + + namespace refs + { + // Type checkers throw a TypeError if the argument is not + // null, and isn't of the required Python type. + // (We can't use most of the defined type checkers + // like PyList_Check, etc, directly, because they are + // implemented as macros.) + typedef void (*TypeChecker)(void*); + + G_FP_TMPL_STATIC inline void + NoOpChecker(void*) + { + return; + } + + G_FP_TMPL_STATIC inline void + GreenletChecker(void *p) + { + if (!p) { + return; + } + + PyTypeObject* typ = Py_TYPE(p); + // fast, common path. (PyObject_TypeCheck is a macro or + // static inline function, and it also does a + // direct comparison of the type pointers, but its fast + // path only handles one type) + if (typ == &PyGreenlet_Type) { + return; + } + + if (!PyObject_TypeCheck(p, &PyGreenlet_Type)) { + std::string err("GreenletChecker: Expected any type of greenlet, not "); + err += Py_TYPE(p)->tp_name; + throw TypeError(err); + } + } + + G_FP_TMPL_STATIC inline void + MainGreenletExactChecker(void *p); + + template + class PyObjectPointer; + + template + class OwnedReference; + + + template + class BorrowedReference; + + typedef BorrowedReference BorrowedObject; + typedef OwnedReference OwnedObject; + + class ImmortalObject; + + template + class _OwnedGreenlet; + + typedef _OwnedGreenlet OwnedGreenlet; + typedef _OwnedGreenlet OwnedMainGreenlet; + + template + class _BorrowedGreenlet; + + typedef _BorrowedGreenlet BorrowedGreenlet; + + G_FP_TMPL_STATIC inline void + ContextExactChecker(void *p) + { + if (!p) { + return; + } +#if GREENLET_PY37 + if (!PyContext_CheckExact(p)) { + throw TypeError( + "greenlet context must be a contextvars.Context or None" + ); + } +#endif + } + + typedef OwnedReference OwnedContext; + } +} + +namespace greenlet { + + + namespace refs { + // A set of classes to make reference counting rules in python + // code explicit. + // + // Rules of use: + // (1) Functions returning a new reference that the caller of the + // function is expected to dispose of should return a + // ``OwnedObject`` object. This object automatically releases its + // reference when it goes out of scope. It works like a ``std::shared_ptr`` + // and can be copied or used as a function parameter (but don't do + // that). 
Note that constructing a ``OwnedObject`` from a + // PyObject* steals the reference. + // (2) Parameters to functions should be either a + // ``OwnedObject&``, or, more generally, a ``PyObjectPointer&``. + // If the function needs to create its own new reference, it can + // do so by copying to a local ``OwnedObject``. + // (3) Functions returning an existing pointer that is NOT + // incref'd, and which the caller MUST NOT decref, + // should return a ``BorrowedObject``. + + // + // For a class with a single pointer member, whose constructor + // does nothing but copy a pointer parameter into the member, and + // which can then be converted back to the pointer type, compilers + // generate code that's the same as just passing the pointer. + // That is, func(BorrowedObject x) called like ``PyObject* p = + // ...; f(p)`` has 0 overhead. Similarly, they "unpack" to the + // pointer type with 0 overhead. + // + // If there are no virtual functions, no complex inheritance (maybe?) and + // no destructor, these can be directly used as parameters in + // Python callbacks like tp_init: the layout is the same as a + // single pointer. Only subclasses with trivial constructors that + // do nothing but set the single pointer member are safe to use + // that way. + + + // This is the base class for things that can be done with a + // PyObject pointer. It assumes nothing about memory management. + // NOTE: Nothing is virtual, so subclasses shouldn't add new + // storage fields or try to override these methods. + template + class PyObjectPointer + { + public: + typedef T PyType; + protected: + T* p; + public: + explicit PyObjectPointer(T* it=nullptr) : p(it) + { + TC(p); + } + + // We don't allow automatic casting to PyObject* at this + // level, because then we could be passed to Py_DECREF/INCREF, + // but we want nothing to do with memory management. If you + // know better, then you can use the get() method, like on a + // std::shared_ptr. Except we name it borrow() to clarify that + // if this is a reference-tracked object, the pointer you get + // back will go away when the object does. + // TODO: This should probably not exist here, but be moved + // down to relevant sub-types. + + inline T* borrow() const G_NOEXCEPT + { + return this->p; + } + + PyObject* borrow_o() const G_NOEXCEPT + { + return reinterpret_cast(this->p); + } + + inline T* operator->() const G_NOEXCEPT + { + return this->p; + } + + bool is_None() const G_NOEXCEPT + { + return this->p == Py_None; + } + + inline PyObject* acquire_or_None() const G_NOEXCEPT + { + PyObject* result = this->p ? reinterpret_cast(this->p) : Py_None; + Py_INCREF(result); + return result; + } + + G_EXPLICIT_OP operator bool() const G_NOEXCEPT + { + return p != nullptr; + } + + inline Py_ssize_t REFCNT() const G_NOEXCEPT + { + return p ? Py_REFCNT(p) : -42; + } + + inline PyTypeObject* TYPE() const G_NOEXCEPT + { + return p ? 
Py_TYPE(p) : nullptr; + } + + inline OwnedObject PyStr() const G_NOEXCEPT; + inline const std::string as_str() const G_NOEXCEPT; + inline OwnedObject PyGetAttr(const ImmortalObject& name) const G_NOEXCEPT; + inline OwnedObject PyRequireAttr(const char* const name) const; + inline OwnedObject PyRequireAttr(const ImmortalObject& name) const; + inline OwnedObject PyCall(const BorrowedObject& arg) const; + inline OwnedObject PyCall(PyGreenlet* arg) const ; + inline OwnedObject PyCall(PyObject* arg) const ; + // PyObject_Call(this, args, kwargs); + inline OwnedObject PyCall(const BorrowedObject args, + const BorrowedObject kwargs) const; + inline OwnedObject PyCall(const OwnedObject& args, + const OwnedObject& kwargs) const; + + protected: + void _set_raw_pointer(void* t) + { + TC(t); + p = reinterpret_cast(t); + } + void* _get_raw_pointer() const + { + return p; + } + }; + +#ifdef GREENLET_USE_STDIO + template + std::ostream& operator<<(std::ostream& os, const PyObjectPointer& s) + { + const std::type_info& t = typeid(s); + os << t.name() + << "(addr=" << s.borrow() + << ", refcnt=" << s.REFCNT() + << ", value=" << s.as_str() + << ")"; + + return os; + } +#endif + + template + inline bool operator==(const PyObjectPointer& lhs, const void* const rhs) G_NOEXCEPT + { + return lhs.borrow_o() == rhs; + } + + template + inline bool operator==(const PyObjectPointer& lhs, const PyObjectPointer& rhs) G_NOEXCEPT + { + return lhs.borrow_o() == rhs.borrow_o(); + } + + template + inline bool operator!=(const PyObjectPointer& lhs, + const PyObjectPointer& rhs) G_NOEXCEPT + { + return lhs.borrow_o() != rhs.borrow_o(); + } + + template + class OwnedReference : public PyObjectPointer + { + private: + friend class OwnedList; + + protected: + explicit OwnedReference(T* it) : PyObjectPointer(it) + { + } + + public: + + // Constructors + + static OwnedReference consuming(PyObject* p) + { + return OwnedReference(reinterpret_cast(p)); + } + + static OwnedReference owning(T* p) + { + OwnedReference result(p); + Py_XINCREF(result.p); + return result; + } + + OwnedReference() : PyObjectPointer(nullptr) + {} + + explicit OwnedReference(const PyObjectPointer<>& other) + : PyObjectPointer(nullptr) + { + T* op = other.borrow(); + TC(op); + this->p = other.borrow(); + Py_XINCREF(this->p); + } + + // It would be good to make use of the C++11 distinction + // between move and copy operations, e.g., constructing from a + // pointer should be a move operation. + // In the common case of ``OwnedObject x = Py_SomeFunction()``, + // the call to the copy constructor will be elided completely. + OwnedReference(const OwnedReference& other) + : PyObjectPointer(other.p) + { + Py_XINCREF(this->p); + } + + static OwnedReference None() + { + Py_INCREF(Py_None); + return OwnedReference(Py_None); + } + + // We can assign from exactly our type without any extra checking + OwnedReference& operator=(const OwnedReference& other) + { + Py_XINCREF(other.p); + const T* tmp = this->p; + this->p = other.p; + Py_XDECREF(tmp); + return *this; + } + + OwnedReference& operator=(const BorrowedReference other) + { + return this->operator=(other.borrow()); + } + + OwnedReference& operator=(T* const other) + { + TC(other); + Py_XINCREF(other); + T* tmp = this->p; + this->p = other; + Py_XDECREF(tmp); + return *this; + } + + // We can assign from an arbitrary reference type + // if it passes our check. 
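+        // For example (an illustrative sketch, not a prescribed use):
+        // assigning an OwnedObject to an OwnedGreenlet routes through
+        // the template operator below, and the target's checker
+        // (GreenletChecker) throws TypeError if the object held is not
+        // actually a greenlet.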
+ template + OwnedReference& operator=(const OwnedReference& other) + { + X* op = other.borrow(); + TC(op); + return this->operator=(reinterpret_cast(op)); + } + + inline void steal(T* other) + { + assert(this->p == nullptr); + TC(other); + this->p = other; + } + + T* relinquish_ownership() + { + T* result = this->p; + this->p = nullptr; + return result; + } + + T* acquire() const + { + // Return a new reference. + // TODO: This may go away when we have reference objects + // throughout the code. + Py_XINCREF(this->p); + return this->p; + } + + // Nothing else declares a destructor, we're the leaf, so we + // should be able to get away without virtual. + ~OwnedReference() + { + Py_CLEAR(this->p); + } + + void CLEAR() + { + Py_CLEAR(this->p); + assert(this->p == nullptr); + } + }; + + static inline + void operator<<=(PyObject*& target, OwnedObject& o) + { + target = o.relinquish_ownership(); + } + + class NewReference : public OwnedObject + { + private: + G_NO_COPIES_OF_CLS(NewReference); + public: + // Consumes the reference. Only use this + // for API return values. + NewReference(PyObject* it) : OwnedObject(it) + { + } + }; + + class NewDictReference : public NewReference + { + private: + G_NO_COPIES_OF_CLS(NewDictReference); + public: + NewDictReference() : NewReference(PyDict_New()) + { + if (!this->p) { + throw PyErrOccurred(); + } + } + + void SetItem(const char* const key, PyObject* value) + { + Require(PyDict_SetItemString(this->p, key, value)); + } + + void SetItem(const PyObjectPointer<>& key, PyObject* value) + { + Require(PyDict_SetItem(this->p, key.borrow_o(), value)); + } + }; + + template + class _OwnedGreenlet: public OwnedReference + { + private: + protected: + _OwnedGreenlet(T* it) : OwnedReference(it) + {} + + public: + _OwnedGreenlet() : OwnedReference() + {} + + _OwnedGreenlet(const _OwnedGreenlet& other) : OwnedReference(other) + { + } + _OwnedGreenlet(OwnedMainGreenlet& other) : + OwnedReference(reinterpret_cast(other.acquire())) + { + } + _OwnedGreenlet(const BorrowedGreenlet& other); + // Steals a reference. + static _OwnedGreenlet consuming(PyGreenlet* it) + { + return _OwnedGreenlet(reinterpret_cast(it)); + } + + inline _OwnedGreenlet& operator=(const OwnedGreenlet& other) + { + return this->operator=(other.borrow()); + } + + inline _OwnedGreenlet& operator=(const BorrowedGreenlet& other); + + _OwnedGreenlet& operator=(const OwnedMainGreenlet& other) + { + PyGreenlet* owned = other.acquire(); + Py_XDECREF(this->p); + this->p = reinterpret_cast(owned); + return *this; + } + + _OwnedGreenlet& operator=(T* const other) + { + OwnedReference::operator=(other); + return *this; + } + + T* relinquish_ownership() + { + T* result = this->p; + this->p = nullptr; + return result; + } + + PyObject* relinquish_ownership_o() + { + return reinterpret_cast(relinquish_ownership()); + } + + inline Greenlet* operator->() const G_NOEXCEPT; + inline operator Greenlet*() const G_NOEXCEPT; + }; + + template + class BorrowedReference : public PyObjectPointer + { + public: + // Allow implicit creation from PyObject* pointers as we + // transition to using these classes. Also allow automatic + // conversion to PyObject* for passing to C API calls and even + // for Py_INCREF/DECREF, because we ourselves do no memory management. 
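+        // For illustration (the caller is hypothetical): a function
+        // declared as
+        //     void visit(BorrowedObject obj);
+        // can be passed a raw PyObject* directly; the implicit
+        // constructor below wraps it with no refcount change, and the
+        // conversion operator hands the raw pointer back to any C API
+        // call made inside visit().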
+ BorrowedReference(T* it) : PyObjectPointer(it) + {} + + BorrowedReference(const PyObjectPointer& ref) : PyObjectPointer(ref.borrow()) + {} + + BorrowedReference() : PyObjectPointer(nullptr) + {} + + operator T*() const + { + return this->p; + } + }; + + typedef BorrowedReference BorrowedObject; + //typedef BorrowedReference BorrowedGreenlet; + + template + class _BorrowedGreenlet : public BorrowedReference + { + public: + _BorrowedGreenlet() : + BorrowedReference(nullptr) + {} + + _BorrowedGreenlet(T* it) : + BorrowedReference(it) + {} + + _BorrowedGreenlet(const BorrowedObject& it); + + _BorrowedGreenlet(const OwnedGreenlet& it) : + BorrowedReference(it.borrow()) + {} + + _BorrowedGreenlet& operator=(const BorrowedObject& other); + + // We get one of these for PyGreenlet, but one for PyObject + // is handy as well + operator PyObject*() const + { + return reinterpret_cast(this->p); + } + inline Greenlet* operator->() const G_NOEXCEPT; + inline operator Greenlet*() const G_NOEXCEPT; + }; + + typedef _BorrowedGreenlet BorrowedGreenlet; + + template + _OwnedGreenlet::_OwnedGreenlet(const BorrowedGreenlet& other) + : OwnedReference(reinterpret_cast(other.borrow())) + { + Py_XINCREF(this->p); + } + + + class BorrowedMainGreenlet + : public _BorrowedGreenlet + { + public: + BorrowedMainGreenlet(const OwnedMainGreenlet& it) : + _BorrowedGreenlet(it.borrow()) + {} + BorrowedMainGreenlet(PyGreenlet* it=nullptr) + : _BorrowedGreenlet(it) + {} + }; + + template + _OwnedGreenlet& _OwnedGreenlet::operator=(const BorrowedGreenlet& other) + { + return this->operator=(other.borrow()); + } + + + class ImmortalObject : public PyObjectPointer<> + { + private: + G_NO_ASSIGNMENT_OF_CLS(ImmortalObject); + public: + explicit ImmortalObject(PyObject* it) : PyObjectPointer<>(it) + { + } + + /** + * Become the new owner of the object. Does not change the + * reference count. + */ + ImmortalObject& operator=(PyObject* it) + { + assert(this->p == nullptr); + this->p = it; + return *this; + } + + static ImmortalObject consuming(PyObject* it) + { + return ImmortalObject(it); + } + + inline operator PyObject*() const + { + return this->p; + } + }; + + class ImmortalString : public ImmortalObject + { + private: + G_NO_COPIES_OF_CLS(ImmortalString); + const char* str; + public: + ImmortalString(const char* const str) : + ImmortalObject(str ? Require(Greenlet_Intern(str)) : nullptr) + { + this->str = str; + } + + inline ImmortalString& operator=(const char* const str) + { + if (!this->p) { + this->p = Require(Greenlet_Intern(str)); + this->str = str; + } + else { + assert(this->str == str); + } + return *this; + } + + }; + + template + inline OwnedObject PyObjectPointer::PyStr() const G_NOEXCEPT + { + if (!this->p) { + return OwnedObject(); + } + return OwnedObject::consuming(PyObject_Str(reinterpret_cast(this->p))); + } + + template + inline const std::string PyObjectPointer::as_str() const G_NOEXCEPT + { + // NOTE: This is not Python exception safe. + if (this->p) { + // The Python APIs return a cached char* value that's only valid + // as long as the original object stays around, and we're + // about to (probably) toss it. Hence the copy to std::string. 
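+            // That is, the std::string copy in the return statement
+            // below is made while py_str is still alive; once py_str's
+            // destructor runs, the cached buffer may be freed with it.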
+ OwnedObject py_str = this->PyStr(); + if (!py_str) { + return "(nil)"; + } +#if PY_MAJOR_VERSION >= 3 + return PyUnicode_AsUTF8(py_str.borrow()); +#else + return PyString_AsString(py_str.borrow()); +#endif + } + return "(nil)"; + } + + template + inline OwnedObject PyObjectPointer::PyGetAttr(const ImmortalObject& name) const G_NOEXCEPT + { + assert(this->p); + return OwnedObject::consuming(PyObject_GetAttr(reinterpret_cast(this->p), name)); + } + + template + inline OwnedObject PyObjectPointer::PyRequireAttr(const char* const name) const + { + assert(this->p); + return OwnedObject::consuming(Require(PyObject_GetAttrString(this->p, name))); + } + + template + inline OwnedObject PyObjectPointer::PyRequireAttr(const ImmortalObject& name) const + { + assert(this->p); + return OwnedObject::consuming(Require( + PyObject_GetAttr(reinterpret_cast(this->p), + name))); + } + + template + inline OwnedObject PyObjectPointer::PyCall(const BorrowedObject& arg) const + { + return this->PyCall(arg.borrow()); + } + + template + inline OwnedObject PyObjectPointer::PyCall(PyGreenlet* arg) const + { + return this->PyCall(reinterpret_cast(arg)); + } + + template + inline OwnedObject PyObjectPointer::PyCall(PyObject* arg) const + { + assert(this->p); + return OwnedObject::consuming(PyObject_CallFunctionObjArgs(this->p, arg, NULL)); + } + + template + inline OwnedObject PyObjectPointer::PyCall(const BorrowedObject args, + const BorrowedObject kwargs) const + { + assert(this->p); + return OwnedObject::consuming(PyObject_Call(this->p, args, kwargs)); + } + + template + inline OwnedObject PyObjectPointer::PyCall(const OwnedObject& args, + const OwnedObject& kwargs) const + { + assert(this->p); + return OwnedObject::consuming(PyObject_Call(this->p, args.borrow(), kwargs.borrow())); + } + + G_FP_TMPL_STATIC inline void + ListChecker(void * p) + { + if (!p) { + return; + } + if (!PyList_Check(p)) { + throw TypeError("Expected a list"); + } + } + + class OwnedList : public OwnedReference + { + private: + G_NO_ASSIGNMENT_OF_CLS(OwnedList); + public: + // TODO: Would like to use move. + explicit OwnedList(const OwnedObject& other) + : OwnedReference(other) + { + } + + OwnedList& operator=(const OwnedObject& other) + { + if (other && PyList_Check(other.p)) { + // Valid list. Own a new reference to it, discard the + // reference to what we did own. + PyObject* new_ptr = other.p; + Py_INCREF(new_ptr); + Py_XDECREF(this->p); + this->p = new_ptr; + } + else { + // Either the other object was NULL (an error) or it + // wasn't a list. Either way, we're now invalidated. + Py_XDECREF(this->p); + this->p = nullptr; + } + return *this; + } + + inline bool empty() const + { + return PyList_GET_SIZE(p) == 0; + } + + inline Py_ssize_t size() const + { + return PyList_GET_SIZE(p); + } + + inline BorrowedObject at(const Py_ssize_t index) const + { + return PyList_GET_ITEM(p, index); + } + + inline void clear() + { + PyList_SetSlice(p, 0, PyList_GET_SIZE(p), NULL); + } + }; + + // Use this to represent the module object used at module init + // time. + // This could either be a borrowed (Py2) or new (Py3) reference; + // either way, we don't want to do any memory management + // on it here, Python itself will handle that. + // XXX: Actually, that's not quite right. On Python 3, if an + // exception occurs before we return to the interpreter, this will + // leak; but all previous versions also had that problem. 
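+        // A sketch of the intended use at module init time (the module
+        // definition name here is hypothetical):
+        //
+        //     CreatedModule m(greenlet_module_def); // Require() turns a
+        //                                           // null result into a
+        //                                           // thrown PyErrOccurred
+        //     m.PyAddObject("GREENLET_USE_GC", 1L); // refcounts stay balanced
+        //     return m.borrow();                    // hand the module to Python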
+ class CreatedModule : public PyObjectPointer<> + { + private: + G_NO_COPIES_OF_CLS(CreatedModule); + public: + CreatedModule(PyModuleDef& mod_def) : PyObjectPointer<>( + Require(PyModule_Create(&mod_def))) + { + } + + // PyAddObject(): Add a reference to the object to the module. + // On return, the reference count of the object is unchanged. + // + // The docs warn that PyModule_AddObject only steals the + // reference on success, so if it fails after we've incref'd + // or allocated, we're responsible for the decref. + void PyAddObject(const char* name, const long new_bool) + { + OwnedObject p = OwnedObject::consuming(Require(PyBool_FromLong(new_bool))); + this->PyAddObject(name, p); + } + + void PyAddObject(const char* name, const OwnedObject& new_object) + { + // The caller already owns a reference they will decref + // when their variable goes out of scope, we still need to + // incref/decref. + this->PyAddObject(name, new_object.borrow()); + } + + void PyAddObject(const char* name, const ImmortalObject& new_object) + { + this->PyAddObject(name, new_object.borrow()); + } + + void PyAddObject(const char* name, PyTypeObject& type) + { + this->PyAddObject(name, reinterpret_cast(&type)); + } + + void PyAddObject(const char* name, PyObject* new_object) + { + Py_INCREF(new_object); + try { + Require(PyModule_AddObject(this->p, name, new_object)); + } + catch (const PyErrOccurred&) { + Py_DECREF(p); + throw; + } + } + }; + + class PyErrFetchParam : public PyObjectPointer<> + { + // Not an owned object, because we can't be initialized with + // one, and we only sometimes acquire ownership. + private: + G_NO_COPIES_OF_CLS(PyErrFetchParam); + public: + // To allow declaring these and passing them to + // PyErr_Fetch we implement the empty constructor, + // and the address operator. + PyErrFetchParam() : PyObjectPointer<>(nullptr) + { + } + + PyObject** operator&() + { + return &this->p; + } + + // This allows us to pass one directly without the &, + // BUT it has higher precedence than the bool operator + // if it's not explicit. + operator PyObject**() + { + return &this->p; + } + + // We don't want to be able to pass these to Py_DECREF and + // such so we don't have the implicit PyObject* conversion. + + inline PyObject* relinquish_ownership() + { + PyObject* result = this->p; + this->p = nullptr; + return result; + } + + ~PyErrFetchParam() + { + Py_XDECREF(p); + } + }; + + class OwnedErrPiece : public OwnedObject + { + private: + + public: + // Unlike OwnedObject, this increments the refcount. + OwnedErrPiece(PyObject* p=nullptr) : OwnedObject(p) + { + this->acquire(); + } + + PyObject** operator&() + { + return &this->p; + } + + inline operator PyObject*() const + { + return this->p; + } + + operator PyTypeObject*() const + { + return reinterpret_cast(this->p); + } + }; + + class PyErrPieces + { + private: + OwnedErrPiece type; + OwnedErrPiece instance; + OwnedErrPiece traceback; + bool restored; + public: + // Takes new references; if we're destroyed before + // restoring the error, we drop the references. + PyErrPieces(PyObject* t, PyObject* v, PyObject* tb) : + type(t), + instance(v), + traceback(tb), + restored(0) + { + this->normalize(); + } + + PyErrPieces() : + restored(0) + { + // PyErr_Fetch transfers ownership to us, so + // we don't actually need to INCREF; but we *do* + // need to DECREF if we're not restored. 
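+            // In other words, the intended round trip looks like:
+            //
+            //     PyErrPieces saved;     // PyErr_Fetch: indicator is now ours
+            //     ...                    // run code that may set a new error
+            //     saved.PyErrRestore();  // PyErr_Restore: hand the refs back
+            //
+            // If PyErrRestore() is never called, the OwnedErrPiece
+            // members drop their references in their destructors.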
+ PyErrFetchParam t, v, tb; + PyErr_Fetch(&t, &v, &tb); + type.steal(t.relinquish_ownership()); + instance.steal(v.relinquish_ownership()); + traceback.steal(tb.relinquish_ownership()); + } + + void PyErrRestore() + { + // can only do this once + assert(!this->restored); + this->restored = true; + PyErr_Restore( + this->type.relinquish_ownership(), + this->instance.relinquish_ownership(), + this->traceback.relinquish_ownership()); + assert(!this->type && !this->instance && !this->traceback); + } + + private: + void normalize() + { + // First, check the traceback argument, replacing None, + // with NULL + if (traceback.is_None()) { + traceback = nullptr; + } + + if (traceback && !PyTraceBack_Check(traceback.borrow())) { + throw PyErrOccurred(PyExc_TypeError, + "throw() third argument must be a traceback object"); + } + + if (PyExceptionClass_Check(type)) { + // If we just had a type, we'll now have a type and + // instance. + // The type's refcount will have gone up by one + // because of the instance and the instance will have + // a refcount of one. Either way, we owned, and still + // do own, exactly one reference. + PyErr_NormalizeException(&type, &instance, &traceback); + + } + else if (PyExceptionInstance_Check(type)) { + /* Raising an instance --- usually that means an + object that is a subclass of BaseException, but on + Python 2, that can also mean an arbitrary old-style + object. The value should be a dummy. */ + if (instance && !instance.is_None()) { + throw PyErrOccurred( + PyExc_TypeError, + "instance exception may not have a separate value"); + } + /* Normalize to raise , */ + this->instance = this->type; + this->type = PyExceptionInstance_Class(instance.borrow()); + + /* + It would be tempting to do this: + + Py_ssize_t type_count = Py_REFCNT(Py_TYPE(instance.borrow())); + this->type = PyExceptionInstance_Class(instance.borrow()); + assert(this->type.REFCNT() == type_count + 1); + + But that doesn't work on Python 2 in the case of + old-style instances: The result of Py_TYPE is going to + be the global shared that all + old-style classes have, while the return of Instance_Class() + will be the Python-level class object. The two are unrelated. + */ + } + else { + /* Not something you can raise. throw() fails. */ + PyErr_Format(PyExc_TypeError, + "exceptions must be classes, or instances, not %s", + Py_TYPE(type.borrow())->tp_name); + throw PyErrOccurred(); + } + } + }; + + // PyArg_Parse's O argument returns a borrowed reference. + class PyArgParseParam : public BorrowedObject + { + private: + G_NO_COPIES_OF_CLS(PyArgParseParam); + public: + explicit PyArgParseParam(PyObject* p=nullptr) : BorrowedObject(p) + { + } + + inline PyObject** operator&() + { + return &this->p; + } + }; + +};}; + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_slp_switch.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_slp_switch.hpp new file mode 100644 index 00000000..25ac5ab3 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_slp_switch.hpp @@ -0,0 +1,117 @@ +#ifndef GREENLET_SLP_SWITCH_HPP +#define GREENLET_SLP_SWITCH_HPP + +#include "greenlet_compiler_compat.hpp" +#include "greenlet_refs.hpp" + +/* + * the following macros are spliced into the OS/compiler + * specific code, in order to simplify maintenance. 
+ */
+// We can save about 10% of the time it takes to switch greenlets if
+// we thread the thread state through the slp_save_state() and the
+// following slp_restore_state() calls from
+// slp_switch()->g_switchstack() (which already needs to access it).
+//
+// However:
+//
+// that requires changing the prototypes and implementations of the
+// switching functions. If we just change the prototype of
+// slp_switch() to accept the argument and update the macros, without
+// changing the implementation of slp_switch(), we get crashes on
+// 64-bit Linux and 32-bit x86 (for reasons that aren't 100% clear);
+// on the other hand, 64-bit macOS seems to be fine. Also, 64-bit
+// windows is an issue because slp_switch is written fully in assembly
+// and currently ignores its argument so some code would have to be
+// adjusted there to pass the argument on to the
+// ``slp_save_state_asm()`` function (but interestingly, because of
+// the calling convention, the extra argument is just ignored and
+// things function fine, albeit slower, if we just modify
+// ``slp_save_state_asm()`` to fetch the pointer to pass to the
+// macro.)
+//
+// Our compromise is to use a *global*, untracked, weak, pointer
+// to the necessary thread state during the process of switching only.
+// This is safe because we're protected by the GIL, and if we're
+// running this code, the thread isn't exiting. This also nets us a
+// 10-12% speed improvement.
+
+static greenlet::Greenlet* volatile switching_thread_state = nullptr;
+
+
+#ifdef GREENLET_NOINLINE_SUPPORTED
+extern "C" {
+static int GREENLET_NOINLINE(slp_save_state_trampoline)(char* stackref);
+static void GREENLET_NOINLINE(slp_restore_state_trampoline)();
+}
+#define GREENLET_NOINLINE_INIT() \
+    do {                         \
+    } while (0)
+#else
+/* force compiler to call functions via pointers */
+/* XXX: Do we even want/need to support such compilers? This code path
+   is untested on CI. */
+extern "C" {
+static int (*slp_save_state_trampoline)(char* stackref);
+static void (*slp_restore_state_trampoline)();
+}
+#define GREENLET_NOINLINE(name) cannot_inline_##name
+#define GREENLET_NOINLINE_INIT()                                               \
+    do {                                                                       \
+        slp_save_state_trampoline = GREENLET_NOINLINE(slp_save_state_trampoline); \
+        slp_restore_state_trampoline = GREENLET_NOINLINE(slp_restore_state_trampoline); \
+    } while (0)
+#endif
+
+#define SLP_SAVE_STATE(stackref, stsizediff) \
+do { \
+    assert(switching_thread_state);  \
+    stackref += STACK_MAGIC; \
+    if (slp_save_state_trampoline((char*)stackref)) \
+        return -1; \
+    if (!switching_thread_state->active()) \
+        return 1; \
+    stsizediff = switching_thread_state->stack_start() - (char*)stackref; \
+} while (0)
+
+#define SLP_RESTORE_STATE() slp_restore_state_trampoline()
+
+#define SLP_EVAL
+extern "C" {
+#define slp_switch GREENLET_NOINLINE(slp_switch)
+#include "slp_platformselect.h"
+}
+#undef slp_switch
+
+#ifndef STACK_MAGIC
+#    error \
+        "greenlet needs to be ported to this platform, or taught how to detect your compiler properly."
+#endif /* !STACK_MAGIC */
+
+
+
+#ifdef EXTERNAL_ASM
+/* CCP addition: Make these functions, to be called from assembler.
+ * The token include file for the given platform should enable the
+ * EXTERNAL_ASM define so that this is included.
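+ * (In that configuration, the assembly implementation of slp_switch
+ * calls back into slp_save_state_asm()/slp_restore_state_asm() below,
+ * which simply expand the same SLP_SAVE_STATE/SLP_RESTORE_STATE macros
+ * used on the inline-assembly platforms.)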
+ */ +extern "C" { +intptr_t +slp_save_state_asm(intptr_t* ref) +{ + intptr_t diff; + SLP_SAVE_STATE(ref, diff); + return diff; +} + +void +slp_restore_state_asm(void) +{ + SLP_RESTORE_STATE(); +} + +extern int slp_switch(void); +}; +#endif + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_thread_state.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_thread_state.hpp new file mode 100644 index 00000000..b740874e --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_thread_state.hpp @@ -0,0 +1,561 @@ +#ifndef GREENLET_THREAD_STATE_HPP +#define GREENLET_THREAD_STATE_HPP + +#include +#include + +#include "greenlet_internal.hpp" +#include "greenlet_refs.hpp" +#include "greenlet_thread_support.hpp" + +using greenlet::refs::BorrowedObject; +using greenlet::refs::BorrowedGreenlet; +using greenlet::refs::BorrowedMainGreenlet; +using greenlet::refs::OwnedMainGreenlet; +using greenlet::refs::OwnedObject; +using greenlet::refs::OwnedGreenlet; +using greenlet::refs::OwnedList; +using greenlet::refs::PyErrFetchParam; +using greenlet::refs::PyArgParseParam; +using greenlet::refs::ImmortalString; +using greenlet::refs::CreatedModule; +using greenlet::refs::PyErrPieces; +using greenlet::refs::NewReference; + +namespace greenlet { +/** + * Thread-local state of greenlets. + * + * Each native thread will get exactly one of these objects, + * automatically accessed through the best available thread-local + * mechanism the compiler supports (``thread_local`` for C++11 + * compilers or ``__thread``/``declspec(thread)`` for older GCC/clang + * or MSVC, respectively.) + * + * Previously, we kept thread-local state mostly in a bunch of + * ``static volatile`` variables in the main greenlet file.. This had + * the problem of requiring extra checks, loops, and great care + * accessing these variables if we potentially invoked any Python code + * that could release the GIL, because the state could change out from + * under us. Making the variables thread-local solves this problem. + * + * When we detected that a greenlet API accessing the current greenlet + * was invoked from a different thread than the greenlet belonged to, + * we stored a reference to the greenlet in the Python thread + * dictionary for the thread the greenlet belonged to. This could lead + * to memory leaks if the thread then exited (because of a reference + * cycle, as greenlets referred to the thread dictionary, and deleting + * non-current greenlets leaked their frame plus perhaps arguments on + * the C stack). If a thread exited while still having running + * greenlet objects (perhaps that had just switched back to the main + * greenlet), and did not invoke one of the greenlet APIs *in that + * thread, immediately before it exited, without some other thread + * then being invoked*, such a leak was guaranteed. + * + * This can be partly solved by using compiler thread-local variables + * instead of the Python thread dictionary, thus avoiding a cycle. + * + * To fully solve this problem, we need a reliable way to know that a + * thread is done and we should clean up the main greenlet. 
On POSIX,
+ * we can use the destructor function of ``pthread_key_create``, but
+ * there's nothing similar on Windows; a C++11 thread local object
+ * reliably invokes its destructor when the thread it belongs to exits
+ * (non-C++11 compilers offer ``__thread`` or ``declspec(thread)`` to
+ * create thread-local variables, but they can't hold C++ objects that
+ * invoke destructors; the C++11 version is the most portable solution
+ * I found). When the thread exits, we can drop references and
+ * otherwise manipulate greenlets and frames that we know can no
+ * longer be switched to. For compilers that don't support C++11
+ * thread locals, we have a solution that uses the python thread
+ * dictionary, though it may not collect everything as promptly as
+ * other compilers do, if some other library is using the thread
+ * dictionary and has a cycle or extra reference.
+ *
+ * There are two small wrinkles. The first is that when the thread
+ * exits, it is too late to actually invoke Python APIs: the Python
+ * thread state is gone, and the GIL is released. To solve *this*
+ * problem, our destructor uses ``Py_AddPendingCall`` to transfer the
+ * destruction work to the main thread. (This is not an issue for the
+ * dictionary solution.)
+ *
+ * The second is that once the thread exits, the thread local object
+ * is invalid and we can't even access a pointer to it, so we can't
+ * pass it to ``Py_AddPendingCall``. This is handled by actually using
+ * a second object that's thread local (ThreadStateCreator) and having
+ * it dynamically allocate this object so it can live until the
+ * pending call runs.
+ */
+
+
+
+class ThreadState {
+private:
+    // As of commit 08ad1dd7012b101db953f492e0021fb08634afad
+    // this class needed 56 bytes in a Py_DEBUG build
+    // on 64-bit macOS 11.
+    // Adding the vector takes us up to 80 bytes ()
+
+    /* Strong reference to the main greenlet */
+    OwnedMainGreenlet main_greenlet;
+
+    /* Strong reference to the current greenlet. */
+    OwnedGreenlet current_greenlet;
+
+    /* Strong reference to the trace function, if any. */
+    OwnedObject tracefunc;
+
+    typedef std::vector<PyGreenlet*, PythonAllocator<PyGreenlet*> > deleteme_t;
+    /* A vector of raw PyGreenlet pointers representing things that need
+       to be deleted when this thread is running. The vector owns the
+       references, but you need to manually INCREF/DECREF as you use
+       them. We don't use a vector<OwnedGreenlet> because we
+       make a copy of this vector, and that would become O(n) as all the
+       refcounts are incremented in the copy.
+    */
+    deleteme_t deleteme;
+
+#ifdef GREENLET_NEEDS_EXCEPTION_STATE_SAVED
+    void* exception_state;
+#endif
+
+    static std::clock_t _clocks_used_doing_gc;
+    static ImmortalString get_referrers_name;
+    static PythonAllocator<ThreadState> allocator;
+
+    G_NO_COPIES_OF_CLS(ThreadState);
+
+public:
+    static void* operator new(size_t UNUSED(count))
+    {
+        return ThreadState::allocator.allocate(1);
+    }
+
+    static void operator delete(void* ptr)
+    {
+        return ThreadState::allocator.deallocate(static_cast<ThreadState*>(ptr),
+                                                 1);
+    }
+
+    static void init()
+    {
+        ThreadState::get_referrers_name = "get_referrers";
+        ThreadState::_clocks_used_doing_gc = 0;
+    }
+
+    ThreadState()
+        : main_greenlet(OwnedMainGreenlet::consuming(green_create_main(this))),
+          current_greenlet(main_greenlet)
+    {
+        if (!this->main_greenlet) {
+            // We failed to create the main greenlet. That's bad.
+            throw PyFatalError("Failed to create main greenlet");
+        }
+        // The main greenlet starts with 1 ref: the returned one. We
+        // then copied it to the current greenlet.
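+        // (green_create_main() returned one strong reference, which
+        // main_greenlet consumed; copying it into current_greenlet
+        // took a second one, hence the expected count of 2 below.)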
+        assert(this->main_greenlet.REFCNT() == 2);
+
+#ifdef GREENLET_NEEDS_EXCEPTION_STATE_SAVED
+        this->exception_state = slp_get_exception_state();
+#endif
+    }
+
+    inline void restore_exception_state()
+    {
+#ifdef GREENLET_NEEDS_EXCEPTION_STATE_SAVED
+        // It's probably important this be inlined and only call C
+        // functions to avoid adding an SEH frame.
+        slp_set_exception_state(this->exception_state);
+#endif
+    }
+
+    inline bool has_main_greenlet()
+    {
+        return !!this->main_greenlet;
+    }
+
+    // Called from the ThreadStateCreator when we're in non-standard
+    // threading mode. In that case, there is an object in the Python
+    // thread state dictionary that points to us. The main greenlet
+    // also traverses into us, in which case it's crucial not to
+    // traverse back into the main greenlet.
+    int tp_traverse(visitproc visit, void* arg, bool traverse_main=true)
+    {
+        if (traverse_main) {
+            Py_VISIT(main_greenlet.borrow_o());
+        }
+        if (traverse_main || current_greenlet != main_greenlet) {
+            Py_VISIT(current_greenlet.borrow_o());
+        }
+        Py_VISIT(tracefunc.borrow());
+        return 0;
+    }
+
+    inline BorrowedMainGreenlet borrow_main_greenlet() const
+    {
+        assert(this->main_greenlet);
+        assert(this->main_greenlet.REFCNT() >= 2);
+        return this->main_greenlet;
+    };
+
+    inline OwnedMainGreenlet get_main_greenlet()
+    {
+        return this->main_greenlet;
+    }
+
+    /**
+     * In addition to returning a new reference to the current
+     * greenlet, this performs any maintenance needed.
+     */
+    inline OwnedGreenlet get_current()
+    {
+        /* green_dealloc() cannot delete greenlets from other threads, so
+           it stores them in the thread dict; delete them now. */
+        this->clear_deleteme_list();
+        //assert(this->current_greenlet->main_greenlet == this->main_greenlet);
+        //assert(this->main_greenlet->main_greenlet == this->main_greenlet);
+        return this->current_greenlet;
+    }
+
+    /**
+     * As for the non-const get_current(); performs the same maintenance.
+     */
+    inline BorrowedGreenlet borrow_current()
+    {
+        this->clear_deleteme_list();
+        return this->current_greenlet;
+    }
+
+    /**
+     * Does no maintenance.
+     */
+    inline OwnedGreenlet get_current() const
+    {
+        return this->current_greenlet;
+    }
+
+    template<typename T, refs::TypeChecker TC>
+    inline bool is_current(const refs::PyObjectPointer<T, TC>& obj) const
+    {
+        return this->current_greenlet.borrow_o() == obj.borrow_o();
+    }
+
+    inline void set_current(const OwnedGreenlet& target)
+    {
+        this->current_greenlet = target;
+    }
+
+private:
+    /**
+     * Deref and remove the greenlets from the deleteme list. Must be
+     * holding the GIL.
+     *
+     * If *murder* is true, then we must be called from a different
+     * thread than the one that these greenlets were running in.
+     * In that case, if the greenlet was actually running, we destroy
+     * the frame reference and otherwise make it appear dead before
+     * proceeding; otherwise, we would try (and fail) to raise an
+     * exception in it and wind up right back in this list.
+     */
+    inline void clear_deleteme_list(const bool murder=false)
+    {
+        if (!this->deleteme.empty()) {
+            // It's possible we could add items to this list while
+            // running Python code if there's a thread switch, so we
+            // need to defensively copy it before that can happen.
+            deleteme_t copy = this->deleteme;
+            this->deleteme.clear(); // in case things come back on the list
+            for(deleteme_t::iterator it = copy.begin(), end = copy.end();
+                it != end;
+                ++it ) {
+                PyGreenlet* to_del = *it;
+                if (murder) {
+                    // Force each greenlet to appear dead; we can't raise an
+                    // exception into it anymore anyway.
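+                    // (murder_in_place() is expected to drop the frame
+                    // reference and mark the greenlet dead, so the
+                    // Py_DECREF below can actually deallocate it rather
+                    // than re-queue it on this list.)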
+ to_del->pimpl->murder_in_place(); + } + + // The only reference to these greenlets should be in + // this list, decreffing them should let them be + // deleted again, triggering calls to green_dealloc() + // in the correct thread (if we're not murdering). + // This may run arbitrary Python code and switch + // threads or greenlets! + Py_DECREF(to_del); + if (PyErr_Occurred()) { + PyErr_WriteUnraisable(nullptr); + PyErr_Clear(); + } + } + } + } + +public: + + /** + * Returns a new reference, or a false object. + */ + inline OwnedObject get_tracefunc() const + { + return tracefunc; + }; + + + inline void set_tracefunc(BorrowedObject tracefunc) + { + assert(tracefunc); + if (tracefunc == BorrowedObject(Py_None)) { + this->tracefunc.CLEAR(); + } + else { + this->tracefunc = tracefunc; + } + } + + /** + * Given a reference to a greenlet that some other thread + * attempted to delete (has a refcount of 0) store it for later + * deletion when the thread this state belongs to is current. + */ + inline void delete_when_thread_running(PyGreenlet* to_del) + { + Py_INCREF(to_del); + this->deleteme.push_back(to_del); + } + + /** + * Set to std::clock_t(-1) to disable. + */ + inline static std::clock_t& clocks_used_doing_gc() + { + return ThreadState::_clocks_used_doing_gc; + } + + ~ThreadState() + { + if (!PyInterpreterState_Head()) { + // We shouldn't get here (our callers protect us) + // but if we do, all we can do is bail early. + return; + } + + // We should not have an "origin" greenlet; that only exists + // for the temporary time during a switch, which should not + // be in progress as the thread dies. + //assert(!this->switching_state.origin); + + this->tracefunc.CLEAR(); + + // Forcibly GC as much as we can. + this->clear_deleteme_list(true); + + // The pending call did this. + assert(this->main_greenlet->thread_state() == nullptr); + + // If the main greenlet is the current greenlet, + // then we "fell off the end" and the thread died. + // It's possible that there is some other greenlet that + // switched to us, leaving a reference to the main greenlet + // on the stack, somewhere uncollectible. Try to detect that. + if (this->current_greenlet == this->main_greenlet && this->current_greenlet) { + assert(this->current_greenlet->is_currently_running_in_some_thread()); + // Drop one reference we hold. + this->current_greenlet.CLEAR(); + assert(!this->current_greenlet); + // Only our reference to the main greenlet should be left, + // But hold onto the pointer in case we need to do extra cleanup. + PyGreenlet* old_main_greenlet = this->main_greenlet.borrow(); + Py_ssize_t cnt = this->main_greenlet.REFCNT(); + this->main_greenlet.CLEAR(); + if (ThreadState::_clocks_used_doing_gc != std::clock_t(-1) + && cnt == 2 && Py_REFCNT(old_main_greenlet) == 1) { + // Highly likely that the reference is somewhere on + // the stack, not reachable by GC. Verify. + // XXX: This is O(n) in the total number of objects. + // TODO: Add a way to disable this at runtime, and + // another way to report on it. + std::clock_t begin = std::clock(); + NewReference gc(PyImport_ImportModule("gc")); + if (gc) { + OwnedObject get_referrers = gc.PyRequireAttr(ThreadState::get_referrers_name); + OwnedList refs(get_referrers.PyCall(old_main_greenlet)); + if (refs && refs.empty()) { + assert(refs.REFCNT() == 1); + // We found nothing! So we left a dangling + // reference: Probably the last thing some + // other greenlet did was call + // 'getcurrent().parent.switch()' to switch + // back to us. Clean it up. 
This will be the
+                        // case on CPython 3.7 and newer, as they use
+                        // an internal calling convention that avoids
+                        // creating method objects and storing them on
+                        // the stack.
+                        Py_DECREF(old_main_greenlet);
+                    }
+                    else if (refs
+                             && refs.size() == 1
+                             && PyCFunction_Check(refs.at(0))
+                             && Py_REFCNT(refs.at(0)) == 2) {
+                        assert(refs.REFCNT() == 1);
+                        // Ok, we found a C method that refers to the
+                        // main greenlet, and it's only referenced
+                        // twice, once in the list we just created,
+                        // once from...somewhere else. If we can't
+                        // find where else, then this is a leak.
+                        // This happens in older versions of CPython
+                        // that create a bound method object somewhere
+                        // on the stack that we'll never get back to.
+                        if (PyCFunction_GetFunction(refs.at(0).borrow()) == (PyCFunction)green_switch) {
+                            BorrowedObject function_w = refs.at(0);
+                            refs.clear(); // destroy the reference
+                                          // from the list.
+                            // back to one reference. Can *it* be
+                            // found?
+                            assert(function_w.REFCNT() == 1);
+                            refs = get_referrers.PyCall(function_w);
+                            if (refs && refs.empty()) {
+                                // Nope, it can't be found so it won't
+                                // ever be GC'd. Drop it.
+                                Py_CLEAR(function_w);
+                            }
+                        }
+                    }
+                    std::clock_t end = std::clock();
+                    ThreadState::_clocks_used_doing_gc += (end - begin);
+                }
+            }
+        }
+
+        // We need to make sure this greenlet appears to be dead,
+        // because otherwise deallocing it would fail to raise an
+        // exception in it (the thread is dead) and put it back in our
+        // deleteme list.
+        if (this->current_greenlet) {
+            this->current_greenlet->murder_in_place();
+            this->current_greenlet.CLEAR();
+        }
+
+        if (this->main_greenlet) {
+            // Couldn't have been the main greenlet that was running
+            // when the thread exited (because we already cleared this
+            // pointer if it was). This shouldn't be possible?
+
+            // If the main greenlet was current when the thread died (it
+            // should be, right?) then we cleared its self pointer above
+            // when we cleared the current greenlet's main greenlet pointer.
+            // assert(this->main_greenlet->main_greenlet == this->main_greenlet
+            //        || !this->main_greenlet->main_greenlet);
+            // // self reference, probably gone
+            // this->main_greenlet->main_greenlet.CLEAR();
+
+            // This will actually go away when the ivar is destructed.
+            this->main_greenlet.CLEAR();
+        }
+
+        if (PyErr_Occurred()) {
+            PyErr_WriteUnraisable(NULL);
+            PyErr_Clear();
+        }
+
+    }
+
+};
+
+ImmortalString ThreadState::get_referrers_name(nullptr);
+PythonAllocator<ThreadState> ThreadState::allocator;
+std::clock_t ThreadState::_clocks_used_doing_gc(0);
+
+template<typename Destructor>
+class ThreadStateCreator
+{
+private:
+    // Initialized to 1, and, if still 1, created on access.
+    // Set to 0 on destruction.
+    ThreadState* _state;
+    G_NO_COPIES_OF_CLS(ThreadStateCreator);
+public:
+
+    // Only one of these, auto created per thread
+    ThreadStateCreator() :
+        _state((ThreadState*)1)
+    {
+    }
+
+    ~ThreadStateCreator()
+    {
+        ThreadState* tmp = this->_state;
+        this->_state = nullptr;
+        if (tmp && tmp != (ThreadState*)1) {
+            Destructor x(tmp);
+        }
+    }
+
+    inline ThreadState& state()
+    {
+        // The main greenlet will own this pointer when it is created,
+        // which will be right after this. The plan is to give every
+        // greenlet a pointer to the main greenlet for the thread it
+        // runs in; if we are doing something cross-thread, we need to
+        // access the pointer from the main greenlet. Deleting the
+        // thread, and hence the thread-local storage, will delete the
+        // state pointer in the main greenlet.
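+        // The sentinel convention used here: (ThreadState*)1 means
+        // "not yet created", nullptr means "already destroyed", and
+        // anything else is the live state. Roughly:
+        //
+        //     (ThreadState*)1 --first access--> new ThreadState
+        //     live state      --thread exit---> nullptr (access now throws)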
+ if (this->_state == (ThreadState*)1) { + // XXX: Assuming allocation never fails + this->_state = new ThreadState; + // For non-standard threading, we need to store an object + // in the Python thread state dictionary so that it can be + // DECREF'd when the thread ends (ideally; the dict could + // last longer) and clean this object up. + } + if (!this->_state) { + throw std::runtime_error("Accessing state after destruction."); + } + return *this->_state; + } + + operator ThreadState&() + { + return this->state(); + } + + operator ThreadState*() + { + return &this->state(); + } + + inline int tp_traverse(visitproc visit, void* arg) + { + if (this->_state) { + return this->_state->tp_traverse(visit, arg); + } + return 0; + } + +}; + +#if G_USE_STANDARD_THREADING == 1 +// We can't use the PythonAllocator for this, because we push to it +// from the thread state destructor, which doesn't have the GIL, +// and Python's allocators can only be called with the GIL. +typedef std::vector cleanup_queue_t; +#else +class cleanup_queue_t { +public: + inline ssize_t size() const { return 0; }; + inline bool empty() const { return true; }; + inline void pop_back() + { + throw std::out_of_range("empty queue."); + }; + inline ThreadState* back() + { + throw std::out_of_range("empty queue."); + }; + inline void push_back(ThreadState* g) + { + throw std::out_of_range("empty queue."); + }; +}; +#endif +}; // namespace greenlet + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_thread_state_dict_cleanup.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_thread_state_dict_cleanup.hpp new file mode 100644 index 00000000..acf39c8f --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_thread_state_dict_cleanup.hpp @@ -0,0 +1,118 @@ +#ifndef GREENLET_THREAD_STATE_DICT_CLEANUP_HPP +#define GREENLET_THREAD_STATE_DICT_CLEANUP_HPP + +#include "greenlet_internal.hpp" +#include "greenlet_thread_state.hpp" + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wmissing-field-initializers" +#endif + +#ifndef G_THREAD_STATE_DICT_CLEANUP_TYPE +// shut the compiler up if it looks at this file in isolation +#define ThreadStateCreator int +#endif + +// Define a Python object that goes in the Python thread state dict +// when the greenlet thread state is created, and which owns the +// reference to the greenlet thread local state. +// When the thread state dict is cleaned up, so too is the thread +// state. This works best if we make sure there are no circular +// references to the thread state. +typedef struct _PyGreenletCleanup { + PyObject_HEAD + ThreadStateCreator* thread_state_creator; +} PyGreenletCleanup; + +static void +cleanup_do_dealloc(PyGreenletCleanup* self) +{ + ThreadStateCreator* tmp = self->thread_state_creator; + self->thread_state_creator = nullptr; + if (tmp) { + delete tmp; + } +} + +static void +cleanup_dealloc(PyGreenletCleanup* self) +{ + PyObject_GC_UnTrack(self); + cleanup_do_dealloc(self); +} + +static int +cleanup_clear(PyGreenletCleanup* self) +{ + // This method is never called by our test cases. 
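+    // (tp_clear exists to break reference cycles; the only thing this
+    // object holds is the C++ pointer to the thread state creator, so
+    // clearing shares cleanup_do_dealloc() with deallocation.)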
+ cleanup_do_dealloc(self); + return 0; +} + +static int +cleanup_traverse(PyGreenletCleanup* self, visitproc visit, void* arg) +{ + if (self->thread_state_creator) { + return self->thread_state_creator->tp_traverse(visit, arg); + } + return 0; +} + +static int +cleanup_is_gc(PyGreenlet* UNUSED(self)) +{ + return 1; +} + +static PyTypeObject PyGreenletCleanup_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "greenlet._greenlet.ThreadStateCleanup", + sizeof(struct _PyGreenletCleanup), + 0, /* tp_itemsize */ + /* methods */ + (destructor)cleanup_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as _number*/ + 0, /* tp_as _sequence*/ + 0, /* tp_as _mapping*/ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer*/ + G_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ + "Internal use only", /* tp_doc */ + (traverseproc)cleanup_traverse, /* tp_traverse */ + (inquiry)cleanup_clear, /* tp_clear */ + 0, /* tp_richcompare */ + // XXX: Don't our flags promise a weakref? + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + PyType_GenericAlloc, /* tp_alloc */ + PyType_GenericNew, /* tp_new */ + PyObject_GC_Del, /* tp_free */ + (inquiry)cleanup_is_gc, /* tp_is_gc */ +}; + +#ifdef __clang__ +# pragma clang diagnostic pop +#endif + + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_thread_support.hpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_thread_support.hpp new file mode 100644 index 00000000..747ae477 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/greenlet_thread_support.hpp @@ -0,0 +1,144 @@ +#ifndef GREENLET_THREAD_SUPPORT_HPP +#define GREENLET_THREAD_SUPPORT_HPP + +/** + * Defines various utility functions to help greenlet integrate well + * with threads. When possible, we use portable C++ 11 threading; when + * not possible, we will use platform specific APIs if needed and + * available. (Currently, this is only for Python 2.7 on Windows.) + */ + +#include +#include "greenlet_compiler_compat.hpp" + +// Allow setting this to 0 on the command line so that we +// can test these code paths on compilers that otherwise support +// standard threads. +#ifndef G_USE_STANDARD_THREADING +#if __cplusplus >= 201103 +// Cool. We should have standard support +# define G_USE_STANDARD_THREADING 1 +#elif defined(_MSC_VER) +// MSVC doesn't use a modern version of __cplusplus automatically, you +// have to opt-in to update it with /Zc:__cplusplus, but that's not +// available on our old version of visual studio for Python 2.7 +# if _MSC_VER <= 1500 +// Python 2.7 on Windows. Use the Python thread state and native Win32 APIs. +# define G_USE_STANDARD_THREADING 0 +# else +// Assume we have a compiler that supports it. 
The Appveyor compilers +// we use all do have standard support +# define G_USE_STANDARD_THREADING 1 +# endif +#elif defined(__GNUC__) || defined(__clang__) +// All tested versions either do, or can with the right --std argument, support what we need +# define G_USE_STANDARD_THREADING 1 +#else +# define G_USE_STANDARD_THREADING 0 +#endif +#endif /* G_USE_STANDARD_THREADING */ + +namespace greenlet { + class LockInitError : public std::runtime_error + { + public: + LockInitError(const char* what) : std::runtime_error(what) + {}; + }; +}; + + +#if G_USE_STANDARD_THREADING == 1 +# define G_THREAD_LOCAL_SUPPORTS_DESTRUCTOR 1 +# include +# include +# define G_THREAD_LOCAL_VAR thread_local +namespace greenlet { + typedef std::mutex Mutex; + typedef std::lock_guard LockGuard; +}; +#else +// NOTE: At this writing, the mutex isn't currently required; +// we don't use a shared cleanup queue or Py_AddPendingCall in this +// model, we rely on the thread state dictionary for cleanup. +# if defined(_MSC_VER) +// We should only hit this case for Python 2.7 on Windows. +# define G_THREAD_LOCAL_VAR __declspec(thread) +# include +namespace greenlet { + class Mutex + { + CRITICAL_SECTION _mutex; + G_NO_COPIES_OF_CLS(Mutex); + public: + Mutex() + { + InitializeCriticalSection(&this->_mutex); + }; + + void Lock() + { + EnterCriticalSection(&this->_mutex); + }; + + void UnLock() + { + LeaveCriticalSection(&this->_mutex); + }; + }; +}; +# elif (defined(__GNUC__) || defined(__clang__)) || (defined(__SUNPRO_C)) +// GCC, clang, SunStudio all use __thread for thread-local variables. +// For locks, we can use PyThread APIs, officially added in 3.2, but +// present back to 2.7 +# define G_THREAD_LOCAL_VAR __thread +# include "pythread.h" +namespace greenlet { + class Mutex + { + PyThread_type_lock _mutex; + G_NO_COPIES_OF_CLS(Mutex); + public: + Mutex() + { + this->_mutex = PyThread_allocate_lock(); + if (!this->_mutex) { + throw LockInitError("Failed to initialize mutex."); + } + }; + + void Lock() + { + PyThread_acquire_lock(this->_mutex, WAIT_LOCK); + }; + + void UnLock() + { + PyThread_release_lock(this->_mutex); + }; + }; +}; +# else +# error Unable to declare thread-local variables. +# endif +// the RAII lock keeper for all non-standard threading platforms. 
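+//
+// For illustration, both threading models are meant to be used the
+// same way (``some_mutex`` here is hypothetical):
+//
+//     static greenlet::Mutex some_mutex;
+//     {
+//         greenlet::LockGuard guard(some_mutex); // acquires in ctor
+//         /* ...critical section... */
+//     }   // releases in dtor, even if an exception escapes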
+namespace greenlet { + class LockGuard + { + Mutex& _mutex; + G_NO_COPIES_OF_CLS(LockGuard); + public: + LockGuard(Mutex& m) : _mutex(m) + { + this->_mutex.Lock(); + }; + ~LockGuard() + { + this->_mutex.UnLock(); + }; + }; + +}; +#endif /* G_USE_STANDARD_THREADING == 1 */ + +#endif /* GREENLET_THREAD_SUPPORT_HPP */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/setup_switch_x64_masm.cmd b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/setup_switch_x64_masm.cmd new file mode 100644 index 00000000..09285955 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/setup_switch_x64_masm.cmd @@ -0,0 +1,2 @@ +call "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\vcvarsall.bat" amd64 +ml64 /nologo /c /Fo switch_x64_masm.obj switch_x64_masm.asm diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_aarch64_gcc.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_aarch64_gcc.h new file mode 100644 index 00000000..31e09b97 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_aarch64_gcc.h @@ -0,0 +1,78 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 07-Sep-16 Add clang support using x register naming. Fredrik Fornwall + * 13-Apr-13 Add support for strange GCC caller-save decisions + * 08-Apr-13 File creation. Michael Matz + * + * NOTES + * + * Simply save all callee saved registers + * + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 +#define REGS_TO_SAVE "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", \ + "x27", "x28", "x30" /* aka lr */, \ + "v8", "v9", "v10", "v11", \ + "v12", "v13", "v14", "v15" + +static int +slp_switch(void) +{ + int err; + void *fp; + long *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("str x29, %0" : "=m"(fp) : : ); + __asm__ ("mov %0, sp" : "=r" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "add sp,sp,%0\n" + "add x29,x29,%0\n" + : + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + /* SLP_SAVE_STATE macro contains some return statements + (of -1 and 1). It falls through only when + the return value of slp_save_state() is zero, which + is placed in x0. + In that case we (slp_switch) also want to return zero + (also in x0 of course). + Now, some GCC versions (seen with 4.8) think it's a + good idea to save/restore x0 around the call to + slp_restore_state(), instead of simply zeroing it + at the return below. But slp_restore_state + writes random values to the stack slot used for this + save/restore (from when it once was saved above in + SLP_SAVE_STATE, when it was still uninitialized), so + "restoring" that precious zero actually makes us + return random values. There are some ways to make + GCC not use that zero value in the normal return path + (e.g. making err volatile, but that costs a little + stack space), and the simplest is to call a function + that returns an unknown value (which happens to be zero), + so the saved/restored value is unused. 
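For orientation, here is how the Mutex/LockGuard pair from greenlet_thread_support.hpp above is meant to be used. This is an illustrative sketch, not code from the package: the lock name and the function are invented, and only the two greenlet types are assumed.

```cpp
#include "greenlet_thread_support.hpp"

static greenlet::Mutex cleanup_lock;   // hypothetical shared lock

static void touch_shared_state()
{
    // Lock() (or std::mutex::lock on C++11 builds) runs in the constructor...
    greenlet::LockGuard guard(cleanup_lock);
    /* ... mutate state shared across threads ... */
}   // ...and the destructor releases the lock, even if an exception escapes
```

Either branch of the header satisfies this usage: with standard threading, LockGuard is std::lock_guard over std::mutex; otherwise it is the hand-rolled RAII class defined just above.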
*/ + /* XXX: This line produces warnings: + + value size does not match register size specified by the + constraint and modifier + + The suggested fix is to change %0 to %w0. + + TODO: Validate and change that. + */ + __asm__ volatile ("mov %0, #0" : "=r" (err)); + } + __asm__ volatile ("ldr x29, %0" : : "m" (fp) :); + __asm__ volatile ("" : : : REGS_TO_SAVE); + return err; +} + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_alpha_unix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_alpha_unix.h new file mode 100644 index 00000000..216619f9 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_alpha_unix.h @@ -0,0 +1,30 @@ +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "$9", "$10", "$11", "$12", "$13", "$14", "$15", \ + "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9" + +static int +slp_switch(void) +{ + register int ret; + register long *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("mov $30, %0" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "addq $30, %0, $30\n\t" + : /* no outputs */ + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("mov $31, %0" : "=r" (ret) : ); + return ret; +} + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_amd64_unix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_amd64_unix.h new file mode 100644 index 00000000..d4701105 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_amd64_unix.h @@ -0,0 +1,87 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 3-May-13 Ralf Schmitt + * Add support for strange GCC caller-save decisions + * (ported from switch_aarch64_gcc.h) + * 18-Aug-11 Alexey Borzenkov + * Correctly save rbp, csr and cw + * 01-Apr-04 Hye-Shik Chang + * Ported from i386 to amd64. + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for spark + * 31-Avr-02 Armin Rigo + * Added ebx, esi and edi register-saves. + * 01-Mar-02 Samual M. Rushing + * Ported from i386. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +/* #define STACK_MAGIC 3 */ +/* the above works fine with gcc 2.96, but 2.95.3 wants this */ +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "r12", "r13", "r14", "r15" + +static int +slp_switch(void) +{ + int err; + void* rbp; + void* rbx; + unsigned int csr; + unsigned short cw; + /* This used to be declared 'register', but that does nothing in + modern compilers and is explicitly forbidden in some new + standards. 
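The aarch64 header's XXX/TODO above suggests silencing the operand-size warning by spelling the output operand as %w0, the 32-bit view of the x-register, since err is a 32-bit int. A standalone sketch of that suggested change, unvalidated just as the file's own TODO says (the function name is invented):

```cpp
/* AArch64 with GCC/Clang only: the %w0 spelling the TODO proposes. */
static int zero_err_aarch64(void)
{
    int err;
    __asm__ volatile ("mov %w0, #0" : "=r" (err)); /* w0..w30 = low 32 bits of x0..x30 */
    return err;
}
```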
*/ + long *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("fstcw %0" : "=m" (cw)); + __asm__ volatile ("stmxcsr %0" : "=m" (csr)); + __asm__ volatile ("movq %%rbp, %0" : "=m" (rbp)); + __asm__ volatile ("movq %%rbx, %0" : "=m" (rbx)); + __asm__ ("movq %%rsp, %0" : "=g" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "addq %0, %%rsp\n" + "addq %0, %%rbp\n" + : + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + __asm__ volatile ("xorq %%rax, %%rax" : "=a" (err)); + } + __asm__ volatile ("movq %0, %%rbx" : : "m" (rbx)); + __asm__ volatile ("movq %0, %%rbp" : : "m" (rbp)); + __asm__ volatile ("ldmxcsr %0" : : "m" (csr)); + __asm__ volatile ("fldcw %0" : : "m" (cw)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm32_gcc.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm32_gcc.h new file mode 100644 index 00000000..035d6b94 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm32_gcc.h @@ -0,0 +1,79 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 14-Aug-06 File creation. Ported from Arm Thumb. Sylvain Baro + * 3-Sep-06 Commented out saving of r1-r3 (r4 already commented out) as I + * read that these do not need to be saved. Also added notes and + * errors related to the frame pointer. Richard Tew. + * + * NOTES + * + * It is not possible to detect if fp is used or not, so the supplied + * switch function needs to support it, so that you can remove it if + * it does not apply to you. + * + * POSSIBLE ERRORS + * + * "fp cannot be used in asm here" + * + * - Try commenting out "fp" in REGS_TO_SAVE. 
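Every slp_switch() in these headers opens the same way: capture the current stack pointer into a local through inline assembly, then hand it to SLP_SAVE_STATE. A minimal standalone sketch of that first step (x86-64, GCC/Clang only; illustrative, not greenlet code):

```cpp
#include <cstdio>

int main()
{
    long* stackref;
    /* same instruction and constraint that switch_amd64_unix.h uses */
    __asm__ ("movq %%rsp, %0" : "=g" (stackref));
    std::printf("current stack pointer (approx): %p\n", (void*)stackref);
    return 0;
}
```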
+ * + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 +#define REG_SP "sp" +#define REG_SPSP "sp,sp" +#ifdef __thumb__ +#define REG_FP "r7" +#define REG_FPFP "r7,r7" +#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r8", "r9", "r10", "r11", "lr" +#else +#define REG_FP "fp" +#define REG_FPFP "fp,fp" +#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r7", "r8", "r9", "r10", "lr" +#endif +#if defined(__SOFTFP__) +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL +#elif defined(__VFP_FP__) +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "d8", "d9", "d10", "d11", \ + "d12", "d13", "d14", "d15" +#elif defined(__MAVERICK__) +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "mvf4", "mvf5", "mvf6", "mvf7", \ + "mvf8", "mvf9", "mvf10", "mvf11", \ + "mvf12", "mvf13", "mvf14", "mvf15" +#else +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "f4", "f5", "f6", "f7" +#endif + +static int +#ifdef __GNUC__ +__attribute__((optimize("no-omit-frame-pointer"))) +#endif +slp_switch(void) +{ + void *fp; + register int *stackref, stsizediff; + int result; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("mov r0," REG_FP "\n\tstr r0,%0" : "=m" (fp) : : "r0"); + __asm__ ("mov %0," REG_SP : "=r" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "add " REG_SPSP ",%0\n" + "add " REG_FPFP ",%0\n" + : + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("ldr r0,%1\n\tmov " REG_FP ",r0\n\tmov %0, #0" : "=r" (result) : "m" (fp) : "r0"); + __asm__ volatile ("" : : : REGS_TO_SAVE); + return result; +} + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm32_ios.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm32_ios.h new file mode 100644 index 00000000..e993707f --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm32_ios.h @@ -0,0 +1,67 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 31-May-15 iOS support. Ported from arm32. Proton + * + * NOTES + * + * It is not possible to detect if fp is used or not, so the supplied + * switch function needs to support it, so that you can remove it if + * it does not apply to you. + * + * POSSIBLE ERRORS + * + * "fp cannot be used in asm here" + * + * - Try commenting out "fp" in REGS_TO_SAVE. 
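Both ARM32 variants pin the frame pointer with a function-level optimization attribute, because the switch code adjusts REG_FP directly and would corrupt a function that had folded it away. The idiom in isolation (the function name is illustrative):

```cpp
#ifdef __GNUC__
__attribute__((optimize("no-omit-frame-pointer")))
#endif
static void keeps_frame_pointer(void)
{
    /* body that relies on the frame pointer register staying live */
}
```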
+ * + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 0 +#define REG_SP "sp" +#define REG_SPSP "sp,sp" +#define REG_FP "r7" +#define REG_FPFP "r7,r7" +#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r8", "r10", "r11", "lr" +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "d8", "d9", "d10", "d11", \ + "d12", "d13", "d14", "d15" + +static int +#ifdef __GNUC__ +__attribute__((optimize("no-omit-frame-pointer"))) +#endif +slp_switch(void) +{ + void *fp; + register int *stackref, stsizediff, result; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("str " REG_FP ",%0" : "=m" (fp)); + __asm__ ("mov %0," REG_SP : "=r" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "add " REG_SPSP ",%0\n" + "add " REG_FPFP ",%0\n" + : + : "r" (stsizediff) + : REGS_TO_SAVE /* Clobber registers, force compiler to + * recalculate address of void *fp from REG_SP or REG_FP */ + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ( + "ldr " REG_FP ", %1\n\t" + "mov %0, #0" + : "=r" (result) + : "m" (fp) + : REGS_TO_SAVE /* Force compiler to restore saved registers after this */ + ); + return result; +} + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm64_masm.asm b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm64_masm.asm new file mode 100644 index 00000000..29f9c225 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm64_masm.asm @@ -0,0 +1,53 @@ + AREA switch_arm64_masm, CODE, READONLY; + GLOBAL slp_switch [FUNC] + EXTERN slp_save_state_asm + EXTERN slp_restore_state_asm + +slp_switch + ; push callee saved registers to stack + stp x19, x20, [sp, #-16]! + stp x21, x22, [sp, #-16]! + stp x23, x24, [sp, #-16]! + stp x25, x26, [sp, #-16]! + stp x27, x28, [sp, #-16]! + stp x29, x30, [sp, #-16]! + stp d8, d9, [sp, #-16]! + stp d10, d11, [sp, #-16]! + stp d12, d13, [sp, #-16]! + stp d14, d15, [sp, #-16]! + + ; call slp_save_state_asm with stack pointer + mov x0, sp + bl slp_save_state_asm + + ; early return for return value of 1 and -1 + cmp x0, #-1 + b.eq RETURN + cmp x0, #1 + b.eq RETURN + + ; increment stack and frame pointer + add sp, sp, x0 + add x29, x29, x0 + + bl slp_restore_state_asm + + ; store return value for successful completion of routine + mov x0, #0 + +RETURN + ; pop registers from stack + ldp d14, d15, [sp], #16 + ldp d12, d13, [sp], #16 + ldp d10, d11, [sp], #16 + ldp d8, d9, [sp], #16 + ldp x29, x30, [sp], #16 + ldp x27, x28, [sp], #16 + ldp x25, x26, [sp], #16 + ldp x23, x24, [sp], #16 + ldp x21, x22, [sp], #16 + ldp x19, x20, [sp], #16 + + ret + + END diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm64_masm.obj b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm64_masm.obj new file mode 100644 index 00000000..f6f220e4 Binary files /dev/null and b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm64_masm.obj differ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm64_msvc.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm64_msvc.h new file mode 100644 index 00000000..7ab7f45b --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_arm64_msvc.h @@ -0,0 +1,17 @@ +/* + * this is the internal transfer function. 
+ * + * HISTORY + * 21-Oct-21 Niyas Sait + * First version to enable win/arm64 support. + */ + +#define STACK_REFPLUS 1 +#define STACK_MAGIC 0 + +/* Use the generic support for an external assembly language slp_switch function. */ +#define EXTERNAL_ASM + +#ifdef SLP_EVAL +/* This always uses the external masm assembly file. */ +#endif \ No newline at end of file diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_csky_gcc.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_csky_gcc.h new file mode 100644 index 00000000..7486b948 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_csky_gcc.h @@ -0,0 +1,48 @@ +#ifdef SLP_EVAL +#define STACK_MAGIC 0 +#define REG_FP "r8" +#ifdef __CSKYABIV2__ +#define REGS_TO_SAVE_GENERAL "r4", "r5", "r6", "r7", "r9", "r10", "r11", "r15",\ + "r16", "r17", "r18", "r19", "r20", "r21", "r22",\ + "r23", "r24", "r25" + +#if defined (__CSKY_HARD_FLOAT__) || (__CSKY_VDSP__) +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL, "vr8", "vr9", "vr10", "vr11", "vr12",\ + "vr13", "vr14", "vr15" +#else +#define REGS_TO_SAVE REGS_TO_SAVE_GENERAL +#endif +#else +#define REGS_TO_SAVE "r9", "r10", "r11", "r12", "r13", "r15" +#endif + + +static int +#ifdef __GNUC__ +__attribute__((optimize("no-omit-frame-pointer"))) +#endif +slp_switch(void) +{ + register int *stackref, stsizediff; + int result; + + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ ("mov %0, sp" : "=r" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "addu sp,%0\n" + "addu "REG_FP",%0\n" + : + : "r" (stsizediff) + ); + + SLP_RESTORE_STATE(); + } + __asm__ volatile ("movi %0, 0" : "=r" (result)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + + return result; +} + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_m68k_gcc.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_m68k_gcc.h new file mode 100644 index 00000000..da761c2d --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_m68k_gcc.h @@ -0,0 +1,38 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 2014-01-06 Andreas Schwab + * File created. + */ + +#ifdef SLP_EVAL + +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "%d2", "%d3", "%d4", "%d5", "%d6", "%d7", \ + "%a2", "%a3", "%a4" + +static int +slp_switch(void) +{ + int err; + int *stackref, stsizediff; + void *fp, *a5; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("move.l %%fp, %0" : "=m"(fp)); + __asm__ volatile ("move.l %%a5, %0" : "=m"(a5)); + __asm__ ("move.l %%sp, %0" : "=r"(stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ("add.l %0, %%sp; add.l %0, %%fp" : : "r"(stsizediff)); + SLP_RESTORE_STATE(); + __asm__ volatile ("clr.l %0" : "=g" (err)); + } + __asm__ volatile ("move.l %0, %%a5" : : "m"(a5)); + __asm__ volatile ("move.l %0, %%fp" : : "m"(fp)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + return err; +} + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_mips_unix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_mips_unix.h new file mode 100644 index 00000000..1916b264 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_mips_unix.h @@ -0,0 +1,64 @@ +/* + * this is the internal transfer function. 
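Note how switch_arm64_msvc.h above is nearly empty: defining EXTERNAL_ASM delegates the whole switch to the MASM file in this directory, so C code only ever sees a declaration. A sketch of that contract, assuming nothing beyond the prototype:

```cpp
/* The body lives in switch_arm64_masm.asm; C/C++ sees only this. */
extern "C" int slp_switch(void);
```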
+ * + * HISTORY + * 20-Sep-14 Matt Madison + * Re-code the saving of the gp register for MIPS64. + * 05-Jan-08 Thiemo Seufer + * Ported from ppc. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "$16", "$17", "$18", "$19", "$20", "$21", "$22", \ + "$23", "$30" +static int +slp_switch(void) +{ + register int err; + register int *stackref, stsizediff; +#ifdef __mips64 + uint64_t gpsave; +#endif + __asm__ __volatile__ ("" : : : REGS_TO_SAVE); +#ifdef __mips64 + __asm__ __volatile__ ("sd $28,%0" : "=m" (gpsave) : : ); +#endif + __asm__ ("move %0, $29" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ __volatile__ ( +#ifdef __mips64 + "daddu $29, %0\n" +#else + "addu $29, %0\n" +#endif + : /* no outputs */ + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } +#ifdef __mips64 + __asm__ __volatile__ ("ld $28,%0" : : "m" (gpsave) : ); +#endif + __asm__ __volatile__ ("" : : : REGS_TO_SAVE); + __asm__ __volatile__ ("move %0, $0" : "=r" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc64_aix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc64_aix.h new file mode 100644 index 00000000..e07b8de3 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc64_aix.h @@ -0,0 +1,103 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 16-Oct-20 Jesse Gorzinski + * Copied from Linux PPC64 implementation + * 04-Sep-18 Alexey Borzenkov + * Workaround a gcc bug using manual save/restore of r30 + * 21-Mar-18 Tulio Magno Quites Machado Filho + * Added r30 to the list of saved registers in order to fully comply with + * both ppc64 ELFv1 ABI and the ppc64le ELFv2 ABI, that classify this + * register as a nonvolatile register used for local variables. + * 21-Mar-18 Laszlo Boszormenyi + * Save r2 (TOC pointer) manually. + * 10-Dec-13 Ulrich Weigand + * Support ELFv2 ABI. Save float/vector registers. + * 09-Mar-12 Michael Ellerman + * 64-bit implementation, copied from 32-bit. + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 04-Oct-02 Gustavo Niemeyer + * Ported from MacOS version. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! 
+ * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + * 31-Jul-12 Trevor Bowen + * Changed memory constraints to register only. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 6 + +#if defined(__ALTIVEC__) +#define ALTIVEC_REGS \ + "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", \ + "v28", "v29", "v30", "v31", +#else +#define ALTIVEC_REGS +#endif + +#define REGS_TO_SAVE "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "r31", \ + "fr14", "fr15", "fr16", "fr17", "fr18", "fr19", "fr20", "fr21", \ + "fr22", "fr23", "fr24", "fr25", "fr26", "fr27", "fr28", "fr29", \ + "fr30", "fr31", \ + ALTIVEC_REGS \ + "cr2", "cr3", "cr4" + +static int +slp_switch(void) +{ + register int err; + register long *stackref, stsizediff; + void * toc; + void * r30; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("std 2, %0" : "=m" (toc)); + __asm__ volatile ("std 30, %0" : "=m" (r30)); + __asm__ ("mr %0, 1" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "mr 11, %0\n" + "add 1, 1, 11\n" + : /* no outputs */ + : "r" (stsizediff) + : "11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("ld 30, %0" : : "m" (r30)); + __asm__ volatile ("ld 2, %0" : : "m" (toc)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc64_linux.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc64_linux.h new file mode 100644 index 00000000..88e6847f --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc64_linux.h @@ -0,0 +1,105 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 04-Sep-18 Alexey Borzenkov + * Workaround a gcc bug using manual save/restore of r30 + * 21-Mar-18 Tulio Magno Quites Machado Filho + * Added r30 to the list of saved registers in order to fully comply with + * both ppc64 ELFv1 ABI and the ppc64le ELFv2 ABI, that classify this + * register as a nonvolatile register used for local variables. + * 21-Mar-18 Laszlo Boszormenyi + * Save r2 (TOC pointer) manually. + * 10-Dec-13 Ulrich Weigand + * Support ELFv2 ABI. Save float/vector registers. + * 09-Mar-12 Michael Ellerman + * 64-bit implementation, copied from 32-bit. + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 04-Oct-02 Gustavo Niemeyer + * Ported from MacOS version. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. 
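Both ppc64 headers manually spill r2, the TOC pointer, because it is not in REGS_TO_SAVE and the called save/restore helpers may clobber it. The save/restore pair in isolation (ppc64 with GCC-style asm only; the function name is invented for illustration):

```cpp
static void toc_spill_demo(void)
{
    void* toc;
    __asm__ volatile ("std 2, %0" : "=m" (toc));  /* store r2 (TOC) to memory */
    /* ... the stack contents may be swapped out here ... */
    __asm__ volatile ("ld 2, %0" : : "m" (toc));  /* reload r2 */
}
```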
+ * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + * 31-Jul-12 Trevor Bowen + * Changed memory constraints to register only. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#if _CALL_ELF == 2 +#define STACK_MAGIC 4 +#else +#define STACK_MAGIC 6 +#endif + +#if defined(__ALTIVEC__) +#define ALTIVEC_REGS \ + "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", \ + "v28", "v29", "v30", "v31", +#else +#define ALTIVEC_REGS +#endif + +#define REGS_TO_SAVE "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "r31", \ + "fr14", "fr15", "fr16", "fr17", "fr18", "fr19", "fr20", "fr21", \ + "fr22", "fr23", "fr24", "fr25", "fr26", "fr27", "fr28", "fr29", \ + "fr30", "fr31", \ + ALTIVEC_REGS \ + "cr2", "cr3", "cr4" + +static int +slp_switch(void) +{ + register int err; + register long *stackref, stsizediff; + void * toc; + void * r30; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("std 2, %0" : "=m" (toc)); + __asm__ volatile ("std 30, %0" : "=m" (r30)); + __asm__ ("mr %0, 1" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "mr 11, %0\n" + "add 1, 1, 11\n" + : /* no outputs */ + : "r" (stsizediff) + : "11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("ld 30, %0" : : "m" (r30)); + __asm__ volatile ("ld 2, %0" : : "m" (toc)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_aix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_aix.h new file mode 100644 index 00000000..c7d476f6 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_aix.h @@ -0,0 +1,87 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 07-Mar-11 Floris Bruynooghe + * Do not add stsizediff to general purpose + * register (GPR) 30 as this is a non-volatile and + * unused by the PowerOpen Environment, therefore + * this was modifying a user register instead of the + * frame pointer (which does not seem to exist). + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 04-Oct-02 Gustavo Niemeyer + * Ported from MacOS version. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 3 + +/* !!!!WARNING!!!! 
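The STACK_MAGIC choice in switch_ppc64_linux.h above keys off the ppc64 ABI: compilers targeting ELFv2 (for example ppc64le) predefine _CALL_ELF as 2. The probe in standalone form; the DEMO_ name is illustrative, while the 4-versus-6 values are taken from the header:

```cpp
#if defined(_CALL_ELF) && _CALL_ELF == 2
#    define DEMO_STACK_MAGIC 4   /* ELFv2 frame layout */
#else
#    define DEMO_STACK_MAGIC 6   /* ELFv1 / AIX-style frame layout */
#endif
```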
need to add "r31" in the next line if this header file + * is meant to be compiled non-dynamically! + */ +#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "cr2", "cr3", "cr4" +static int +slp_switch(void) +{ + register int err; + register int *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ ("mr %0, 1" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "mr 11, %0\n" + "add 1, 1, 11\n" + : /* no outputs */ + : "r" (stsizediff) + : "11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_linux.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_linux.h new file mode 100644 index 00000000..0a712554 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_linux.h @@ -0,0 +1,84 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 04-Oct-02 Gustavo Niemeyer + * Ported from MacOS version. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + * 31-Jul-12 Trevor Bowen + * Changed memory constraints to register only. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 3 + +/* !!!!WARNING!!!! need to add "r31" in the next line if this header file + * is meant to be compiled non-dynamically! 
+ */ +#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "cr2", "cr3", "cr4" +static int +slp_switch(void) +{ + register int err; + register int *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ ("mr %0, 1" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "mr 11, %0\n" + "add 1, 1, 11\n" + "add 30, 30, 11\n" + : /* no outputs */ + : "r" (stsizediff) + : "11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_macosx.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_macosx.h new file mode 100644 index 00000000..56e573fe --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_macosx.h @@ -0,0 +1,82 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 3 + +/* !!!!WARNING!!!! need to add "r31" in the next line if this header file + * is meant to be compiled non-dynamically! + */ +#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "cr2", "cr3", "cr4" + +static int +slp_switch(void) +{ + register int err; + register int *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ ("; asm block 2\n\tmr %0, r1" : "=g" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "; asm block 3\n" + "\tmr r11, %0\n" + "\tadd r1, r1, r11\n" + "\tadd r30, r30, r11\n" + : /* no outputs */ + : "g" (stsizediff) + : "r11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. 
See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_unix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_unix.h new file mode 100644 index 00000000..2b3d307a --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_ppc_unix.h @@ -0,0 +1,82 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 07-Sep-05 (py-dev mailing list discussion) + * removed 'r31' from the register-saved. !!!! WARNING !!!! + * It means that this file can no longer be compiled statically! + * It is now only suitable as part of a dynamic library! + * 14-Jan-04 Bob Ippolito + * added cr2-cr4 to the registers to be saved. + * Open questions: Should we save FP registers? + * What about vector registers? + * Differences between darwin and unix? + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 04-Oct-02 Gustavo Niemeyer + * Ported from MacOS version. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * slightly changed framework for sparc + * 29-Jun-02 Christian Tismer + * Added register 13-29, 31 saves. The same way as + * Armin Rigo did for the x86_unix version. + * This seems to be now fully functional! + * 04-Mar-02 Hye-Shik Chang + * Ported from i386. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 3 + +/* !!!!WARNING!!!! need to add "r31" in the next line if this header file + * is meant to be compiled non-dynamically! + */ +#define REGS_TO_SAVE "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", \ + "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", \ + "cr2", "cr3", "cr4" +static int +slp_switch(void) +{ + register int err; + register int *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ ("mr %0, 1" : "=g" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "mr 11, %0\n" + "add 1, 1, 11\n" + "add 30, 30, 11\n" + : /* no outputs */ + : "g" (stsizediff) + : "11" + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("li %0, 0" : "=r" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. 
+ */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_riscv_unix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_riscv_unix.h new file mode 100644 index 00000000..24df9dbb --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_riscv_unix.h @@ -0,0 +1,32 @@ +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "s0", "s1", "s2", "s3", "s4", "s5", \ + "s6", "s7", "s8", "s9", "s10", "s11", "fs0", "fs1", \ + "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", "fs8", "fs9", \ + "fs10", "fs11" + +static int +slp_switch(void) +{ + int ret; + long *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("mv %0, sp" : "=r" (stackref) : ); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "add sp, sp, %0\n\t" + : /* no outputs */ + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("mv %0, zero" : "=r" (ret) : ); + return ret; +} + +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_s390_unix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_s390_unix.h new file mode 100644 index 00000000..6641854e --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_s390_unix.h @@ -0,0 +1,87 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 25-Jan-12 Alexey Borzenkov + * Fixed Linux/S390 port to work correctly with + * different optimization options both on 31-bit + * and 64-bit. Thanks to Stefan Raabe for lots + * of testing. + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 06-Oct-02 Gustavo Niemeyer + * Ported to Linux/S390. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#ifdef __s390x__ +#define STACK_MAGIC 20 /* 20 * 8 = 160 bytes of function call area */ +#else +#define STACK_MAGIC 24 /* 24 * 4 = 96 bytes of function call area */ +#endif + +/* Technically, r11-r13 also need saving, but function prolog starts + with stm(g) and since there are so many saved registers already + it won't be optimized, resulting in all r6-r15 being saved */ +#define REGS_TO_SAVE "r6", "r7", "r8", "r9", "r10", "r14", \ + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15" + +static int +slp_switch(void) +{ + register int ret; + register long *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); +#ifdef __s390x__ + __asm__ volatile ("lgr %0, 15" : "=r" (stackref) : ); +#else + __asm__ volatile ("lr %0, 15" : "=r" (stackref) : ); +#endif + { + SLP_SAVE_STATE(stackref, stsizediff); +/* N.B. + r11 may be used as the frame pointer, and in that case it cannot be + clobbered and needs offsetting just like the stack pointer (but in cases + where frame pointer isn't used we might clobber it accidentally). What's + scary is that r11 is 2nd (and even 1st when GOT is used) callee saved + register that gcc would chose for surviving function calls. However, + since r6-r10 are clobbered above, their cost for reuse is reduced, so + gcc IRA will chose them over r11 (not seeing r11 is implicitly saved), + making it relatively safe to offset in all cases. 
:) */ + __asm__ volatile ( +#ifdef __s390x__ + "agr 15, %0\n\t" + "agr 11, %0" +#else + "ar 15, %0\n\t" + "ar 11, %0" +#endif + : /* no outputs */ + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("lhi %0, 0" : "=r" (ret) : ); + return ret; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. + */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_sparc_sun_gcc.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_sparc_sun_gcc.h new file mode 100644 index 00000000..652b57fd --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_sparc_sun_gcc.h @@ -0,0 +1,92 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 16-May-15 Alexey Borzenkov + * Move stack spilling code inside save/restore functions + * 30-Aug-13 Floris Bruynooghe + Clean the register windows again before returning. + This does not clobber the PIC register as it leaves + the current window intact and is required for multi- + threaded code to work correctly. + * 08-Mar-11 Floris Bruynooghe + * No need to set return value register explicitly + * before the stack and framepointer are adjusted + * as none of the other registers are influenced by + * this. Also don't needlessly clean the windows + * ('ta %0" :: "i" (ST_CLEAN_WINDOWS)') as that + * clobbers the gcc PIC register (%l7). + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. Needed to introduce + * an adjustment STACK_MAGIC per platform. + * 15-Sep-02 Gerd Woetzel + * added support for SunOS sparc with gcc + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + + +#define STACK_MAGIC 0 + + +#if defined(__sparcv9) +#define SLP_FLUSHW __asm__ volatile ("flushw") +#else +#define SLP_FLUSHW __asm__ volatile ("ta 3") /* ST_FLUSH_WINDOWS */ +#endif + +/* On sparc we need to spill register windows inside save/restore functions */ +#define SLP_BEFORE_SAVE_STATE() SLP_FLUSHW +#define SLP_BEFORE_RESTORE_STATE() SLP_FLUSHW + + +static int +slp_switch(void) +{ + register int err; + register int *stackref, stsizediff; + + /* Put current stack pointer into stackref. + * Register spilling is done in save/restore. + */ + __asm__ volatile ("mov %%sp, %0" : "=r" (stackref)); + + { + /* Thou shalt put SLP_SAVE_STATE into a local block */ + /* Copy the current stack onto the heap */ + SLP_SAVE_STATE(stackref, stsizediff); + + /* Increment stack and frame pointer by stsizediff */ + __asm__ volatile ( + "add %0, %%sp, %%sp\n\t" + "add %0, %%fp, %%fp" + : : "r" (stsizediff)); + + /* Copy new stack from it's save store on the heap */ + SLP_RESTORE_STATE(); + + __asm__ volatile ("mov %1, %0" : "=r" (err) : "i" (0)); + return err; + } +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. 
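A recurring idiom throughout these files is the empty asm statement with a large clobber list: it emits no instructions, but forces the compiler to spill and later reload the named callee-saved registers around the stack swap. Reduced to a portable core (a sketch only; the "memory" clobber stands in for the per-platform REGS_TO_SAVE lists):

```cpp
/* An empty asm whose clobbers act as a compiler-level fence. */
static void compiler_fence(void)
{
    __asm__ volatile ("" : : : "memory");
}
```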
+ */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x32_unix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x32_unix.h new file mode 100644 index 00000000..cb14ec1c --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x32_unix.h @@ -0,0 +1,63 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 17-Aug-12 Fantix King + * Ported from amd64. + */ + +#define STACK_REFPLUS 1 + +#ifdef SLP_EVAL + +#define STACK_MAGIC 0 + +#define REGS_TO_SAVE "r12", "r13", "r14", "r15" + + +static int +slp_switch(void) +{ + void* ebp; + void* ebx; + unsigned int csr; + unsigned short cw; + register int err; + register int *stackref, stsizediff; + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("fstcw %0" : "=m" (cw)); + __asm__ volatile ("stmxcsr %0" : "=m" (csr)); + __asm__ volatile ("movl %%ebp, %0" : "=m" (ebp)); + __asm__ volatile ("movl %%ebx, %0" : "=m" (ebx)); + __asm__ ("movl %%esp, %0" : "=g" (stackref)); + { + SLP_SAVE_STATE(stackref, stsizediff); + __asm__ volatile ( + "addl %0, %%esp\n" + "addl %0, %%ebp\n" + : + : "r" (stsizediff) + ); + SLP_RESTORE_STATE(); + } + __asm__ volatile ("movl %0, %%ebx" : : "m" (ebx)); + __asm__ volatile ("movl %0, %%ebp" : : "m" (ebp)); + __asm__ volatile ("ldmxcsr %0" : : "m" (csr)); + __asm__ volatile ("fldcw %0" : : "m" (cw)); + __asm__ volatile ("" : : : REGS_TO_SAVE); + __asm__ volatile ("xorl %%eax, %%eax" : "=a" (err)); + return err; +} + +#endif + +/* + * further self-processing support + */ + +/* + * if you want to add self-inspection tools, place them + * here. See the x86_msvc for the necessary defines. + * These features are highly experimental und not + * essential yet. 
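The x86-family headers above also preserve floating-point control state: the x87 control word via fstcw and the SSE MXCSR via stmxcsr, so rounding modes and exception masks survive the switch. A runnable peek at both values (x86/x86-64 with GCC/Clang only; the print-out is illustrative):

```cpp
#include <cstdio>

int main()
{
    unsigned short cw;   /* x87 control word */
    unsigned int csr;    /* SSE MXCSR */
    __asm__ volatile ("fstcw %0" : "=m" (cw));
    __asm__ volatile ("stmxcsr %0" : "=m" (csr));
    std::printf("x87 cw=0x%04x, mxcsr=0x%08x\n", cw, csr);
    return 0;
}
```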
+ */ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x64_masm.asm b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x64_masm.asm new file mode 100644 index 00000000..f5c72a27 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x64_masm.asm @@ -0,0 +1,111 @@ +; +; stack switching code for MASM on x641 +; Kristjan Valur Jonsson, sept 2005 +; + + +;prototypes for our calls +slp_save_state_asm PROTO +slp_restore_state_asm PROTO + + +pushxmm MACRO reg + sub rsp, 16 + .allocstack 16 + movaps [rsp], reg ; faster than movups, but we must be aligned + ; .savexmm128 reg, offset (don't know what offset is, no documentation) +ENDM +popxmm MACRO reg + movaps reg, [rsp] ; faster than movups, but we must be aligned + add rsp, 16 +ENDM + +pushreg MACRO reg + push reg + .pushreg reg +ENDM +popreg MACRO reg + pop reg +ENDM + + +.code +slp_switch PROC FRAME + ;realign stack to 16 bytes after return address push, makes the following faster + sub rsp,8 + .allocstack 8 + + pushxmm xmm15 + pushxmm xmm14 + pushxmm xmm13 + pushxmm xmm12 + pushxmm xmm11 + pushxmm xmm10 + pushxmm xmm9 + pushxmm xmm8 + pushxmm xmm7 + pushxmm xmm6 + + pushreg r15 + pushreg r14 + pushreg r13 + pushreg r12 + + pushreg rbp + pushreg rbx + pushreg rdi + pushreg rsi + + sub rsp, 10h ;allocate the singlefunction argument (must be multiple of 16) + .allocstack 10h +.endprolog + + lea rcx, [rsp+10h] ;load stack base that we are saving + call slp_save_state_asm ;pass stackpointer, return offset in eax + cmp rax, 1 + je EXIT1 + cmp rax, -1 + je EXIT2 + ;actual stack switch: + add rsp, rax + call slp_restore_state_asm + xor rax, rax ;return 0 + +EXIT: + + add rsp, 10h + popreg rsi + popreg rdi + popreg rbx + popreg rbp + + popreg r12 + popreg r13 + popreg r14 + popreg r15 + + popxmm xmm6 + popxmm xmm7 + popxmm xmm8 + popxmm xmm9 + popxmm xmm10 + popxmm xmm11 + popxmm xmm12 + popxmm xmm13 + popxmm xmm14 + popxmm xmm15 + + add rsp, 8 + ret + +EXIT1: + mov rax, 1 + jmp EXIT + +EXIT2: + sar rax, 1 + jmp EXIT + +slp_switch ENDP + +END \ No newline at end of file diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x64_masm.obj b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x64_masm.obj new file mode 100644 index 00000000..64e3e6b8 Binary files /dev/null and b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x64_masm.obj differ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x64_msvc.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x64_msvc.h new file mode 100644 index 00000000..601ea560 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x64_msvc.h @@ -0,0 +1,60 @@ +/* + * this is the internal transfer function. + * + * HISTORY + * 24-Nov-02 Christian Tismer + * needed to add another magic constant to insure + * that f in slp_eval_frame(PyFrameObject *f) + * STACK_REFPLUS will probably be 1 in most cases. + * gets included into the saved stack area. + * 26-Sep-02 Christian Tismer + * again as a result of virtualized stack access, + * the compiler used less registers. Needed to + * explicit mention registers in order to get them saved. + * Thanks to Jeff Senn for pointing this out and help. + * 17-Sep-02 Christian Tismer + * after virtualizing stack save/restore, the + * stack size shrunk a bit. 
Needed to introduce
+ * an adjustment STACK_MAGIC per platform.
+ * 15-Sep-02 Gerd Woetzel
+ * slightly changed framework for sparc
+ * 01-Mar-02 Christian Tismer
+ * Initial final version after lots of iterations for i386.
+ */
+
+/* Avoid alloca redefined warning on mingw64 */
+#ifndef alloca
+#define alloca _alloca
+#endif
+
+#define STACK_REFPLUS 1
+#define STACK_MAGIC 0
+
+/* Use the generic support for an external assembly language slp_switch function. */
+#define EXTERNAL_ASM
+
+#ifdef SLP_EVAL
+/* This always uses the external masm assembly file. */
+#endif
+
+/*
+ * further self-processing support
+ */
+
+/* we have IsBadReadPtr available, so we can peek at objects */
+/*
+#define STACKLESS_SPY
+
+#ifdef IMPLEMENT_STACKLESSMODULE
+#include "Windows.h"
+#define CANNOT_READ_MEM(p, bytes) IsBadReadPtr(p, bytes)
+
+static int IS_ON_STACK(void*p)
+{
+    int stackref;
+    intptr_t stackbase = ((intptr_t)&stackref) & 0xfffff000;
+    return (intptr_t)p >= stackbase && (intptr_t)p < stackbase + 0x00100000;
+}
+
+#endif
+*/
\ No newline at end of file
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x86_msvc.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x86_msvc.h
new file mode 100644
index 00000000..0f3a59f5
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x86_msvc.h
@@ -0,0 +1,326 @@
+/*
+ * this is the internal transfer function.
+ *
+ * HISTORY
+ * 24-Nov-02 Christian Tismer
+ * needed to add another magic constant to insure
+ * that f in slp_eval_frame(PyFrameObject *f)
+ * STACK_REFPLUS will probably be 1 in most cases.
+ * gets included into the saved stack area.
+ * 26-Sep-02 Christian Tismer
+ * again as a result of virtualized stack access,
+ * the compiler used less registers. Needed to
+ * explicit mention registers in order to get them saved.
+ * Thanks to Jeff Senn for pointing this out and help.
+ * 17-Sep-02 Christian Tismer
+ * after virtualizing stack save/restore, the
+ * stack size shrunk a bit. Needed to introduce
+ * an adjustment STACK_MAGIC per platform.
+ * 15-Sep-02 Gerd Woetzel
+ * slightly changed framework for sparc
+ * 01-Mar-02 Christian Tismer
+ * Initial final version after lots of iterations for i386.
+ */
+
+#define alloca _alloca
+
+#define STACK_REFPLUS 1
+
+#ifdef SLP_EVAL
+
+#define STACK_MAGIC 0
+
+/* Some magic to quell warnings and keep slp_switch() from crashing when built
+   with VC90. Disable global optimizations, and the warning: frame pointer
+   register 'ebp' modified by inline assembly code.
+
+   We used to just disable global optimizations ("g") but upstream stackless
+   Python, as well as stackman, turn off all optimizations.
+
+References:
+https://github.com/stackless-dev/stackman/blob/dbc72fe5207a2055e658c819fdeab9731dee78b9/stackman/platforms/switch_x86_msvc.h
+https://github.com/stackless-dev/stackless/blob/main-slp/Stackless/platf/switch_x86_msvc.h
+*/
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#pragma optimize("", off) /* so that autos are stored on the stack */
+#pragma warning(disable:4731)
+#pragma warning(disable:4733) /* disable warning about modifying FS[0] */
+
+/**
+ * Most modern compilers and environments handle C++ exceptions without any
+ * special help from us. MSVC on 32-bit windows is an exception. There, C++
+ * exceptions are dealt with using Windows' Structured Exception Handling
+ * (SEH).
+ *
+ * SEH is implemented as a singly linked list of nodes.
The + * head of this list is stored in the Thread Information Block, which itself + * is pointed to from the FS register. It's the first field in the structure, + * or offset 0, so we can access it using assembly FS:[0], or the compiler + * intrinsics and field offset information from the headers (as we do below). + * Somewhat unusually, the tail of the list doesn't have prev == NULL, it has + * prev == 0xFFFFFFFF. + * + * SEH was designed for C, and traditionally uses the MSVC compiler + * intrinsincs __try{}/__except{}. It is also utilized for C++ exceptions by + * MSVC; there, every throw of a C++ exception raises a SEH error with the + * ExceptionCode 0xE06D7363; the SEH handler list is then traversed to + * deal with the exception. + * + * If the SEH list is corrupt, then when a C++ exception is thrown the program + * will abruptly exit with exit code 1. This does not use std::terminate(), so + * std::set_terminate() is useless to debug this. + * + * The SEH list is closely tied to the call stack; entering a function that + * uses __try{} or most C++ functions will push a new handler onto the front + * of the list. Returning from the function will remove the handler. Saving + * and restoring the head node of the SEH list (FS:[0]) per-greenlet is NOT + * ENOUGH to make SEH or exceptions work. + * + * Stack switching breaks SEH because the call stack no longer necessarily + * matches the SEH list. For example, given greenlet A that switches to + * greenlet B, at the moment of entering greenlet B, we will have any SEH + * handlers from greenlet A on the SEH list; greenlet B can then add its own + * handlers to the SEH list. When greenlet B switches back to greenlet A, + * greenlet B's handlers would still be on the SEH stack, but when switch() + * returns control to greenlet A, we have replaced the contents of the stack + * in memory, so all the address that greenlet B added to the SEH list are now + * invalid: part of the call stack has been unwound, but the SEH list was out + * of sync with the call stack. The net effect is that exception handling + * stops working. + * + * Thus, when switching greenlets, we need to be sure that the SEH list + * matches the effective call stack, "cutting out" any handlers that were + * pushed by the greenlet that switched out and which are no longer valid. + * + * The easiest way to do this is to capture the SEH list at the time the main + * greenlet for a thread is created, and, when initially starting a greenlet, + * start a new SEH list for it, which contains nothing but the handler + * established for the new greenlet itself, with the tail being the handlers + * for the main greenlet. If we then save and restore the SEH per-greenlet, + * they won't interfere with each others SEH lists. (No greenlet can unwind + * the call stack past the handlers established by the main greenlet). + * + * By observation, a new thread starts with three SEH handlers on the list. By + * the time we get around to creating the main greenlet, though, there can be + * many more, established by transient calls that lead to the creation of the + * main greenlet. Therefore, 3 is a magic constant telling us when to perform + * the initial slice. + * + * All of this can be debugged using a vectored exception handler, which + * operates independently of the SEH handler list, and is called first. + * Walking the SEH list at key points can also be helpful. 
+ *
+ * References:
+ * https://en.wikipedia.org/wiki/Win32_Thread_Information_Block
+ * https://devblogs.microsoft.com/oldnewthing/20100730-00/?p=13273
+ * https://docs.microsoft.com/en-us/cpp/cpp/try-except-statement?view=msvc-160
+ * https://docs.microsoft.com/en-us/cpp/cpp/structured-exception-handling-c-cpp?view=msvc-160
+ * https://docs.microsoft.com/en-us/windows/win32/debug/structured-exception-handling
+ * https://docs.microsoft.com/en-us/windows/win32/debug/using-a-vectored-exception-handler
+ * https://bytepointer.com/resources/pietrek_crash_course_depths_of_win32_seh.htm
+ */
+#define GREENLET_NEEDS_EXCEPTION_STATE_SAVED
+
+
+typedef struct _GExceptionRegistration {
+    struct _GExceptionRegistration* prev;
+    void* handler_f;
+} GExceptionRegistration;
+
+static void
+slp_set_exception_state(const void *const seh_state)
+{
+    // Because the stack from which we do this is ALSO a handler, and
+    // that one we want to keep, we need to relink the current SEH handler
+    // frame to point to this one, cutting out the middle men, as it were.
+    //
+    // Entering a try block doesn't change the SEH frame, but entering a
+    // function containing a try block does.
+    GExceptionRegistration* current_seh_state = (GExceptionRegistration*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
+    current_seh_state->prev = (GExceptionRegistration*)seh_state;
+}
+
+
+static GExceptionRegistration*
+x86_slp_get_third_oldest_handler()
+{
+    GExceptionRegistration* a = NULL; /* Closest to the top */
+    GExceptionRegistration* b = NULL; /* second */
+    GExceptionRegistration* c = NULL;
+    GExceptionRegistration* seh_state = (GExceptionRegistration*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
+    a = b = c = seh_state;
+
+    while (seh_state && seh_state != (GExceptionRegistration*)0xFFFFFFFF) {
+        if ((void*)seh_state->prev < (void*)100) {
+            fprintf(stderr, "\tERROR: Broken SEH chain.\n");
+            return NULL;
+        }
+        a = b;
+        b = c;
+        c = seh_state;
+
+        seh_state = seh_state->prev;
+    }
+    return a ? a : (b ? b : c);
+}
+
+
+static void*
+slp_get_exception_state()
+{
+    // XXX: There appear to be three SEH handlers on the stack already at the
+    // start of the thread. Is that a guarantee? Almost certainly not. Yet in
+    // all observed cases it has been three. This is consistent with
+    // faulthandler off or on, and optimizations off or on. It may not be
+    // consistent with other operating system versions, though: we only have
+    // CI on one or two versions (don't ask what they are).
+    // In theory we could capture the number of handlers on the chain when
+    // PyInit__greenlet is called: there are probably only the default
+    // handlers at that point (unless we're embedded and people have used
+    // __try/__except or a C++ handler)?
+    return x86_slp_get_third_oldest_handler();
+}
+
+static int
+slp_switch(void)
+{
+    /* MASM syntax is typically reversed from other assemblers.
+       It is usually "op destination, source". */
+    int *stackref, stsizediff;
+    /* store the structured exception state for this stack */
+    DWORD seh_state = __readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
+    __asm mov stackref, esp;
+    /* modify EBX, ESI and EDI in order to get them preserved */
+    __asm mov ebx, ebx;
+    __asm xchg esi, edi;
+    {
+        SLP_SAVE_STATE(stackref, stsizediff);
+        __asm {
+            mov eax, stsizediff
+            add esp, eax
+            add ebp, eax
+        }
+        SLP_RESTORE_STATE();
+    }
+    __writefsdword(FIELD_OFFSET(NT_TIB, ExceptionList), seh_state);
+    return 0;
+}
+
+/* re-enable ebp warning and global optimizations. */
+#pragma optimize("", on)
+#pragma warning(default:4731)
+#pragma warning(default:4733) /* re-enable warning about modifying FS[0] */
+
+
+#endif
+
+/*
+ * further self-processing support
+ */
+
+/* we have IsBadReadPtr available, so we can peek at objects */
+#define STACKLESS_SPY
+
+#ifdef GREENLET_DEBUG
+
+#define CANNOT_READ_MEM(p, bytes) IsBadReadPtr(p, bytes)
+
+static int IS_ON_STACK(void*p)
+{
+    int stackref;
+    int stackbase = ((int)&stackref) & 0xfffff000;
+    return (int)p >= stackbase && (int)p < stackbase + 0x00100000;
+}
+
+static void
+x86_slp_show_seh_chain()
+{
+    GExceptionRegistration* seh_state = (GExceptionRegistration*)__readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
+    fprintf(stderr, "====== SEH Chain ======\n");
+    while (seh_state && seh_state != (GExceptionRegistration*)0xFFFFFFFF) {
+        fprintf(stderr, "\tSEH_chain addr: %p handler: %p prev: %p\n",
+                seh_state,
+                seh_state->handler_f, seh_state->prev);
+        if ((void*)seh_state->prev < (void*)100) {
+            fprintf(stderr, "\tERROR: Broken chain.\n");
+            break;
+        }
+        seh_state = seh_state->prev;
+    }
+    fprintf(stderr, "====== End SEH Chain ======\n");
+    fflush(NULL);
+    return;
+}
+
+// AddVectoredExceptionHandler constants:
+// CALL_FIRST means call this exception handler first;
+// CALL_LAST means call this exception handler last
+#define CALL_FIRST 1
+#define CALL_LAST 0
+
+LONG WINAPI
+GreenletVectorHandler(PEXCEPTION_POINTERS ExceptionInfo)
+{
+    // We get one of these for every C++ exception, with code
+    // E06D7363
+    // This is a special value that means "C++ exception from MSVC"
+    // https://devblogs.microsoft.com/oldnewthing/20100730-00/?p=13273
+    //
+    // Install in the module init function with:
+    // AddVectoredExceptionHandler(CALL_FIRST, GreenletVectorHandler);
+    PEXCEPTION_RECORD ExceptionRecord = ExceptionInfo->ExceptionRecord;
+
+    fprintf(stderr,
+            "GOT VECTORED EXCEPTION:\n"
+            "\tExceptionCode   : %p\n"
+            "\tExceptionFlags  : %p\n"
+            "\tExceptionAddr   : %p\n"
+            "\tNumberparams    : %ld\n",
+            ExceptionRecord->ExceptionCode,
+            ExceptionRecord->ExceptionFlags,
+            ExceptionRecord->ExceptionAddress,
+            ExceptionRecord->NumberParameters
+    );
+    if (ExceptionRecord->ExceptionFlags & 1) {
+        fprintf(stderr, "\t\tEH_NONCONTINUABLE\n" );
+    }
+    if (ExceptionRecord->ExceptionFlags & 2) {
+        fprintf(stderr, "\t\tEH_UNWINDING\n" );
+    }
+    if (ExceptionRecord->ExceptionFlags & 4) {
+        fprintf(stderr, "\t\tEH_EXIT_UNWIND\n" );
+    }
+    if (ExceptionRecord->ExceptionFlags & 8) {
+        fprintf(stderr, "\t\tEH_STACK_INVALID\n" );
+    }
+    if (ExceptionRecord->ExceptionFlags & 0x10) {
+        fprintf(stderr, "\t\tEH_NESTED_CALL\n" );
+    }
+    if (ExceptionRecord->ExceptionFlags & 0x20) {
+        fprintf(stderr, "\t\tEH_TARGET_UNWIND\n" );
+    }
+    if (ExceptionRecord->ExceptionFlags & 0x40) {
+        fprintf(stderr, "\t\tEH_COLLIDED_UNWIND\n" );
+    }
+    fprintf(stderr, "\n");
+    fflush(NULL);
+    for(DWORD i = 0; i < ExceptionRecord->NumberParameters; i++) {
+        fprintf(stderr, "\t\t\tParam %ld: %lX\n", i, ExceptionRecord->ExceptionInformation[i]);
+    }
+
+    if (ExceptionRecord->NumberParameters == 3) {
+        fprintf(stderr, "\tAbout to traverse SEH chain\n");
+        // C++ Exception records have 3 params.
+        x86_slp_show_seh_chain();
+    }
+
+    return EXCEPTION_CONTINUE_SEARCH;
+}
+
+
+
+
+#endif
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x86_unix.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x86_unix.h
new file mode 100644
index 00000000..3a951865
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/platform/switch_x86_unix.h
@@ -0,0 +1,105 @@
+/*
+ * this is the internal transfer function.
+ *
+ * HISTORY
+ * 3-May-13 Ralf Schmitt
+ * Add support for strange GCC caller-save decisions
+ * (ported from switch_aarch64_gcc.h)
+ * 19-Aug-11 Alexey Borzenkov
+ * Correctly save ebp, ebx and cw
+ * 07-Sep-05 (py-dev mailing list discussion)
+ * removed 'ebx' from the register-save list. !!!! WARNING !!!!
+ * It means that this file can no longer be compiled statically!
+ * It is now only suitable as part of a dynamic library!
+ * 24-Nov-02 Christian Tismer
+ * needed to add another magic constant to ensure
+ * that f in slp_eval_frame(PyFrameObject *f)
+ * gets included into the saved stack area.
+ * (STACK_REFPLUS will probably be 1 in most cases.)
+ * 17-Sep-02 Christian Tismer
+ * after virtualizing stack save/restore, the
+ * stack size shrunk a bit. Needed to introduce
+ * an adjustment STACK_MAGIC per platform.
+ * 15-Sep-02 Gerd Woetzel
+ * slightly changed framework for sparc
+ * 31-Avr-02 Armin Rigo
+ * Added ebx, esi and edi register-saves.
+ * 01-Mar-02 Samual M. Rushing
+ * Ported from i386.
+ */
+
+#define STACK_REFPLUS 1
+
+#ifdef SLP_EVAL
+
+/* #define STACK_MAGIC 3 */
+/* the above works fine with gcc 2.96, but 2.95.3 wants this */
+#define STACK_MAGIC 0
+
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+# define ATTR_NOCLONE __attribute__((noclone))
+#else
+# define ATTR_NOCLONE
+#endif
+
+static int
+slp_switch(void)
+{
+    int err;
+#ifdef _WIN32
+    void *seh;
+#endif
+    void *ebp, *ebx;
+    unsigned short cw;
+    register int *stackref, stsizediff;
+    __asm__ volatile ("" : : : "esi", "edi");
+    __asm__ volatile ("fstcw %0" : "=m" (cw));
+    __asm__ volatile ("movl %%ebp, %0" : "=m" (ebp));
+    __asm__ volatile ("movl %%ebx, %0" : "=m" (ebx));
+#ifdef _WIN32
+    __asm__ volatile (
+        "movl %%fs:0x0, %%eax\n"
+        "movl %%eax, %0\n"
+        : "=m" (seh)
+        :
+        : "eax");
+#endif
+    __asm__ ("movl %%esp, %0" : "=g" (stackref));
+    {
+        SLP_SAVE_STATE(stackref, stsizediff);
+        __asm__ volatile (
+            "addl %0, %%esp\n"
+            "addl %0, %%ebp\n"
+            :
+            : "r" (stsizediff)
+        );
+        SLP_RESTORE_STATE();
+        __asm__ volatile ("xorl %%eax, %%eax" : "=a" (err));
+    }
+#ifdef _WIN32
+    __asm__ volatile (
+        "movl %0, %%eax\n"
+        "movl %%eax, %%fs:0x0\n"
+        :
+        : "m" (seh)
+        : "eax");
+#endif
+    __asm__ volatile ("movl %0, %%ebx" : : "m" (ebx));
+    __asm__ volatile ("movl %0, %%ebp" : : "m" (ebp));
+    __asm__ volatile ("fldcw %0" : : "m" (cw));
+    __asm__ volatile ("" : : : "esi", "edi");
+    return err;
+}
+
+#endif
+
+/*
+ * further self-processing support
+ */
+
+/*
+ * if you want to add self-inspection tools, place them
+ * here. See the x86_msvc for the necessary defines.
+ * These features are highly experimental and not
+ * essential yet.
+ */
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/slp_platformselect.h b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/slp_platformselect.h
new file mode 100644
index 00000000..b6a3e704
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/slp_platformselect.h
@@ -0,0 +1,65 @@
+/*
+ * Platform Selection for Stackless Python
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+#if defined(MS_WIN32) && !defined(MS_WIN64) && defined(_M_IX86) && defined(_MSC_VER)
+#include "platform/switch_x86_msvc.h" /* MS Visual Studio on X86 */
+#elif defined(MS_WIN64) && defined(_M_X64) && defined(_MSC_VER) || defined(__MINGW64__)
+#include "platform/switch_x64_msvc.h" /* MS Visual Studio on X64 */
+#elif defined(MS_WIN64) && defined(_M_ARM64)
+#include "platform/switch_arm64_msvc.h" /* MS Visual Studio on ARM64 */
+#elif defined(__GNUC__) && defined(__amd64__) && defined(__ILP32__)
+#include "platform/switch_x32_unix.h" /* gcc on amd64 with x32 ABI */
+#elif defined(__GNUC__) && defined(__amd64__)
+#include "platform/switch_amd64_unix.h" /* gcc on amd64 */
+#elif defined(__GNUC__) && defined(__i386__)
+#include "platform/switch_x86_unix.h" /* gcc on X86 */
+#elif defined(__GNUC__) && defined(__powerpc64__) && (defined(__linux__) || defined(__FreeBSD__))
+#include "platform/switch_ppc64_linux.h" /* gcc on PowerPC 64-bit */
+#elif defined(__GNUC__) && defined(__PPC__) && (defined(__linux__) || defined(__FreeBSD__))
+#include "platform/switch_ppc_linux.h" /* gcc on PowerPC */
+#elif defined(__GNUC__) && defined(__ppc__) && defined(__APPLE__)
+#include "platform/switch_ppc_macosx.h" /* Apple MacOS X on PowerPC */
+#elif defined(__GNUC__) && defined(__powerpc64__) && defined(_AIX)
+#include "platform/switch_ppc64_aix.h" /* gcc on AIX/PowerPC 64-bit */
+#elif defined(__GNUC__) && defined(_ARCH_PPC) && defined(_AIX)
+#include "platform/switch_ppc_aix.h" /* gcc on AIX/PowerPC */
+#elif defined(__GNUC__) && defined(sparc)
+#include "platform/switch_sparc_sun_gcc.h" /* SunOS sparc with gcc */
+#elif defined(__SUNPRO_C) && defined(sparc) && defined(sun)
+#include "platform/switch_sparc_sun_gcc.h" /* SunStudio on SPARC */
+#elif defined(__SUNPRO_C) && defined(__amd64__) && defined(sun)
+#include "platform/switch_amd64_unix.h" /* SunStudio on amd64 */
+#elif defined(__SUNPRO_C) && defined(__i386__) && defined(sun)
+#include "platform/switch_x86_unix.h" /* SunStudio on x86 */
+#elif defined(__GNUC__) && defined(__s390__) && defined(__linux__)
+#include "platform/switch_s390_unix.h" /* Linux/S390 */
+#elif defined(__GNUC__) && defined(__s390x__) && defined(__linux__)
+#include "platform/switch_s390_unix.h" /* Linux/S390 zSeries (64-bit) */
+#elif defined(__GNUC__) && defined(__arm__)
+#ifdef __APPLE__
+#include <TargetConditionals.h>
+#endif
+#if TARGET_OS_IPHONE
+#include "platform/switch_arm32_ios.h" /* iPhone OS on arm32 */
+#else
+#include "platform/switch_arm32_gcc.h" /* gcc using arm32 */
+#endif
+#elif defined(__GNUC__) && defined(__mips__) && defined(__linux__)
+#include "platform/switch_mips_unix.h" /* Linux/MIPS */
+#elif defined(__GNUC__) && defined(__aarch64__)
+#include "platform/switch_aarch64_gcc.h" /* Aarch64 ABI */
+#elif defined(__GNUC__) && defined(__mc68000__)
+#include "platform/switch_m68k_gcc.h" /* gcc on m68k */
+#elif defined(__GNUC__) && defined(__csky__)
+#include "platform/switch_csky_gcc.h" /* gcc on csky */
+#elif defined(__GNUC__) && defined(__riscv)
+#include "platform/switch_riscv_unix.h" /* gcc on RISC-V */
+#elif defined(__GNUC__) &&
defined(__alpha__) +#include "platform/switch_alpha_unix.h" /* gcc on DEC Alpha */ +#endif +#ifdef __cplusplus +}; +#endif diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/__init__.py new file mode 100644 index 00000000..7ff5afb9 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/__init__.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- +""" +Tests for greenlet. + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest + +from gc import collect +from gc import get_objects +from threading import active_count as active_thread_count +from time import sleep +from time import time + +from greenlet import greenlet as RawGreenlet +from greenlet import getcurrent + +from greenlet._greenlet import get_pending_cleanup_count +from greenlet._greenlet import get_total_main_greenlets + +from . import leakcheck + + +class TestCaseMetaClass(type): + # wrap each test method with + # a) leak checks + def __new__(cls, classname, bases, classDict): + # pylint and pep8 fight over what this should be called (mcs or cls). + # pylint gets it right, but we can't scope disable pep8, so we go with + # its convention. + # pylint: disable=bad-mcs-classmethod-argument + check_totalrefcount = True + + # Python 3: must copy, we mutate the classDict. Interestingly enough, + # it doesn't actually error out, but under 3.6 we wind up wrapping + # and re-wrapping the same items over and over and over. + for key, value in list(classDict.items()): + if key.startswith('test') and callable(value): + classDict.pop(key) + if check_totalrefcount: + value = leakcheck.wrap_refcount(value) + classDict[key] = value + return type.__new__(cls, classname, bases, classDict) + + +class TestCase(TestCaseMetaClass( + "NewBase", + (unittest.TestCase,), + {})): + + cleanup_attempt_sleep_duration = 0.001 + cleanup_max_sleep_seconds = 1 + + def wait_for_pending_cleanups(self, + initial_active_threads=None, + initial_main_greenlets=None): + initial_active_threads = initial_active_threads or self.threads_before_test + initial_main_greenlets = initial_main_greenlets or self.main_greenlets_before_test + sleep_time = self.cleanup_attempt_sleep_duration + # NOTE: This is racy! A Python-level thread object may be dead + # and gone, but the C thread may not yet have fired its + # destructors and added to the queue. There's no particular + # way to know that's about to happen. We try to watch the + # Python threads to make sure they, at least, have gone away. + # Counting the main greenlets, which we can easily do deterministically, + # also helps. + + # Always sleep at least once to let other threads run + sleep(sleep_time) + quit_after = time() + self.cleanup_max_sleep_seconds + # TODO: We could add an API that calls us back when a particular main greenlet is deleted? 
+ # It would have to drop the GIL + while ( + get_pending_cleanup_count() + or active_thread_count() > initial_active_threads + or (not self.expect_greenlet_leak + and get_total_main_greenlets() > initial_main_greenlets)): + sleep(sleep_time) + if time() > quit_after: + print("Time limit exceeded.") + print("Threads: Waiting for only", initial_active_threads, + "-->", active_thread_count()) + print("MGlets : Waiting for only", initial_main_greenlets, + "-->", get_total_main_greenlets()) + break + collect() + + def count_objects(self, kind=list, exact_kind=True): + # pylint:disable=unidiomatic-typecheck + # Collect the garbage. + for _ in range(3): + collect() + if exact_kind: + return sum( + 1 + for x in get_objects() + if type(x) is kind + ) + # instances + return sum( + 1 + for x in get_objects() + if isinstance(x, kind) + ) + + greenlets_before_test = 0 + threads_before_test = 0 + main_greenlets_before_test = 0 + expect_greenlet_leak = False + + def count_greenlets(self): + """ + Find all the greenlets and subclasses tracked by the GC. + """ + return self.count_objects(RawGreenlet, False) + + def setUp(self): + # Ensure the main greenlet exists, otherwise the first test + # gets a false positive leak + super(TestCase, self).setUp() + getcurrent() + self.threads_before_test = active_thread_count() + self.main_greenlets_before_test = get_total_main_greenlets() + self.wait_for_pending_cleanups(self.threads_before_test, self.main_greenlets_before_test) + self.greenlets_before_test = self.count_greenlets() + + def tearDown(self): + if getattr(self, 'skipTearDown', False): + return + + self.wait_for_pending_cleanups(self.threads_before_test, self.main_greenlets_before_test) + super(TestCase, self).tearDown() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension.c b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension.c new file mode 100644 index 00000000..ddf52047 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension.c @@ -0,0 +1,244 @@ +/* This is a set of functions used by test_extension_interface.py to test the + * Greenlet C API. 
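+ * Each function below exercises part of the public PyGreenlet_* C API
+ * (switching, throwing, parent reassignment, greenlet creation, and the
+ * error types) so the Python-level tests can drive it directly.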
+ */ + +#include "../greenlet.h" + +#ifndef Py_RETURN_NONE +# define Py_RETURN_NONE return Py_INCREF(Py_None), Py_None +#endif + +#define TEST_MODULE_NAME "_test_extension" + +static PyObject* +test_switch(PyObject* self, PyObject* greenlet) +{ + PyObject* result = NULL; + + if (greenlet == NULL || !PyGreenlet_Check(greenlet)) { + PyErr_BadArgument(); + return NULL; + } + + result = PyGreenlet_Switch((PyGreenlet*)greenlet, NULL, NULL); + if (result == NULL) { + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_AssertionError, + "greenlet.switch() failed for some reason."); + } + return NULL; + } + Py_INCREF(result); + return result; +} + +static PyObject* +test_switch_kwargs(PyObject* self, PyObject* args, PyObject* kwargs) +{ + PyGreenlet* g = NULL; + PyObject* result = NULL; + + PyArg_ParseTuple(args, "O!", &PyGreenlet_Type, &g); + + if (g == NULL || !PyGreenlet_Check(g)) { + PyErr_BadArgument(); + return NULL; + } + + result = PyGreenlet_Switch(g, NULL, kwargs); + if (result == NULL) { + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_AssertionError, + "greenlet.switch() failed for some reason."); + } + return NULL; + } + Py_XINCREF(result); + return result; +} + +static PyObject* +test_getcurrent(PyObject* self) +{ + PyGreenlet* g = PyGreenlet_GetCurrent(); + if (g == NULL || !PyGreenlet_Check(g) || !PyGreenlet_ACTIVE(g)) { + PyErr_SetString(PyExc_AssertionError, + "getcurrent() returned an invalid greenlet"); + Py_XDECREF(g); + return NULL; + } + Py_DECREF(g); + Py_RETURN_NONE; +} + +static PyObject* +test_setparent(PyObject* self, PyObject* arg) +{ + PyGreenlet* current; + PyGreenlet* greenlet = NULL; + + if (arg == NULL || !PyGreenlet_Check(arg)) { + PyErr_BadArgument(); + return NULL; + } + if ((current = PyGreenlet_GetCurrent()) == NULL) { + return NULL; + } + greenlet = (PyGreenlet*)arg; + if (PyGreenlet_SetParent(greenlet, current)) { + Py_DECREF(current); + return NULL; + } + Py_DECREF(current); + if (PyGreenlet_Switch(greenlet, NULL, NULL) == NULL) { + return NULL; + } + Py_RETURN_NONE; +} + +static PyObject* +test_new_greenlet(PyObject* self, PyObject* callable) +{ + PyObject* result = NULL; + PyGreenlet* greenlet = PyGreenlet_New(callable, NULL); + + if (!greenlet) { + return NULL; + } + + result = PyGreenlet_Switch(greenlet, NULL, NULL); + Py_CLEAR(greenlet); + if (result == NULL) { + return NULL; + } + + Py_INCREF(result); + return result; +} + +static PyObject* +test_raise_dead_greenlet(PyObject* self) +{ + PyErr_SetString(PyExc_GreenletExit, "test GreenletExit exception."); + return NULL; +} + +static PyObject* +test_raise_greenlet_error(PyObject* self) +{ + PyErr_SetString(PyExc_GreenletError, "test greenlet.error exception"); + return NULL; +} + +static PyObject* +test_throw(PyObject* self, PyGreenlet* g) +{ + const char msg[] = "take that sucka!"; + PyObject* msg_obj = Py_BuildValue("s", msg); + PyGreenlet_Throw(g, PyExc_ValueError, msg_obj, NULL); + Py_DECREF(msg_obj); + if (PyErr_Occurred()) { + return NULL; + } + Py_RETURN_NONE; +} + +static PyObject* +test_throw_exact(PyObject* self, PyObject* args) +{ + PyGreenlet* g = NULL; + PyObject* typ = NULL; + PyObject* val = NULL; + PyObject* tb = NULL; + + if (!PyArg_ParseTuple(args, "OOOO:throw", &g, &typ, &val, &tb)) { + return NULL; + } + + PyGreenlet_Throw(g, typ, val, tb); + if (PyErr_Occurred()) { + return NULL; + } + Py_RETURN_NONE; +} + +static PyMethodDef test_methods[] = { + {"test_switch", + (PyCFunction)test_switch, + METH_O, + "Switch to the provided greenlet sending provided arguments, and \n" + "return the 
results."}, + {"test_switch_kwargs", + (PyCFunction)test_switch_kwargs, + METH_VARARGS | METH_KEYWORDS, + "Switch to the provided greenlet sending the provided keyword args."}, + {"test_getcurrent", + (PyCFunction)test_getcurrent, + METH_NOARGS, + "Test PyGreenlet_GetCurrent()"}, + {"test_setparent", + (PyCFunction)test_setparent, + METH_O, + "Se the parent of the provided greenlet and switch to it."}, + {"test_new_greenlet", + (PyCFunction)test_new_greenlet, + METH_O, + "Test PyGreenlet_New()"}, + {"test_raise_dead_greenlet", + (PyCFunction)test_raise_dead_greenlet, + METH_NOARGS, + "Just raise greenlet.GreenletExit"}, + {"test_raise_greenlet_error", + (PyCFunction)test_raise_greenlet_error, + METH_NOARGS, + "Just raise greenlet.error"}, + {"test_throw", + (PyCFunction)test_throw, + METH_O, + "Throw a ValueError at the provided greenlet"}, + {"test_throw_exact", + (PyCFunction)test_throw_exact, + METH_VARARGS, + "Throw exactly the arguments given at the provided greenlet"}, + {NULL, NULL, 0, NULL} +}; + +#if PY_MAJOR_VERSION >= 3 +# define INITERROR return NULL + +static struct PyModuleDef moduledef = {PyModuleDef_HEAD_INIT, + TEST_MODULE_NAME, + NULL, + 0, + test_methods, + NULL, + NULL, + NULL, + NULL}; + +PyMODINIT_FUNC +PyInit__test_extension(void) +#else +# define INITERROR return +PyMODINIT_FUNC +init_test_extension(void) +#endif +{ + PyObject* module = NULL; + +#if PY_MAJOR_VERSION >= 3 + module = PyModule_Create(&moduledef); +#else + module = Py_InitModule(TEST_MODULE_NAME, test_methods); +#endif + + if (module == NULL) { + INITERROR; + } + + PyGreenlet_Import(); + +#if PY_MAJOR_VERSION >= 3 + return module; +#endif +} diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension.cpython-39-darwin.so b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension.cpython-39-darwin.so new file mode 100755 index 00000000..bbb09f9c Binary files /dev/null and b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension.cpython-39-darwin.so differ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension_cpp.cpp b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension_cpp.cpp new file mode 100644 index 00000000..d4dfd5eb --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension_cpp.cpp @@ -0,0 +1,196 @@ +/* This is a set of functions used to test C++ exceptions are not + * broken during greenlet switches + */ + +#include "../greenlet.h" +#include "../greenlet_compiler_compat.hpp" + +struct exception_t { + int depth; + exception_t(int depth) : depth(depth) {} +}; + +/* Functions are called via pointers to prevent inlining */ +static void (*p_test_exception_throw)(int depth); +static PyObject* (*p_test_exception_switch_recurse)(int depth, int left); + +static void +test_exception_throw(int depth) +{ + throw exception_t(depth); +} + +static PyObject* +test_exception_switch_recurse(int depth, int left) +{ + if (left > 0) { + return p_test_exception_switch_recurse(depth, left - 1); + } + + PyObject* result = NULL; + PyGreenlet* self = PyGreenlet_GetCurrent(); + if (self == NULL) + return NULL; + + try { + if (PyGreenlet_Switch(PyGreenlet_GET_PARENT(self), NULL, NULL) == NULL) { + Py_DECREF(self); + return NULL; + } + p_test_exception_throw(depth); + PyErr_SetString(PyExc_RuntimeError, + "throwing C++ exception didn't work"); + } + catch (const exception_t& e) { + if (e.depth != depth) + 
+            PyErr_SetString(PyExc_AssertionError, "depth mismatch");
+        else
+            result = PyLong_FromLong(depth);
+    }
+    catch (...) {
+        PyErr_SetString(PyExc_RuntimeError, "unexpected C++ exception");
+    }
+
+    Py_DECREF(self);
+    return result;
+}
+
+/* test_exception_switch(int depth)
+ * - recurses depth times
+ * - switches to parent inside try/catch block
+ * - throws an exception (expected to be caught in the same function)
+ * - verifies depth matches (exceptions shouldn't be caught in other greenlets)
+ */
+static PyObject*
+test_exception_switch(PyObject* UNUSED(self), PyObject* args)
+{
+    int depth;
+    if (!PyArg_ParseTuple(args, "i", &depth))
+        return NULL;
+    return p_test_exception_switch_recurse(depth, depth);
+}
+
+
+static PyObject*
+py_test_exception_throw(PyObject* self, PyObject* args)
+{
+    if (!PyArg_ParseTuple(args, ""))
+        return NULL;
+    p_test_exception_throw(0);
+    PyErr_SetString(PyExc_AssertionError, "unreachable code running after throw");
+    return NULL;
+}
+
+
+/* test_exception_switch_and_do_in_g2(g2func)
+ * - creates new greenlet g2 to run g2func
+ * - switches to g2 inside try/catch block
+ * - verifies that no exception has been caught
+ *
+ * it is used together with test_exception_throw to verify that unhandled
+ * exceptions thrown in one greenlet do not propagate to other greenlets nor
+ * segfault the process.
+ */
+static PyObject*
+test_exception_switch_and_do_in_g2(PyObject* self, PyObject* args)
+{
+    PyObject* g2func = NULL;
+    PyObject* result = NULL;
+
+    if (!PyArg_ParseTuple(args, "O", &g2func))
+        return NULL;
+    PyGreenlet* g2 = PyGreenlet_New(g2func, NULL);
+    if (!g2) {
+        return NULL;
+    }
+
+    try {
+        result = PyGreenlet_Switch(g2, NULL, NULL);
+        if (!result) {
+            return NULL;
+        }
+    }
+    catch (const exception_t& e) {
+        /* if we are here the memory may already be corrupted and the program
+         * might crash before the py-level exception below gets printed.
+         * -> print something to stderr to make it clear that we had entered
+         * this catch block.
+         * See comments in inner_bootstrap()
+         */
+#if defined(WIN32) || defined(_WIN32)
+        fprintf(stderr, "C++ exception unexpectedly caught in g1\n");
+        PyErr_SetString(PyExc_AssertionError, "C++ exception unexpectedly caught in g1");
+        Py_XDECREF(result);
+        return NULL;
+#else
+        throw;
+#endif
+    }
+
+    Py_XDECREF(result);
+    Py_RETURN_NONE;
+}
+
+static PyMethodDef test_methods[] = {
+    {"test_exception_switch",
+     (PyCFunction)&test_exception_switch,
+     METH_VARARGS,
+     "Switches to parent twice, to test exception handling and greenlet "
+     "switching."},
+    {"test_exception_switch_and_do_in_g2",
+     (PyCFunction)&test_exception_switch_and_do_in_g2,
+     METH_VARARGS,
+     "Creates new greenlet g2 to run g2func and switches to it inside try/catch "
+     "block. Used together with test_exception_throw to verify that unhandled "
+     "C++ exceptions thrown in a greenlet do not corrupt memory."},
+    {"test_exception_throw",
+     (PyCFunction)&py_test_exception_throw,
+     METH_VARARGS,
+     "Throws a C++ exception.
Calling this function directly should abort the process."}, + {NULL, NULL, 0, NULL}}; + +#if PY_MAJOR_VERSION >= 3 +# define INITERROR return NULL + +static struct PyModuleDef moduledef = {PyModuleDef_HEAD_INIT, + "greenlet.tests._test_extension_cpp", + NULL, + 0, + test_methods, + NULL, + NULL, + NULL, + NULL}; + +PyMODINIT_FUNC +PyInit__test_extension_cpp(void) +#else +# define INITERROR return +PyMODINIT_FUNC +init_test_extension_cpp(void) +#endif +{ + PyObject* module = NULL; + +#if PY_MAJOR_VERSION >= 3 + module = PyModule_Create(&moduledef); +#else + module = Py_InitModule("greenlet.tests._test_extension_cpp", test_methods); +#endif + + if (module == NULL) { + INITERROR; + } + + PyGreenlet_Import(); + if (_PyGreenlet_API == NULL) { + INITERROR; + } + + p_test_exception_throw = test_exception_throw; + p_test_exception_switch_recurse = test_exception_switch_recurse; + +#if PY_MAJOR_VERSION >= 3 + return module; +#endif +} diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension_cpp.cpython-39-darwin.so b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension_cpp.cpython-39-darwin.so new file mode 100755 index 00000000..5583feec Binary files /dev/null and b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/_test_extension_cpp.cpython-39-darwin.so differ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/leakcheck.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/leakcheck.py new file mode 100644 index 00000000..79a18fce --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/leakcheck.py @@ -0,0 +1,318 @@ +# Copyright (c) 2018 gevent community +# Copyright (c) 2021 greenlet community +# +# This was originally part of gevent's test suite. The main author +# (Jason Madden) vendored a copy of it into greenlet. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +from __future__ import print_function + +import os +import sys +import gc + +from functools import wraps +import unittest + + +import objgraph + +# graphviz 0.18 (Nov 7 2021), available only on Python 3.6 and newer, +# has added type hints (sigh). It wants to use ``typing.Literal`` for +# some stuff, but that's only available on Python 3.9+. If that's not +# found, it creates a ``unittest.mock.MagicMock`` object and annotates +# with that. These are GC'able objects, and doing almost *anything* +# with them results in an explosion of objects. 
For example, trying to +# compare them for equality creates new objects. This causes our +# leakchecks to fail, with reports like: +# +# greenlet.tests.leakcheck.LeakCheckError: refcount increased by [337, 1333, 343, 430, 530, 643, 769] +# _Call 1820 +546 +# dict 4094 +76 +# MagicProxy 585 +73 +# tuple 2693 +66 +# _CallList 24 +3 +# weakref 1441 +1 +# function 5996 +1 +# type 736 +1 +# cell 592 +1 +# MagicMock 8 +1 +# +# To avoid this, we *could* filter this type of object out early. In +# principle it could leak, but we don't use mocks in greenlet, so it +# doesn't leak from us. However, a further issue is that ``MagicMock`` +# objects have subobjects that are also GC'able, like ``_Call``, and +# those create new mocks of their own too. So we'd have to filter them +# as well, and they're not public. That's OK, we can workaround the +# problem by being very careful to never compare by equality or other +# user-defined operators, only using object identity or other builtin +# functions. + +RUNNING_ON_GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS') +RUNNING_ON_TRAVIS = os.environ.get('TRAVIS') or RUNNING_ON_GITHUB_ACTIONS +RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR') +RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR +RUNNING_ON_MANYLINUX = os.environ.get('GREENLET_MANYLINUX') +SKIP_LEAKCHECKS = RUNNING_ON_MANYLINUX or os.environ.get('GREENLET_SKIP_LEAKCHECKS') +SKIP_FAILING_LEAKCHECKS = os.environ.get('GREENLET_SKIP_FAILING_LEAKCHECKS') +ONLY_FAILING_LEAKCHECKS = os.environ.get('GREENLET_ONLY_FAILING_LEAKCHECKS') + +def ignores_leakcheck(func): + """ + Ignore the given object during leakchecks. + + Can be applied to a method, in which case the method will run, but + will not be subject to leak checks. + + If applied to a class, the entire class will be skipped during leakchecks. This + is intended to be used for classes that are very slow and cause problems such as + test timeouts; typically it will be used for classes that are subclasses of a base + class and specify variants of behaviour (such as pool sizes). + """ + func.ignore_leakcheck = True + return func + +def fails_leakcheck(func): + """ + Mark that the function is known to leak. + """ + func.fails_leakcheck = True + if SKIP_FAILING_LEAKCHECKS: + func = unittest.skip("Skipping known failures")(func) + return func + +class LeakCheckError(AssertionError): + pass + +if hasattr(sys, 'getobjects'): + # In a Python build with ``--with-trace-refs``, make objgraph + # trace *all* the objects, not just those that are tracked by the + # GC + class _MockGC(object): + def get_objects(self): + return sys.getobjects(0) # pylint:disable=no-member + def __getattr__(self, name): + return getattr(gc, name) + objgraph.gc = _MockGC() + fails_strict_leakcheck = fails_leakcheck +else: + def fails_strict_leakcheck(func): + """ + Decorator for a function that is known to fail when running + strict (``sys.getobjects()``) leakchecks. + + This type of leakcheck finds all objects, even those, such as + strings, which are not tracked by the garbage collector. + """ + return func + +class ignores_types_in_strict_leakcheck(object): + def __init__(self, types): + self.types = types + def __call__(self, func): + func.leakcheck_ignore_types = self.types + return func + +class _RefCountChecker(object): + + # Some builtin things that we ignore + # XXX: Those things were ignored by gevent, but they're important here, + # presumably. 
+ IGNORED_TYPES = () #(tuple, dict, types.FrameType, types.TracebackType) + + def __init__(self, testcase, function): + self.testcase = testcase + self.function = function + self.deltas = [] + self.peak_stats = {} + self.ignored_types = () + + # The very first time we are called, we have already been + # self.setUp() by the test runner, so we don't need to do it again. + self.needs_setUp = False + + def _include_object_p(self, obj): + # pylint:disable=too-many-return-statements + # + # See the comment block at the top. We must be careful to + # avoid invoking user-defined operations. + if obj is self: + return False + kind = type(obj) + # ``self._include_object_p == obj`` returns NotImplemented + # for non-function objects, which causes the interpreter + # to try to reverse the order of arguments...which leads + # to the explosion of mock objects. We don't want that, so we implement + # the check manually. + if kind == type(self._include_object_p): + try: + # pylint:disable=not-callable + exact_method_equals = self._include_object_p.__eq__(obj) + except AttributeError: + # Python 2.7 methods may only have __cmp__, and that raises a + # TypeError for non-method arguments + # pylint:disable=no-member + exact_method_equals = self._include_object_p.__cmp__(obj) == 0 + + if exact_method_equals is not NotImplemented and exact_method_equals: + return False + + # Similarly, we need to check identity in our __dict__ to avoid mock explosions. + for x in self.__dict__.values(): + if obj is x: + return False + + + if kind in self.ignored_types or kind in self.IGNORED_TYPES: + return False + + return True + + def _growth(self): + return objgraph.growth(limit=None, peak_stats=self.peak_stats, + filter=self._include_object_p) + + def _report_diff(self, growth): + if not growth: + return "" + + lines = [] + width = max(len(name) for name, _, _ in growth) + for name, count, delta in growth: + lines.append('%-*s%9d %+9d' % (width, name, count, delta)) + + diff = '\n'.join(lines) + return diff + + + def _run_test(self, args, kwargs): + gc_enabled = gc.isenabled() + gc.disable() + + if self.needs_setUp: + self.testcase.setUp() + self.testcase.skipTearDown = False + try: + self.function(self.testcase, *args, **kwargs) + finally: + self.testcase.tearDown() + self.testcase.doCleanups() + self.testcase.skipTearDown = True + self.needs_setUp = True + if gc_enabled: + gc.enable() + + def _growth_after(self): + # Grab post snapshot + if 'urlparse' in sys.modules: + sys.modules['urlparse'].clear_cache() + if 'urllib.parse' in sys.modules: + sys.modules['urllib.parse'].clear_cache() + + return self._growth() + + def _check_deltas(self, growth): + # Return false when we have decided there is no leak, + # true if we should keep looping, raises an assertion + # if we have decided there is a leak. + + deltas = self.deltas + if not deltas: + # We haven't run yet, no data, keep looping + return True + + if gc.garbage: + raise LeakCheckError("Generated uncollectable garbage %r" % (gc.garbage,)) + + + # the following configurations are classified as "no leak" + # [0, 0] + # [x, 0, 0] + # [... a, b, c, d] where a+b+c+d = 0 + # + # the following configurations are classified as "leak" + # [... 
z, z, z] where z > 0 + + if deltas[-2:] == [0, 0] and len(deltas) in (2, 3): + return False + + if deltas[-3:] == [0, 0, 0]: + return False + + if len(deltas) >= 4 and sum(deltas[-4:]) == 0: + return False + + if len(deltas) >= 3 and deltas[-1] > 0 and deltas[-1] == deltas[-2] and deltas[-2] == deltas[-3]: + diff = self._report_diff(growth) + raise LeakCheckError('refcount increased by %r\n%s' % (deltas, diff)) + + # OK, we don't know for sure yet. Let's search for more + if sum(deltas[-3:]) <= 0 or sum(deltas[-4:]) <= 0 or deltas[-4:].count(0) >= 2: + # this is suspicious, so give a few more runs + limit = 11 + else: + limit = 7 + if len(deltas) >= limit: + raise LeakCheckError('refcount increased by %r\n%s' + % (deltas, + self._report_diff(growth))) + + # We couldn't decide yet, keep going + return True + + def __call__(self, args, kwargs): + for _ in range(3): + gc.collect() + + expect_failure = getattr(self.function, 'fails_leakcheck', False) + if expect_failure: + self.testcase.expect_greenlet_leak = True + self.ignored_types = getattr(self.function, "leakcheck_ignore_types", ()) + + # Capture state before; the incremental will be + # updated by each call to _growth_after + growth = self._growth() + + try: + while self._check_deltas(growth): + self._run_test(args, kwargs) + + growth = self._growth_after() + + self.deltas.append(sum((stat[2] for stat in growth))) + except LeakCheckError: + if not expect_failure: + raise + else: + if expect_failure: + raise LeakCheckError("Expected %s to leak but it did not." % (self.function,)) + +def wrap_refcount(method): + if getattr(method, 'ignore_leakcheck', False) or SKIP_LEAKCHECKS: + return method + + @wraps(method) + def wrapper(self, *args, **kwargs): # pylint:disable=too-many-branches + if getattr(self, 'ignore_leakcheck', False): + raise unittest.SkipTest("This class ignored during leakchecks") + if ONLY_FAILING_LEAKCHECKS and not getattr(method, 'fails_leakcheck', False): + raise unittest.SkipTest("Only running tests that fail leakchecks.") + return _RefCountChecker(self, method)(args, kwargs) + + return wrapper diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_contextvars.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_contextvars.py new file mode 100644 index 00000000..38b9bb79 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_contextvars.py @@ -0,0 +1,304 @@ +from __future__ import print_function + +import gc +import sys + +from functools import partial +from unittest import skipUnless +from unittest import skipIf + +from greenlet import greenlet +from greenlet import getcurrent +from . import TestCase + + +try: + from contextvars import Context + from contextvars import ContextVar + from contextvars import copy_context + # From the documentation: + # + # Important: Context Variables should be created at the top module + # level and never in closures. Context objects hold strong + # references to context variables which prevents context variables + # from being properly garbage collected. + ID_VAR = ContextVar("id", default=None) + VAR_VAR = ContextVar("var", default=None) + ContextVar = None +except ImportError: + Context = ContextVar = copy_context = None + +# We don't support testing if greenlet's built-in context var support is disabled. 
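+# A rough summary of the four propagation modes exercised by _test_context
+# below: "run" enters a copied context via Context.run(); "set" assigns a
+# fresh copy_context() to gr_context before the greenlet first runs;
+# "share" points every greenlet's gr_context at the creator's context; and
+# None leaves gr_context unset, so each greenlet gets a fresh, empty
+# context on first use of a context variable.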
+@skipUnless(Context is not None, "ContextVar not supported") +class ContextVarsTests(TestCase): + def _new_ctx_run(self, *args, **kwargs): + return copy_context().run(*args, **kwargs) + + def _increment(self, greenlet_id, callback, counts, expect): + ctx_var = ID_VAR + if expect is None: + self.assertIsNone(ctx_var.get()) + else: + self.assertEqual(ctx_var.get(), expect) + ctx_var.set(greenlet_id) + for _ in range(2): + counts[ctx_var.get()] += 1 + callback() + + def _test_context(self, propagate_by): + ID_VAR.set(0) + + callback = getcurrent().switch + counts = dict((i, 0) for i in range(5)) + + lets = [ + greenlet(partial( + partial( + copy_context().run, + self._increment + ) if propagate_by == "run" else self._increment, + greenlet_id=i, + callback=callback, + counts=counts, + expect=( + i - 1 if propagate_by == "share" else + 0 if propagate_by in ("set", "run") else None + ) + )) + for i in range(1, 5) + ] + + for let in lets: + if propagate_by == "set": + let.gr_context = copy_context() + elif propagate_by == "share": + let.gr_context = getcurrent().gr_context + + for i in range(2): + counts[ID_VAR.get()] += 1 + for let in lets: + let.switch() + + if propagate_by == "run": + # Must leave each context.run() in reverse order of entry + for let in reversed(lets): + let.switch() + else: + # No context.run(), so fine to exit in any order. + for let in lets: + let.switch() + + for let in lets: + self.assertTrue(let.dead) + # When using run(), we leave the run() as the greenlet dies, + # and there's no context "underneath". When not using run(), + # gr_context still reflects the context the greenlet was + # running in. + if propagate_by == 'run': + self.assertIsNone(let.gr_context) + else: + self.assertIsNotNone(let.gr_context) + + + if propagate_by == "share": + self.assertEqual(counts, {0: 1, 1: 1, 2: 1, 3: 1, 4: 6}) + else: + self.assertEqual(set(counts.values()), set([2])) + + def test_context_propagated_by_context_run(self): + self._new_ctx_run(self._test_context, "run") + + def test_context_propagated_by_setting_attribute(self): + self._new_ctx_run(self._test_context, "set") + + def test_context_not_propagated(self): + self._new_ctx_run(self._test_context, None) + + def test_context_shared(self): + self._new_ctx_run(self._test_context, "share") + + def test_break_ctxvars(self): + let1 = greenlet(copy_context().run) + let2 = greenlet(copy_context().run) + let1.switch(getcurrent().switch) + let2.switch(getcurrent().switch) + # Since let2 entered the current context and let1 exits its own, the + # interpreter emits: + # RuntimeError: cannot exit context: thread state references a different context object + let1.switch() + + def test_not_broken_if_using_attribute_instead_of_context_run(self): + let1 = greenlet(getcurrent().switch) + let2 = greenlet(getcurrent().switch) + let1.gr_context = copy_context() + let2.gr_context = copy_context() + let1.switch() + let2.switch() + let1.switch() + let2.switch() + + def test_context_assignment_while_running(self): + # pylint:disable=too-many-statements + ID_VAR.set(None) + + def target(): + self.assertIsNone(ID_VAR.get()) + self.assertIsNone(gr.gr_context) + + # Context is created on first use + ID_VAR.set(1) + self.assertIsInstance(gr.gr_context, Context) + self.assertEqual(ID_VAR.get(), 1) + self.assertEqual(gr.gr_context[ID_VAR], 1) + + # Clearing the context makes it get re-created as another + # empty context when next used + old_context = gr.gr_context + gr.gr_context = None # assign None while running + self.assertIsNone(ID_VAR.get()) + 
self.assertIsNone(gr.gr_context) + ID_VAR.set(2) + self.assertIsInstance(gr.gr_context, Context) + self.assertEqual(ID_VAR.get(), 2) + self.assertEqual(gr.gr_context[ID_VAR], 2) + + new_context = gr.gr_context + getcurrent().parent.switch((old_context, new_context)) + # parent switches us back to old_context + + self.assertEqual(ID_VAR.get(), 1) + gr.gr_context = new_context # assign non-None while running + self.assertEqual(ID_VAR.get(), 2) + + getcurrent().parent.switch() + # parent switches us back to no context + self.assertIsNone(ID_VAR.get()) + self.assertIsNone(gr.gr_context) + gr.gr_context = old_context + self.assertEqual(ID_VAR.get(), 1) + + getcurrent().parent.switch() + # parent switches us back to no context + self.assertIsNone(ID_VAR.get()) + self.assertIsNone(gr.gr_context) + + gr = greenlet(target) + + with self.assertRaisesRegex(AttributeError, "can't delete context attribute"): + del gr.gr_context + + self.assertIsNone(gr.gr_context) + old_context, new_context = gr.switch() + self.assertIs(new_context, gr.gr_context) + self.assertEqual(old_context[ID_VAR], 1) + self.assertEqual(new_context[ID_VAR], 2) + self.assertEqual(new_context.run(ID_VAR.get), 2) + gr.gr_context = old_context # assign non-None while suspended + gr.switch() + self.assertIs(gr.gr_context, new_context) + gr.gr_context = None # assign None while suspended + gr.switch() + self.assertIs(gr.gr_context, old_context) + gr.gr_context = None + gr.switch() + self.assertIsNone(gr.gr_context) + + # Make sure there are no reference leaks + gr = None + gc.collect() + self.assertEqual(sys.getrefcount(old_context), 2) + self.assertEqual(sys.getrefcount(new_context), 2) + + def test_context_assignment_different_thread(self): + import threading + VAR_VAR.set(None) + ctx = Context() + + is_running = threading.Event() + should_suspend = threading.Event() + did_suspend = threading.Event() + should_exit = threading.Event() + holder = [] + + def greenlet_in_thread_fn(): + VAR_VAR.set(1) + is_running.set() + should_suspend.wait(10) + VAR_VAR.set(2) + getcurrent().parent.switch() + holder.append(VAR_VAR.get()) + + def thread_fn(): + gr = greenlet(greenlet_in_thread_fn) + gr.gr_context = ctx + holder.append(gr) + gr.switch() + did_suspend.set() + should_exit.wait(10) + gr.switch() + del gr + greenlet() # trigger cleanup + + thread = threading.Thread(target=thread_fn, daemon=True) + thread.start() + is_running.wait(10) + gr = holder[0] + + # Can't access or modify context if the greenlet is running + # in a different thread + with self.assertRaisesRegex(ValueError, "running in a different"): + getattr(gr, 'gr_context') + with self.assertRaisesRegex(ValueError, "running in a different"): + gr.gr_context = None + + should_suspend.set() + did_suspend.wait(10) + + # OK to access and modify context if greenlet is suspended + self.assertIs(gr.gr_context, ctx) + self.assertEqual(gr.gr_context[VAR_VAR], 2) + gr.gr_context = None + + should_exit.set() + thread.join(10) + + self.assertEqual(holder, [gr, None]) + + # Context can still be accessed/modified when greenlet is dead: + self.assertIsNone(gr.gr_context) + gr.gr_context = ctx + self.assertIs(gr.gr_context, ctx) + + # Otherwise we leak greenlets on some platforms. 
+ # XXX: Should be able to do this automatically + del holder[:] + gr = None + thread = None + + def test_context_assignment_wrong_type(self): + g = greenlet() + with self.assertRaisesRegex(TypeError, + "greenlet context must be a contextvars.Context or None"): + g.gr_context = self + + +@skipIf(Context is not None, "ContextVar supported") +class NoContextVarsTests(TestCase): + def test_contextvars_errors(self): + let1 = greenlet(getcurrent().switch) + self.assertFalse(hasattr(let1, 'gr_context')) + with self.assertRaises(AttributeError): + getattr(let1, 'gr_context') + + with self.assertRaises(AttributeError): + let1.gr_context = None + + let1.switch() + + with self.assertRaises(AttributeError): + getattr(let1, 'gr_context') + + with self.assertRaises(AttributeError): + let1.gr_context = None + + del let1 diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_cpp.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_cpp.py new file mode 100644 index 00000000..7aaeb0bb --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_cpp.py @@ -0,0 +1,80 @@ +from __future__ import print_function +from __future__ import absolute_import + +import signal +from multiprocessing import Process + +import greenlet +from . import _test_extension_cpp +from . import TestCase + +def run_unhandled_exception_in_greenlet_aborts(): + # This is used in multiprocessing.Process and must be picklable + # so it needs to be global. + + + def _(): + _test_extension_cpp.test_exception_switch_and_do_in_g2( + _test_extension_cpp.test_exception_throw + ) + g1 = greenlet.greenlet(_) + g1.switch() + +class CPPTests(TestCase): + def test_exception_switch(self): + greenlets = [] + for i in range(4): + g = greenlet.greenlet(_test_extension_cpp.test_exception_switch) + g.switch(i) + greenlets.append(g) + for i, g in enumerate(greenlets): + self.assertEqual(g.switch(), i) + + def _do_test_unhandled_exception(self, target): + # TODO: On some versions of Python with some settings, this + # spews a lot of garbage to stderr. It would be nice to capture and ignore that. + import sys + WIN = sys.platform.startswith("win") + + p = Process(target=target) + p.start() + p.join(10) + # The child should be aborted in an unusual way. On POSIX + # platforms, this is done with abort() and signal.SIGABRT, + # which is reflected in a negative return value; however, on + # Windows, even though we observe the child print "Fatal + # Python error: Aborted" and in older versions of the C + # runtime "This application has requested the Runtime to + # terminate it in an unusual way," it always has an exit code + # of 3. This is interesting because 3 is the error code for + # ERROR_PATH_NOT_FOUND; BUT: the C runtime abort() function + # also uses this code. + # + # See + # https://devblogs.microsoft.com/oldnewthing/20110519-00/?p=10623 + # and + # https://docs.microsoft.com/en-us/previous-versions/k089yyh0(v=vs.140)?redirectedfrom=MSDN + expected_exit = ( + -signal.SIGABRT, + # But beginning on Python 3.11, the faulthandler + # that prints the C backtraces sometimes segfaults after + # reporting the exception but before printing the stack. + # This has only been seen on linux/gcc. 
+ -signal.SIGSEGV + ) if not WIN else ( + 3, + ) + self.assertIn(p.exitcode, expected_exit) + + def test_unhandled_exception_aborts(self): + # verify that plain unhandled throw aborts + self._do_test_unhandled_exception(_test_extension_cpp.test_exception_throw) + + + def test_unhandled_exception_in_greenlet_aborts(self): + # verify that unhandled throw called in greenlet aborts too + self._do_test_unhandled_exception(run_unhandled_exception_in_greenlet_aborts) + + +if __name__ == '__main__': + __import__('unittest').main() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_extension_interface.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_extension_interface.py new file mode 100644 index 00000000..34b66567 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_extension_interface.py @@ -0,0 +1,115 @@ +from __future__ import print_function +from __future__ import absolute_import + +import sys + +import greenlet +from . import _test_extension +from . import TestCase + +# pylint:disable=c-extension-no-member + +class CAPITests(TestCase): + def test_switch(self): + self.assertEqual( + 50, _test_extension.test_switch(greenlet.greenlet(lambda: 50))) + + def test_switch_kwargs(self): + def adder(x, y): + return x * y + g = greenlet.greenlet(adder) + self.assertEqual(6, _test_extension.test_switch_kwargs(g, x=3, y=2)) + + def test_setparent(self): + # pylint:disable=disallowed-name + def foo(): + def bar(): + greenlet.getcurrent().parent.switch() + + # This final switch should go back to the main greenlet, since + # the test_setparent() function in the C extension should have + # reparented this greenlet. + greenlet.getcurrent().parent.switch() + raise AssertionError("Should never have reached this code") + child = greenlet.greenlet(bar) + child.switch() + greenlet.getcurrent().parent.switch(child) + greenlet.getcurrent().parent.throw( + AssertionError("Should never reach this code")) + foo_child = greenlet.greenlet(foo).switch() + self.assertEqual(None, _test_extension.test_setparent(foo_child)) + + def test_getcurrent(self): + _test_extension.test_getcurrent() + + def test_new_greenlet(self): + self.assertEqual(-15, _test_extension.test_new_greenlet(lambda: -15)) + + def test_raise_greenlet_dead(self): + self.assertRaises( + greenlet.GreenletExit, _test_extension.test_raise_dead_greenlet) + + def test_raise_greenlet_error(self): + self.assertRaises( + greenlet.error, _test_extension.test_raise_greenlet_error) + + def test_throw(self): + seen = [] + + def foo(): # pylint:disable=disallowed-name + try: + greenlet.getcurrent().parent.switch() + except ValueError: + seen.append(sys.exc_info()[1]) + except greenlet.GreenletExit: + raise AssertionError + g = greenlet.greenlet(foo) + g.switch() + _test_extension.test_throw(g) + self.assertEqual(len(seen), 1) + self.assertTrue( + isinstance(seen[0], ValueError), + "ValueError was not raised in foo()") + self.assertEqual( + str(seen[0]), + 'take that sucka!', + "message doesn't match") + + def test_non_traceback_param(self): + with self.assertRaises(TypeError) as exc: + _test_extension.test_throw_exact( + greenlet.getcurrent(), + Exception, + Exception(), + self + ) + self.assertEqual(str(exc.exception), + "throw() third argument must be a traceback object") + + def test_instance_of_wrong_type(self): + with self.assertRaises(TypeError) as exc: + _test_extension.test_throw_exact( + greenlet.getcurrent(), + Exception(), + BaseException(), + None, + 
) + + self.assertEqual(str(exc.exception), + "instance exception may not have a separate value") + + def test_not_throwable(self): + with self.assertRaises(TypeError) as exc: + _test_extension.test_throw_exact( + greenlet.getcurrent(), + "abc", + None, + None, + ) + self.assertEqual(str(exc.exception), + "exceptions must be classes, or instances, not str") + + +if __name__ == '__main__': + import unittest + unittest.main() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_gc.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_gc.py new file mode 100644 index 00000000..43927d45 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_gc.py @@ -0,0 +1,86 @@ +import gc + +import weakref + +import greenlet + + +from . import TestCase +from .leakcheck import fails_leakcheck +# These only work with greenlet gc support +# which is no longer optional. +assert greenlet.GREENLET_USE_GC + +class GCTests(TestCase): + def test_dead_circular_ref(self): + o = weakref.ref(greenlet.greenlet(greenlet.getcurrent).switch()) + gc.collect() + if o() is not None: + import sys + print("O IS NOT NONE.", sys.getrefcount(o())) + self.assertIsNone(o()) + self.assertFalse(gc.garbage, gc.garbage) + + def test_circular_greenlet(self): + class circular_greenlet(greenlet.greenlet): + pass + o = circular_greenlet() + o.self = o + o = weakref.ref(o) + gc.collect() + self.assertIsNone(o()) + self.assertFalse(gc.garbage, gc.garbage) + + def test_inactive_ref(self): + class inactive_greenlet(greenlet.greenlet): + def __init__(self): + greenlet.greenlet.__init__(self, run=self.run) + + def run(self): + pass + o = inactive_greenlet() + o = weakref.ref(o) + gc.collect() + self.assertIsNone(o()) + self.assertFalse(gc.garbage, gc.garbage) + + @fails_leakcheck + def test_finalizer_crash(self): + # This test is designed to crash when active greenlets + # are made garbage collectable, until the underlying + # problem is resolved. 
How does it work: + # - order of object creation is important + # - array is created first, so it is moved to unreachable first + # - we create a cycle between a greenlet and this array + # - we create an object that participates in gc, is only + # referenced by a greenlet, and would corrupt gc lists + # on destruction, the easiest is to use an object with + # a finalizer + # - because array is the first object in unreachable it is + # cleared first, which causes all references to greenlet + # to disappear and causes greenlet to be destroyed, but since + # it is still live it causes a switch during gc, which causes + # an object with finalizer to be destroyed, which causes stack + # corruption and then a crash + + class object_with_finalizer(object): + def __del__(self): + pass + array = [] + parent = greenlet.getcurrent() + def greenlet_body(): + greenlet.getcurrent().object = object_with_finalizer() + try: + parent.switch() + except greenlet.GreenletExit: + print("Got greenlet exit!") + finally: + del greenlet.getcurrent().object + g = greenlet.greenlet(greenlet_body) + g.array = array + array.append(g) + g.switch() + del array + del g + greenlet.getcurrent() + gc.collect() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_generator.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_generator.py new file mode 100644 index 00000000..ca4a644b --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_generator.py @@ -0,0 +1,59 @@ + +from greenlet import greenlet + +from . import TestCase + +class genlet(greenlet): + parent = None + def __init__(self, *args, **kwds): + self.args = args + self.kwds = kwds + + def run(self): + fn, = self.fn + fn(*self.args, **self.kwds) + + def __iter__(self): + return self + + def __next__(self): + self.parent = greenlet.getcurrent() + result = self.switch() + if self: + return result + + raise StopIteration + + next = __next__ + + +def Yield(value): + g = greenlet.getcurrent() + while not isinstance(g, genlet): + if g is None: + raise RuntimeError('yield outside a genlet') + g = g.parent + g.parent.switch(value) + + +def generator(func): + class Generator(genlet): + fn = (func,) + return Generator + +# ____________________________________________________________ + + +class GeneratorTests(TestCase): + def test_generator(self): + seen = [] + + def g(n): + for i in range(n): + seen.append(i) + Yield(i) + g = generator(g) + for _ in range(3): + for j in g(5): + seen.append(j) + self.assertEqual(seen, 3 * [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_generator_nested.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_generator_nested.py new file mode 100644 index 00000000..0c5d7466 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_generator_nested.py @@ -0,0 +1,168 @@ + +from greenlet import greenlet +from . import TestCase +from .leakcheck import fails_leakcheck + +class genlet(greenlet): + parent = None + def __init__(self, *args, **kwds): + self.args = args + self.kwds = kwds + self.child = None + + def run(self): + # Note the function is packed in a tuple + # to avoid creating a bound method for it. 
+ fn, = self.fn + fn(*self.args, **self.kwds) + + def __iter__(self): + return self + + def set_child(self, child): + self.child = child + + def __next__(self): + if self.child: + child = self.child + while child.child: + tmp = child + child = child.child + tmp.child = None + + result = child.switch() + else: + self.parent = greenlet.getcurrent() + result = self.switch() + + if self: + return result + + raise StopIteration + + next = __next__ + +def Yield(value, level=1): + g = greenlet.getcurrent() + + while level != 0: + if not isinstance(g, genlet): + raise RuntimeError('yield outside a genlet') + if level > 1: + g.parent.set_child(g) + g = g.parent + level -= 1 + + g.switch(value) + + +def Genlet(func): + class TheGenlet(genlet): + fn = (func,) + return TheGenlet + +# ____________________________________________________________ + + +def g1(n, seen): + for i in range(n): + seen.append(i + 1) + yield i + + +def g2(n, seen): + for i in range(n): + seen.append(i + 1) + Yield(i) + +g2 = Genlet(g2) + + +def nested(i): + Yield(i) + + +def g3(n, seen): + for i in range(n): + seen.append(i + 1) + nested(i) +g3 = Genlet(g3) + + +def a(n): + if n == 0: + return + for ii in ax(n - 1): + Yield(ii) + Yield(n) +ax = Genlet(a) + + +def perms(l): + if len(l) > 1: + for e in l: + # No syntactical sugar for generator expressions + x = [Yield([e] + p) for p in perms([x for x in l if x != e])] + assert x + else: + Yield(l) +perms = Genlet(perms) + + +def gr1(n): + for ii in range(1, n): + Yield(ii) + Yield(ii * ii, 2) + +gr1 = Genlet(gr1) + + +def gr2(n, seen): + for ii in gr1(n): + seen.append(ii) + +gr2 = Genlet(gr2) + + +class NestedGeneratorTests(TestCase): + def test_layered_genlets(self): + seen = [] + for ii in gr2(5, seen): + seen.append(ii) + self.assertEqual(seen, [1, 1, 2, 4, 3, 9, 4, 16]) + + @fails_leakcheck + def test_permutations(self): + gen_perms = perms(list(range(4))) + permutations = list(gen_perms) + self.assertEqual(len(permutations), 4 * 3 * 2 * 1) + self.assertIn([0, 1, 2, 3], permutations) + self.assertIn([3, 2, 1, 0], permutations) + res = [] + for ii in zip(perms(list(range(4))), perms(list(range(3)))): + res.append(ii) + self.assertEqual( + res, + [([0, 1, 2, 3], [0, 1, 2]), ([0, 1, 3, 2], [0, 2, 1]), + ([0, 2, 1, 3], [1, 0, 2]), ([0, 2, 3, 1], [1, 2, 0]), + ([0, 3, 1, 2], [2, 0, 1]), ([0, 3, 2, 1], [2, 1, 0])]) + # XXX Test to make sure we are working as a generator expression + + def test_genlet_simple(self): + for g in [g1, g2, g3]: + seen = [] + for _ in range(3): + for j in g(5, seen): + seen.append(j) + self.assertEqual(seen, 3 * [1, 0, 2, 1, 3, 2, 4, 3, 5, 4]) + + def test_genlet_bad(self): + try: + Yield(10) + except RuntimeError: + pass + + def test_nested_genlets(self): + seen = [] + for ii in ax(5): + seen.append(ii) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_greenlet.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_greenlet.py new file mode 100644 index 00000000..3185b39b --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_greenlet.py @@ -0,0 +1,1126 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gc +import sys +import time +import threading + +from abc import ABCMeta, abstractmethod + +from greenlet import greenlet +from . 
import TestCase +from .leakcheck import fails_leakcheck + + +# We manually manage locks in many tests +# pylint:disable=consider-using-with +# pylint:disable=too-many-public-methods + +class SomeError(Exception): + pass + + +def fmain(seen): + try: + greenlet.getcurrent().parent.switch() + except: + seen.append(sys.exc_info()[0]) + raise + raise SomeError + + +def send_exception(g, exc): + # note: send_exception(g, exc) can be now done with g.throw(exc). + # the purpose of this test is to explicitly check the propagation rules. + def crasher(exc): + raise exc + g1 = greenlet(crasher, parent=g) + g1.switch(exc) + + +class TestGreenlet(TestCase): + + def _do_simple_test(self): + lst = [] + + def f(): + lst.append(1) + greenlet.getcurrent().parent.switch() + lst.append(3) + g = greenlet(f) + lst.append(0) + g.switch() + lst.append(2) + g.switch() + lst.append(4) + self.assertEqual(lst, list(range(5))) + + def test_simple(self): + self._do_simple_test() + + def test_switch_no_run_raises_AttributeError(self): + g = greenlet() + with self.assertRaises(AttributeError) as exc: + g.switch() + + self.assertIn("run", str(exc.exception)) + + def test_throw_no_run_raises_AttributeError(self): + g = greenlet() + with self.assertRaises(AttributeError) as exc: + g.throw(SomeError) + + self.assertIn("run", str(exc.exception)) + + def test_parent_equals_None(self): + g = greenlet(parent=None) + self.assertIsNotNone(g) + self.assertIs(g.parent, greenlet.getcurrent()) + + def test_run_equals_None(self): + g = greenlet(run=None) + self.assertIsNotNone(g) + self.assertIsNone(g.run) + + def test_two_children(self): + lst = [] + + def f(): + lst.append(1) + greenlet.getcurrent().parent.switch() + lst.extend([1, 1]) + g = greenlet(f) + h = greenlet(f) + g.switch() + self.assertEqual(len(lst), 1) + h.switch() + self.assertEqual(len(lst), 2) + h.switch() + self.assertEqual(len(lst), 4) + self.assertEqual(h.dead, True) + g.switch() + self.assertEqual(len(lst), 6) + self.assertEqual(g.dead, True) + + def test_two_recursive_children(self): + lst = [] + + def f(): + lst.append('b') + greenlet.getcurrent().parent.switch() + + def g(): + lst.append('a') + g = greenlet(f) + g.switch() + lst.append('c') + + g = greenlet(g) + self.assertEqual(sys.getrefcount(g), 2) + g.switch() + self.assertEqual(lst, ['a', 'b', 'c']) + # Just the one in this frame, plus the one on the stack we pass to the function + self.assertEqual(sys.getrefcount(g), 2) + + def test_threads(self): + success = [] + + def f(): + self._do_simple_test() + success.append(True) + ths = [threading.Thread(target=f) for i in range(10)] + for th in ths: + th.start() + for th in ths: + th.join(10) + self.assertEqual(len(success), len(ths)) + + def test_exception(self): + seen = [] + g1 = greenlet(fmain) + g2 = greenlet(fmain) + g1.switch(seen) + g2.switch(seen) + g2.parent = g1 + + self.assertEqual(seen, []) + #with self.assertRaises(SomeError): + # p("***Switching back") + # g2.switch() + # Creating this as a bound method can reveal bugs that + # are hidden on newer versions of Python that avoid creating + # bound methods for direct expressions; IOW, don't use the `with` + # form! 
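+        # (Illustrative sketch, not part of the upstream test: the two
+        # spellings differ in when a bound method object is created.
+        #   meth = g2.switch                  # bound method materialized here
+        #   self.assertRaises(SomeError, meth)
+        # versus
+        #   with self.assertRaises(SomeError):
+        #       g2.switch()                   # newer CPythons can skip the
+        #                                     # intermediate bound method
+        # which is why the call form below is deliberate.)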
+ self.assertRaises(SomeError, g2.switch) + self.assertEqual(seen, [SomeError]) + + value = g2.switch() + self.assertEqual(value, ()) + self.assertEqual(seen, [SomeError]) + + value = g2.switch(25) + self.assertEqual(value, 25) + self.assertEqual(seen, [SomeError]) + + + def test_send_exception(self): + seen = [] + g1 = greenlet(fmain) + g1.switch(seen) + self.assertRaises(KeyError, send_exception, g1, KeyError) + self.assertEqual(seen, [KeyError]) + + def test_dealloc(self): + seen = [] + g1 = greenlet(fmain) + g2 = greenlet(fmain) + g1.switch(seen) + g2.switch(seen) + self.assertEqual(seen, []) + del g1 + gc.collect() + self.assertEqual(seen, [greenlet.GreenletExit]) + del g2 + gc.collect() + self.assertEqual(seen, [greenlet.GreenletExit, greenlet.GreenletExit]) + + def test_dealloc_catches_GreenletExit_throws_other(self): + def run(): + try: + greenlet.getcurrent().parent.switch() + except greenlet.GreenletExit: + raise SomeError + + g = greenlet(run) + g.switch() + # Destroying the only reference to the greenlet causes it + # to get GreenletExit; when it in turn raises, even though we're the parent + # we don't get the exception, it just gets printed. + # When we run on 3.8 only, we can use sys.unraisablehook + oldstderr = sys.stderr + try: + from cStringIO import StringIO + except ImportError: + from io import StringIO + stderr = sys.stderr = StringIO() + try: + del g + finally: + sys.stderr = oldstderr + + v = stderr.getvalue() + self.assertIn("Exception", v) + self.assertIn('ignored', v) + self.assertIn("SomeError", v) + + + def test_dealloc_other_thread(self): + seen = [] + someref = [] + + bg_glet_created_running_and_no_longer_ref_in_bg = threading.Event() + fg_ref_released = threading.Event() + bg_should_be_clear = threading.Event() + ok_to_exit_bg_thread = threading.Event() + + def f(): + g1 = greenlet(fmain) + g1.switch(seen) + someref.append(g1) + del g1 + gc.collect() + + bg_glet_created_running_and_no_longer_ref_in_bg.set() + fg_ref_released.wait(3) + + greenlet() # trigger release + bg_should_be_clear.set() + ok_to_exit_bg_thread.wait(3) + greenlet() # One more time + + t = threading.Thread(target=f) + t.start() + bg_glet_created_running_and_no_longer_ref_in_bg.wait(10) + + self.assertEqual(seen, []) + self.assertEqual(len(someref), 1) + del someref[:] + gc.collect() + # g1 is not released immediately because it's from another thread + self.assertEqual(seen, []) + fg_ref_released.set() + bg_should_be_clear.wait(3) + try: + self.assertEqual(seen, [greenlet.GreenletExit]) + finally: + ok_to_exit_bg_thread.set() + t.join(10) + del seen[:] + del someref[:] + + def test_frame(self): + def f1(): + f = sys._getframe(0) # pylint:disable=protected-access + self.assertEqual(f.f_back, None) + greenlet.getcurrent().parent.switch(f) + return "meaning of life" + g = greenlet(f1) + frame = g.switch() + self.assertTrue(frame is g.gr_frame) + self.assertTrue(g) + + from_g = g.switch() + self.assertFalse(g) + self.assertEqual(from_g, 'meaning of life') + self.assertEqual(g.gr_frame, None) + + def test_thread_bug(self): + def runner(x): + g = greenlet(lambda: time.sleep(x)) + g.switch() + t1 = threading.Thread(target=runner, args=(0.2,)) + t2 = threading.Thread(target=runner, args=(0.3,)) + t1.start() + t2.start() + t1.join(10) + t2.join(10) + + def test_switch_kwargs(self): + def run(a, b): + self.assertEqual(a, 4) + self.assertEqual(b, 2) + return 42 + x = greenlet(run).switch(a=4, b=2) + self.assertEqual(x, 42) + + def test_switch_kwargs_to_parent(self): + def run(x): + 
greenlet.getcurrent().parent.switch(x=x) + greenlet.getcurrent().parent.switch(2, x=3) + return x, x ** 2 + g = greenlet(run) + self.assertEqual({'x': 3}, g.switch(3)) + self.assertEqual(((2,), {'x': 3}), g.switch()) + self.assertEqual((3, 9), g.switch()) + + def test_switch_to_another_thread(self): + data = {} + created_event = threading.Event() + done_event = threading.Event() + + def run(): + data['g'] = greenlet(lambda: None) + created_event.set() + done_event.wait(10) + thread = threading.Thread(target=run) + thread.start() + created_event.wait(10) + with self.assertRaises(greenlet.error): + data['g'].switch() + done_event.set() + thread.join(10) + # XXX: Should handle this automatically + data.clear() + + def test_exc_state(self): + def f(): + try: + raise ValueError('fun') + except: # pylint:disable=bare-except + exc_info = sys.exc_info() + greenlet(h).switch() + self.assertEqual(exc_info, sys.exc_info()) + + def h(): + self.assertEqual(sys.exc_info(), (None, None, None)) + + greenlet(f).switch() + + def test_instance_dict(self): + def f(): + greenlet.getcurrent().test = 42 + def deldict(g): + del g.__dict__ + def setdict(g, value): + g.__dict__ = value + g = greenlet(f) + self.assertEqual(g.__dict__, {}) + g.switch() + self.assertEqual(g.test, 42) + self.assertEqual(g.__dict__, {'test': 42}) + g.__dict__ = g.__dict__ + self.assertEqual(g.__dict__, {'test': 42}) + self.assertRaises(TypeError, deldict, g) + self.assertRaises(TypeError, setdict, g, 42) + + def test_running_greenlet_has_no_run(self): + has_run = [] + def func(): + has_run.append( + hasattr(greenlet.getcurrent(), 'run') + ) + + g = greenlet(func) + g.switch() + self.assertEqual(has_run, [False]) + + def test_deepcopy(self): + import copy + self.assertRaises(TypeError, copy.copy, greenlet()) + self.assertRaises(TypeError, copy.deepcopy, greenlet()) + + def test_parent_restored_on_kill(self): + hub = greenlet(lambda: None) + main = greenlet.getcurrent() + result = [] + def worker(): + try: + # Wait to be killed by going back to the test. + main.switch() + except greenlet.GreenletExit: + # Resurrect and switch to parent + result.append(greenlet.getcurrent().parent) + result.append(greenlet.getcurrent()) + hub.switch() + g = greenlet(worker, parent=hub) + g.switch() + # delete the only reference, thereby raising GreenletExit + del g + self.assertTrue(result) + self.assertIs(result[0], main) + self.assertIs(result[1].parent, hub) + # Delete them, thereby breaking the cycle between the greenlet + # and the frame, which otherwise would never be collectable + # XXX: We should be able to automatically fix this. 
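+        # (Illustrative sketch, not part of the upstream test: the cycle
+        # being broken is roughly
+        #   result[1] -> greenlet -> suspended frame -> result
+        # so clearing ``result`` and the ``hub``/``main`` locals below
+        # drops the last external references into that loop.)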
+ del result[:] + hub = None + main = None + + def test_parent_return_failure(self): + # No run causes AttributeError on switch + g1 = greenlet() + # Greenlet that implicitly switches to parent + g2 = greenlet(lambda: None, parent=g1) + # AttributeError should propagate to us, no fatal errors + with self.assertRaises(AttributeError): + g2.switch() + + def test_throw_exception_not_lost(self): + class mygreenlet(greenlet): + def __getattribute__(self, name): + try: + raise Exception() + except: # pylint:disable=bare-except + pass + return greenlet.__getattribute__(self, name) + g = mygreenlet(lambda: None) + self.assertRaises(SomeError, g.throw, SomeError()) + + @fails_leakcheck + def _do_test_throw_to_dead_thread_doesnt_crash(self, wait_for_cleanup=False): + result = [] + def worker(): + greenlet.getcurrent().parent.switch() + + def creator(): + g = greenlet(worker) + g.switch() + result.append(g) + if wait_for_cleanup: + # Let this greenlet eventually be cleaned up. + g.switch() + greenlet.getcurrent() + t = threading.Thread(target=creator) + t.start() + t.join(10) + del t + # But, depending on the operating system, the thread + # deallocator may not actually have run yet! So we can't be + # sure about the error message unless we wait. + if wait_for_cleanup: + self.wait_for_pending_cleanups() + with self.assertRaises(greenlet.error) as exc: + result[0].throw(SomeError) + + if not wait_for_cleanup: + self.assertIn( + str(exc.exception), [ + "cannot switch to a different thread (which happens to have exited)", + "cannot switch to a different thread" + ] + ) + else: + self.assertEqual( + str(exc.exception), + "cannot switch to a different thread (which happens to have exited)", + ) + + if hasattr(result[0].gr_frame, 'clear'): + # The frame is actually executing (it thinks), we can't clear it. + with self.assertRaises(RuntimeError): + result[0].gr_frame.clear() + # Unfortunately, this doesn't actually clear the references, they're in the + # fast local array. + if not wait_for_cleanup: + result[0].gr_frame.f_locals.clear() + else: + self.assertIsNone(result[0].gr_frame) + + del creator + worker = None + del result[:] + # XXX: we ought to be able to automatically fix this. 
+ # See issue 252
+ self.expect_greenlet_leak = True # direct us not to wait for it to go away
+
+ @fails_leakcheck
+ def test_throw_to_dead_thread_doesnt_crash(self):
+ self._do_test_throw_to_dead_thread_doesnt_crash()
+
+ def test_throw_to_dead_thread_doesnt_crash_wait(self):
+ self._do_test_throw_to_dead_thread_doesnt_crash(True)
+
+ @fails_leakcheck
+ def test_recursive_startup(self):
+ class convoluted(greenlet):
+ def __init__(self):
+ greenlet.__init__(self)
+ self.count = 0
+ def __getattribute__(self, name):
+ if name == 'run' and self.count == 0:
+ self.count = 1
+ self.switch(43)
+ return greenlet.__getattribute__(self, name)
+ def run(self, value):
+ while True:
+ self.parent.switch(value)
+ g = convoluted()
+ self.assertEqual(g.switch(42), 43)
+ # Exits the running greenlet, otherwise it leaks
+ # XXX: We should be able to automatically fix this
+ #g.throw(greenlet.GreenletExit)
+ #del g
+ self.expect_greenlet_leak = True
+
+ def test_threaded_updatecurrent(self):
+ # released when main thread should execute
+ lock1 = threading.Lock()
+ lock1.acquire()
+ # released when another thread should execute
+ lock2 = threading.Lock()
+ lock2.acquire()
+ class finalized(object):
+ def __del__(self):
+ # happens while in green_updatecurrent() in main greenlet
+ # should be very careful not to accidentally call it again
+ # at the same time we must make sure another thread executes
+ lock2.release()
+ lock1.acquire()
+ # now ts_current belongs to another thread
+ def deallocator():
+ greenlet.getcurrent().parent.switch()
+ def fthread():
+ lock2.acquire()
+ greenlet.getcurrent()
+ del g[0]
+ lock1.release()
+ lock2.acquire()
+ greenlet.getcurrent()
+ lock1.release()
+ main = greenlet.getcurrent()
+ g = [greenlet(deallocator)]
+ g[0].bomb = finalized()
+ g[0].switch()
+ t = threading.Thread(target=fthread)
+ t.start()
+ # let another thread grab ts_current and deallocate g[0]
+ lock2.release()
+ lock1.acquire()
+ # this is the cornerstone
+ # getcurrent() will notice that ts_current belongs to another thread
+ # and start the update process, which would notice that g[0] should
+ # be deallocated, and that will execute an object's finalizer. Now,
+ # that object will let another thread run so it can grab ts_current
+ # again, which would likely crash the interpreter if there's no
+ # check for this case at the end of green_updatecurrent(). This test
+ # passes if getcurrent() returns the correct result, but it's likely
+ # to randomly crash if it's not anyway.
+ self.assertEqual(greenlet.getcurrent(), main)
+ # wait for another thread to complete, just in case
+ t.join(10)
+
+ def test_dealloc_switch_args_not_lost(self):
+ seen = []
+ def worker():
+ # wait for the value
+ value = greenlet.getcurrent().parent.switch()
+ # delete all references to ourself
+ del worker[0]
+ initiator.parent = greenlet.getcurrent().parent
+ # switch to main with the value, but because
+ # ts_current is the last reference to us we
+ # return here immediately, where we resurrect ourself.
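+            # (Illustrative sketch, not part of the upstream test: after
+            #   del worker[0]            # drop the last strong reference
+            #   parent.switch(value)     # deallocation starts mid-switch
+            # control comes straight back here with the greenlet
+            # resurrected, so the ``finally`` below still runs.)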
+ try: + greenlet.getcurrent().parent.switch(value) + finally: + seen.append(greenlet.getcurrent()) + def initiator(): + return 42 # implicitly falls thru to parent + + worker = [greenlet(worker)] + + worker[0].switch() # prime worker + initiator = greenlet(initiator, worker[0]) + value = initiator.switch() + self.assertTrue(seen) + self.assertEqual(value, 42) + + def test_tuple_subclass(self): + # XXX: This is failing on Python 2 with a SystemError: error return without exception set + + # The point of this test is to see what happens when a custom + # tuple subclass is used as an object passed directly to the C + # function ``green_switch``; part of ``green_switch`` checks + # the ``len()`` of the ``args`` tuple, and that can call back + # into Python. Here, when it calls back into Python, we + # recursively enter ``green_switch`` again. + + # This test is really only relevant on Python 2. The builtin + # `apply` function directly passes the given args tuple object + # to the underlying function, whereas the Python 3 version + # unpacks and repacks into an actual tuple. This could still + # happen using the C API on Python 3 though. + if sys.version_info[0] > 2: + # There's no apply in Python 3.x + def _apply(func, a, k): + func(*a, **k) + else: + _apply = apply # pylint:disable=undefined-variable + + class mytuple(tuple): + def __len__(self): + greenlet.getcurrent().switch() + return tuple.__len__(self) + args = mytuple() + kwargs = dict(a=42) + def switchapply(): + _apply(greenlet.getcurrent().parent.switch, args, kwargs) + g = greenlet(switchapply) + self.assertEqual(g.switch(), kwargs) + + def test_abstract_subclasses(self): + AbstractSubclass = ABCMeta( + 'AbstractSubclass', + (greenlet,), + {'run': abstractmethod(lambda self: None)}) + + class BadSubclass(AbstractSubclass): + pass + + class GoodSubclass(AbstractSubclass): + def run(self): + pass + + GoodSubclass() # should not raise + self.assertRaises(TypeError, BadSubclass) + + def test_implicit_parent_with_threads(self): + if not gc.isenabled(): + return # cannot test with disabled gc + N = gc.get_threshold()[0] + if N < 50: + return # cannot test with such a small N + def attempt(): + lock1 = threading.Lock() + lock1.acquire() + lock2 = threading.Lock() + lock2.acquire() + recycled = [False] + def another_thread(): + lock1.acquire() # wait for gc + greenlet.getcurrent() # update ts_current + lock2.release() # release gc + t = threading.Thread(target=another_thread) + t.start() + class gc_callback(object): + def __del__(self): + lock1.release() + lock2.acquire() + recycled[0] = True + class garbage(object): + def __init__(self): + self.cycle = self + self.callback = gc_callback() + l = [] + x = range(N*2) + current = greenlet.getcurrent() + g = garbage() + for _ in x: + g = None # lose reference to garbage + if recycled[0]: + # gc callback called prematurely + t.join(10) + return False + last = greenlet() + if recycled[0]: + break # yes! 
gc called in green_new
+ l.append(last) # increase allocation counter
+ else:
+ # gc callback not called when expected
+ gc.collect()
+ if recycled[0]:
+ t.join(10)
+ return False
+ self.assertEqual(last.parent, current)
+ for g in l:
+ self.assertEqual(g.parent, current)
+ return True
+ for _ in range(5):
+ if attempt():
+ break
+
+ def test_issue_245_reference_counting_subclass_no_threads(self):
+ # https://github.com/python-greenlet/greenlet/issues/245
+ # Before the fix, this crashed pretty reliably on
+ # Python 3.10, at least on macOS; but much less reliably on other
+ # interpreters (memory layout must have changed).
+ # The threaded test crashed more reliably on more interpreters.
+ from greenlet import getcurrent
+ from greenlet import GreenletExit
+
+ class Greenlet(greenlet):
+ pass
+
+ initial_refs = sys.getrefcount(Greenlet)
+ # This has to be an instance variable because
+ # Python 2 raises a SyntaxError if we delete a local
+ # variable referenced in an inner scope.
+ self.glets = [] # pylint:disable=attribute-defined-outside-init
+
+ def greenlet_main():
+ try:
+ getcurrent().parent.switch()
+ except GreenletExit:
+ self.glets.append(getcurrent())
+
+ # Before the
+ for _ in range(10):
+ Greenlet(greenlet_main).switch()
+
+ del self.glets
+ self.assertEqual(sys.getrefcount(Greenlet), initial_refs)
+
+ def test_issue_245_reference_counting_subclass_threads(self):
+ # https://github.com/python-greenlet/greenlet/issues/245
+ from threading import Thread
+ from threading import Event
+
+ from greenlet import getcurrent
+
+ class MyGreenlet(greenlet):
+ pass
+
+ glets = []
+ ref_cleared = Event()
+
+ def greenlet_main():
+ getcurrent().parent.switch()
+
+ def thread_main(greenlet_running_event):
+ mine = MyGreenlet(greenlet_main)
+ glets.append(mine)
+ # The greenlets being deleted must be active
+ mine.switch()
+ # Don't keep any reference to it in this thread
+ del mine
+ # Let main know we published our greenlet.
+ greenlet_running_event.set()
+ # Wait for main to let us know the references are
+ # gone and the greenlet objects no longer reachable
+ ref_cleared.wait(10)
+ # The creating thread must call getcurrent() (or a few other
+ # greenlet APIs) because that's when the thread-local list of dead
+ # greenlets gets cleared.
+ getcurrent()
+
+ # We start with 3 references to the subclass:
+ # - This module
+ # - Its __mro__
+ # - The __subclasses__ attribute of greenlet
+ # - (If we call gc.get_referents(), we find four entries, including
+ # some other tuple ``(greenlet)`` that I'm not sure about but must be part
+ # of the machinery.)
+ #
+ # On Python 3.10 it's often enough to just run 3 threads; on Python 2.7,
+ # more threads are needed, and the results are still
+ # non-deterministic. Presumably the memory layouts are different.
+ initial_refs = sys.getrefcount(MyGreenlet)
+ thread_ready_events = []
+ for _ in range(
+ initial_refs + 45
+ ):
+ event = Event()
+ thread = Thread(target=thread_main, args=(event,))
+ thread_ready_events.append(event)
+ thread.start()
+
+
+ for done_event in thread_ready_events:
+ done_event.wait(10)
+
+
+ del glets[:]
+ ref_cleared.set()
+ # Let any other thread run; it will crash the interpreter
+ # if not fixed (or silently corrupt memory and we possibly crash
+ # later).
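+        # (Illustrative note: ``wait_for_pending_cleanups()`` is a helper
+        # on this package's TestCase base class (assumed from its use
+        # throughout these tests); conceptually it spins, roughly
+        #   while cleanups_pending(): time.sleep(0.001)
+        # until the dead thread's greenlets have been reaped.)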
+ self.wait_for_pending_cleanups()
+ self.assertEqual(sys.getrefcount(MyGreenlet), initial_refs)
+
+ def test_falling_off_end_switches_to_unstarted_parent_raises_error(self):
+ def no_args():
+ return 13
+
+ parent_never_started = greenlet(no_args)
+
+ def leaf():
+ return 42
+
+ child = greenlet(leaf, parent_never_started)
+
+ # Because the run function takes no arguments
+ with self.assertRaises(TypeError):
+ child.switch()
+
+ def test_falling_off_end_switches_to_unstarted_parent_works(self):
+ def one_arg(x):
+ return (x, 24)
+
+ parent_never_started = greenlet(one_arg)
+
+ def leaf():
+ return 42
+
+ child = greenlet(leaf, parent_never_started)
+
+ result = child.switch()
+ self.assertEqual(result, (42, 24))
+
+ def test_switch_to_dead_greenlet_with_unstarted_perverse_parent(self):
+ class Parent(greenlet):
+ def __getattribute__(self, name):
+ if name == 'run':
+ raise SomeError
+
+
+ parent_never_started = Parent()
+ seen = []
+ child = greenlet(lambda: seen.append(42), parent_never_started)
+ # Because we automatically start the parent when the child is
+ # finished
+ with self.assertRaises(SomeError):
+ child.switch()
+
+ self.assertEqual(seen, [42])
+
+ with self.assertRaises(SomeError):
+ child.switch()
+ self.assertEqual(seen, [42])
+
+ def test_switch_to_dead_greenlet_reparent(self):
+ seen = []
+ parent_never_started = greenlet(lambda: seen.append(24))
+ child = greenlet(lambda: seen.append(42))
+
+ child.switch()
+ self.assertEqual(seen, [42])
+
+ child.parent = parent_never_started
+ # This actually is the same as switching to the parent.
+ result = child.switch()
+ self.assertIsNone(result)
+ self.assertEqual(seen, [42, 24])
+
+
+class TestGreenletSetParentErrors(TestCase):
+ def test_threaded_reparent(self):
+ data = {}
+ created_event = threading.Event()
+ done_event = threading.Event()
+
+ def run():
+ data['g'] = greenlet(lambda: None)
+ created_event.set()
+ done_event.wait(10)
+
+ def blank():
+ greenlet.getcurrent().parent.switch()
+
+ thread = threading.Thread(target=run)
+ thread.start()
+ created_event.wait(10)
+ g = greenlet(blank)
+ g.switch()
+ with self.assertRaises(ValueError) as exc:
+ g.parent = data['g']
+ done_event.set()
+ thread.join(10)
+
+ self.assertEqual(str(exc.exception), "parent cannot be on a different thread")
+
+ def test_unexpected_reparenting(self):
+ another = []
+ def worker():
+ g = greenlet(lambda: None)
+ another.append(g)
+ g.switch()
+ t = threading.Thread(target=worker)
+ t.start()
+ t.join(10)
+ # The first time we switch (running g_initialstub(), which is
+ # when we look up the run attribute) we attempt to change the
+ # parent to one from another thread (which also happens to be
+ # dead). ``g_initialstub()`` should detect this and raise a
+ # greenlet error.
+ #
+ # EXCEPT: With the fix for #252, this is actually detected
+ # sooner, when setting the parent itself. Prior to that fix,
+ # the main greenlet from the background thread kept a valid
+ # value for ``run_info``, and appeared to be a valid parent
+ # until we actually started the greenlet. But now that it's
+ # cleared, this test is catching whether ``green_setparent``
+ # can detect the dead thread.
+ #
+ # Further refactoring once again changes this back to a greenlet.error
+ #
+ # We need to wait for the cleanup to happen, but we're
+ # deliberately leaking a main greenlet here.
+ self.wait_for_pending_cleanups(initial_main_greenlets=self.main_greenlets_before_test + 1) + + class convoluted(greenlet): + def __getattribute__(self, name): + if name == 'run': + self.parent = another[0] # pylint:disable=attribute-defined-outside-init + return greenlet.__getattribute__(self, name) + g = convoluted(lambda: None) + with self.assertRaises(greenlet.error) as exc: + g.switch() + self.assertEqual(str(exc.exception), + "cannot switch to a different thread (which happens to have exited)") + del another[:] + + def test_unexpected_reparenting_thread_running(self): + # Like ``test_unexpected_reparenting``, except the background thread is + # actually still alive. + another = [] + switched_to_greenlet = threading.Event() + keep_main_alive = threading.Event() + def worker(): + g = greenlet(lambda: None) + another.append(g) + g.switch() + switched_to_greenlet.set() + keep_main_alive.wait(10) + class convoluted(greenlet): + def __getattribute__(self, name): + if name == 'run': + self.parent = another[0] # pylint:disable=attribute-defined-outside-init + return greenlet.__getattribute__(self, name) + + t = threading.Thread(target=worker) + t.start() + + switched_to_greenlet.wait(10) + try: + g = convoluted(lambda: None) + + with self.assertRaises(greenlet.error) as exc: + g.switch() + self.assertEqual(str(exc.exception), "cannot switch to a different thread") + finally: + keep_main_alive.set() + t.join(10) + # XXX: Should handle this automatically. + del another[:] + + def test_cannot_delete_parent(self): + worker = greenlet(lambda: None) + self.assertIs(worker.parent, greenlet.getcurrent()) + + with self.assertRaises(AttributeError) as exc: + del worker.parent + self.assertEqual(str(exc.exception), "can't delete attribute") + + def test_cannot_delete_parent_of_main(self): + with self.assertRaises(AttributeError) as exc: + del greenlet.getcurrent().parent + self.assertEqual(str(exc.exception), "can't delete attribute") + + + def test_main_greenlet_parent_is_none(self): + # assuming we're in a main greenlet here. + self.assertIsNone(greenlet.getcurrent().parent) + + def test_set_parent_wrong_types(self): + def bg(): + # Go back to main. + greenlet.getcurrent().parent.switch() + + def check(glet): + for p in None, 1, self, "42": + with self.assertRaises(TypeError) as exc: + glet.parent = p + + self.assertEqual( + str(exc.exception), + "GreenletChecker: Expected any type of greenlet, not " + type(p).__name__) + + # First, not running + g = greenlet(bg) + self.assertFalse(g) + check(g) + + # Then when running. + g.switch() + self.assertTrue(g) + check(g) + + # Let it finish + g.switch() + + + def test_trivial_cycle(self): + glet = greenlet(lambda: None) + with self.assertRaises(ValueError) as exc: + glet.parent = glet + self.assertEqual(str(exc.exception), "cyclic parent chain") + + def test_trivial_cycle_main(self): + # This used to produce a ValueError, but we catch it earlier than that now. 
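+        # (For contrast, an illustrative example based on
+        # test_trivial_cycle above:
+        #   g = greenlet(lambda: None)
+        #   g.parent = g    # ValueError: cyclic parent chain
+        # a main greenlet never even gets that far.)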
+ with self.assertRaises(AttributeError) as exc: + greenlet.getcurrent().parent = greenlet.getcurrent() + self.assertEqual(str(exc.exception), "cannot set the parent of a main greenlet") + + def test_deeper_cycle(self): + g1 = greenlet(lambda: None) + g2 = greenlet(lambda: None) + g3 = greenlet(lambda: None) + + g1.parent = g2 + g2.parent = g3 + with self.assertRaises(ValueError) as exc: + g3.parent = g1 + self.assertEqual(str(exc.exception), "cyclic parent chain") + + +class TestRepr(TestCase): + + def assertEndsWith(self, got, suffix): + self.assertTrue(got.endswith(suffix), (got, suffix)) + + def test_main_while_running(self): + r = repr(greenlet.getcurrent()) + self.assertEndsWith(r, " current active started main>") + + def test_main_in_background(self): + main = greenlet.getcurrent() + def run(): + return repr(main) + + g = greenlet(run) + r = g.switch() + self.assertEndsWith(r, ' suspended active started main>') + + def test_initial(self): + r = repr(greenlet()) + self.assertEndsWith(r, ' pending>') + + def test_main_from_other_thread(self): + main = greenlet.getcurrent() + + class T(threading.Thread): + original_main = thread_main = None + main_glet = None + def run(self): + self.original_main = repr(main) + self.main_glet = greenlet.getcurrent() + self.thread_main = repr(self.main_glet) + + t = T() + t.start() + t.join(10) + + self.assertEndsWith(t.original_main, ' suspended active started main>') + self.assertEndsWith(t.thread_main, ' current active started main>') + # give the machinery time to notice the death of the thread, + # and clean it up. Note that we don't use + # ``expect_greenlet_leak`` or wait_for_pending_cleanups, + # because at this point we know we have an extra greenlet + # still reachable. + for _ in range(3): + time.sleep(0.001) + + # In the past, main greenlets, even from dead threads, never + # really appear dead. We have fixed that, and we also report + # that the thread is dead in the repr. (Do this multiple times + # to make sure that we don't self-modify and forget our state + # in the C++ code). + for _ in range(3): + self.assertTrue(t.main_glet.dead) + r = repr(t.main_glet) + self.assertEndsWith(r, ' (thread exited) dead>') + + def test_dead(self): + g = greenlet(lambda: None) + g.switch() + self.assertEndsWith(repr(g), ' dead>') + self.assertNotIn('suspended', repr(g)) + self.assertNotIn('started', repr(g)) + self.assertNotIn('active', repr(g)) + + def test_formatting_produces_native_str(self): + # https://github.com/python-greenlet/greenlet/issues/218 + # %s formatting on Python 2 was producing unicode, not str. + + g_dead = greenlet(lambda: None) + g_not_started = greenlet(lambda: None) + g_cur = greenlet.getcurrent() + + for g in g_dead, g_not_started, g_cur: + + self.assertIsInstance( + '%s' % (g,), + str + ) + self.assertIsInstance( + '%r' % (g,), + str, + ) + + +class TestMainGreenlet(TestCase): + # Tests some implementation details, and relies on some + # implementation details. 
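+    # (Illustrative note: the details relied on are the repr wording,
+    # e.g.
+    #   repr(greenlet.getcurrent())  # '<... current active started main>'
+    # while the repr of its type carries no 'main' marker.)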
+ + def _check_current_is_main(self): + # implementation detail + assert 'main' in repr(greenlet.getcurrent()) + + t = type(greenlet.getcurrent()) + assert 'main' not in repr(t) + return t + + def test_main_greenlet_type_can_be_subclassed(self): + main_type = self._check_current_is_main() + subclass = type('subclass', (main_type,), {}) + self.assertIsNotNone(subclass) + + def test_main_greenlet_is_greenlet(self): + self._check_current_is_main() + self.assertIsInstance(greenlet.getcurrent(), greenlet) + +if __name__ == '__main__': + import unittest + unittest.main() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_greenlet_trash.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_greenlet_trash.py new file mode 100644 index 00000000..6dface3b --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_greenlet_trash.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- +""" +Tests for greenlets interacting with the CPython trash can API. + +The CPython trash can API is not designed to be re-entered from a +single thread. But this can happen using greenlets, if something +during the object deallocation process switches greenlets, and this second +greenlet then causes the trash can to get entered again. Here, we do this +very explicitly, but in other cases (like gevent) it could be arbitrarily more +complicated: for example, a weakref callback might try to acquire a lock that's +already held by another greenlet; that would allow a greenlet switch to occur. + +See https://github.com/gevent/gevent/issues/1909 + +This test is fragile and relies on details of the CPython +implementation (like most of the rest of this package): + + - We enter the trashcan and deferred deallocation after + ``_PyTrash_UNWIND_LEVEL`` calls. This constant, defined in + CPython's object.c, is generally 50. That's basically how many objects are required to + get us into the deferred deallocation situation. + + - The test fails by hitting an ``assert()`` in object.c; if the + build didn't enable assert, then we don't catch this. + + - If the test fails in that way, the interpreter crashes. +""" +from __future__ import print_function, absolute_import, division + +import sys +import unittest + + + +class TestTrashCanReEnter(unittest.TestCase): + + @unittest.skipUnless( + sys.version_info[0] > 2, + "Python 2 tracks this slightly differently, so our test doesn't catch a problem there. " + ) + def test_it(self): + # Try several times to trigger it, because it isn't 100% + # reliable. + for _ in range(10): + self.check_it() + + def check_it(self): # pylint:disable=too-many-statements + import greenlet + from greenlet._greenlet import get_tstate_trash_delete_nesting # pylint:disable=no-name-in-module + + main = greenlet.getcurrent() + + assert get_tstate_trash_delete_nesting() == 0 + + # We expect to be in deferred deallocation after this many + # deallocations have occurred. TODO: I wish we had a better way to do + # this --- that was before get_tstate_trash_delete_nesting; perhaps + # we can use that API to do better? + TRASH_UNWIND_LEVEL = 50 + # How many objects to put in a container; it's the container that + # queues objects for deferred deallocation. + OBJECTS_PER_CONTAINER = 500 + + class Dealloc: # define the class here because we alter class variables each time we run. + """ + An object with a ``__del__`` method. 
When it starts getting deallocated
+ from a deferred trash can run, it switches greenlets, allocates more objects
+ which then also go in the trash can. If we don't save state appropriately,
+ nesting gets out of order and we can crash the interpreter.
+ """
+
+ #: Has our deallocation actually run and switched greenlets?
+ #: When it does, this will be set to the current greenlet. This should
+ #: be happening in the main greenlet, so we check that down below.
+ SPAWNED = False
+
+ #: Has the background greenlet run?
+ BG_RAN = False
+
+ BG_GLET = None
+
+ #: How many of these things have ever been allocated.
+ CREATED = 0
+
+ #: How many of these things have ever been deallocated.
+ DESTROYED = 0
+
+ #: How many were destroyed not in the main greenlet. There should always
+ #: be some.
+ #: If the test is broken or things change in the trashcan implementation,
+ #: this may not be correct.
+ DESTROYED_BG = 0
+
+ def __init__(self, sequence_number):
+ """
+ :param sequence_number: The ordinal of this object during
+ one particular creation run. This is used to detect (guess, really)
+ when we have entered the trash can's deferred deallocation.
+ """
+ self.i = sequence_number
+ Dealloc.CREATED += 1
+
+ def __del__(self):
+ if self.i == TRASH_UNWIND_LEVEL and not self.SPAWNED:
+ Dealloc.SPAWNED = greenlet.getcurrent()
+ other = Dealloc.BG_GLET = greenlet.greenlet(background_greenlet)
+ x = other.switch()
+ assert x == 42
+ # It's important that we don't switch back to the greenlet,
+ # we leave it hanging there in an incomplete state. But we don't let it
+ # get collected, either. If we complete it now, while we're still
+ # in the scope of the initial trash can, things work out and we
+ # don't see the problem. We need this greenlet to complete
+ # at some point in the future, after we've exited this trash can invocation.
+ del other
+ elif self.i == 40 and greenlet.getcurrent() is not main:
+ Dealloc.BG_RAN = True
+ try:
+ main.switch(42)
+ except greenlet.GreenletExit as ex:
+ # We expect this; all references to us go away
+ # while we're still running, and we need to finish deleting
+ # ourself.
+ Dealloc.BG_RAN = type(ex)
+ del ex
+
+ # Record the fact that we're dead last of all. This ensures that
+ # we actually get returned too.
+ Dealloc.DESTROYED += 1
+ if greenlet.getcurrent() is not main:
+ Dealloc.DESTROYED_BG += 1
+
+
+ def background_greenlet():
+ # We direct through a second function, instead of
+ # directly calling ``make_some()``, so that we have complete
+ # control over when these objects are destroyed: we need them
+ # to be destroyed in the context of the background greenlet.
+ t = make_some()
+ del t # Trigger deletion.
+
+ def make_some():
+ t = ()
+ i = OBJECTS_PER_CONTAINER
+ while i:
+ # Nest the tuples; it's the recursion that gets us
+ # into trash.
+ t = (Dealloc(i), t)
+ i -= 1
+ return t
+
+
+ some = make_some()
+ self.assertEqual(Dealloc.CREATED, OBJECTS_PER_CONTAINER)
+ self.assertEqual(Dealloc.DESTROYED, 0)
+
+ # If we're going to crash, it should be on the following line.
+ # We only crash if ``assert()`` is enabled, of course.
+ del some
+
+ # For non-debug builds of CPython, we won't crash. The best we can do is check
+ # the nesting level explicitly.
+ self.assertEqual(0, get_tstate_trash_delete_nesting())
+
+ # Discard this, raising GreenletExit into where it is waiting.
+ Dealloc.BG_GLET = None
+ # The same nesting level is maintained.
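+        # (Illustrative sketch, assuming CPython's object.c: each
+        # deallocation runs inside roughly
+        #   if tstate->trash_delete_nesting >= _PyTrash_UNWIND_LEVEL:
+        #       defer(op)                      # queue for a later pass
+        #   else:
+        #       ++nesting; dealloc(op); --nesting;
+        # so back at top level the per-thread counter must read 0.)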
+ self.assertEqual(0, get_tstate_trash_delete_nesting())
+
+ # We definitely cleaned some up in the background
+ self.assertGreater(Dealloc.DESTROYED_BG, 0)
+
+ # Make sure all the cleanups happened.
+ self.assertIs(Dealloc.SPAWNED, main)
+ self.assertTrue(Dealloc.BG_RAN)
+ self.assertEqual(Dealloc.BG_RAN, greenlet.GreenletExit)
+ self.assertEqual(Dealloc.CREATED, Dealloc.DESTROYED)
+ self.assertEqual(Dealloc.CREATED, OBJECTS_PER_CONTAINER * 2)
+
+ import gc
+ gc.collect()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_leaks.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_leaks.py
new file mode 100644
index 00000000..0ed43b05
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_leaks.py
@@ -0,0 +1,448 @@
+# -*- coding: utf-8 -*-
+"""
+Testing scenarios that may have leaked.
+"""
+from __future__ import print_function, absolute_import, division
+
+import sys
+import gc
+
+import time
+import weakref
+import threading
+
+import psutil
+
+import greenlet
+from . import TestCase
+from .leakcheck import fails_leakcheck
+from .leakcheck import ignores_leakcheck
+from .leakcheck import RUNNING_ON_GITHUB_ACTIONS
+from .leakcheck import RUNNING_ON_MANYLINUX
+
+try:
+ from sys import intern
+except ImportError:
+ # Python 2
+ pass
+
+assert greenlet.GREENLET_USE_GC # Option to disable this was removed in 1.0
+
+class HasFinalizerTracksInstances(object):
+ EXTANT_INSTANCES = set()
+ def __init__(self, msg):
+ self.msg = intern(msg)
+ self.EXTANT_INSTANCES.add(id(self))
+ def __del__(self):
+ self.EXTANT_INSTANCES.remove(id(self))
+ def __repr__(self):
+ return "<HasFinalizerTracksInstances at 0x%x %r>" % (
+ id(self), self.msg
+ )
+ @classmethod
+ def reset(cls):
+ cls.EXTANT_INSTANCES.clear()
+
+
+class TestLeaks(TestCase):
+
+ def test_arg_refs(self):
+ args = ('a', 'b', 'c')
+ refcount_before = sys.getrefcount(args)
+ # pylint:disable=unnecessary-lambda
+ g = greenlet.greenlet(
+ lambda *args: greenlet.getcurrent().parent.switch(*args))
+ for _ in range(100):
+ g.switch(*args)
+ self.assertEqual(sys.getrefcount(args), refcount_before)
+
+ def test_kwarg_refs(self):
+ kwargs = {}
+ # pylint:disable=unnecessary-lambda
+ g = greenlet.greenlet(
+ lambda **kwargs: greenlet.getcurrent().parent.switch(**kwargs))
+ for _ in range(100):
+ g.switch(**kwargs)
+ self.assertEqual(sys.getrefcount(kwargs), 2)
+
+
+ @staticmethod
+ def __recycle_threads():
+ # By introducing a thread that does sleep we allow other threads,
+ # that have triggered their __block condition, but did not have a
+ # chance to deallocate their thread state yet, to finally do so.
+ # The way it works is by requiring a GIL switch (different thread),
+ # which does a GIL release (sleep), which might do a GIL switch
+ # to finished threads and allow them to clean up.
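+        # (Illustrative timeline:
+        #   main: t.start(); sleep()   -> releases the GIL
+        #   worker: sleep()            -> releases the GIL again
+        #   exited threads             -> get scheduled, free their state
+        # which is why a do-nothing sleeping thread suffices here.)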
+ def worker(): + time.sleep(0.001) + t = threading.Thread(target=worker) + t.start() + time.sleep(0.001) + t.join(10) + + def test_threaded_leak(self): + gg = [] + def worker(): + # only main greenlet present + gg.append(weakref.ref(greenlet.getcurrent())) + for _ in range(2): + t = threading.Thread(target=worker) + t.start() + t.join(10) + del t + greenlet.getcurrent() # update ts_current + self.__recycle_threads() + greenlet.getcurrent() # update ts_current + gc.collect() + greenlet.getcurrent() # update ts_current + for g in gg: + self.assertIsNone(g()) + + def test_threaded_adv_leak(self): + gg = [] + def worker(): + # main and additional *finished* greenlets + ll = greenlet.getcurrent().ll = [] + def additional(): + ll.append(greenlet.getcurrent()) + for _ in range(2): + greenlet.greenlet(additional).switch() + gg.append(weakref.ref(greenlet.getcurrent())) + for _ in range(2): + t = threading.Thread(target=worker) + t.start() + t.join(10) + del t + greenlet.getcurrent() # update ts_current + self.__recycle_threads() + greenlet.getcurrent() # update ts_current + gc.collect() + greenlet.getcurrent() # update ts_current + for g in gg: + self.assertIsNone(g()) + + def assertClocksUsed(self): + used = greenlet._greenlet.get_clocks_used_doing_optional_cleanup() + self.assertGreaterEqual(used, 0) + # we don't lose the value + greenlet._greenlet.enable_optional_cleanup(True) + used2 = greenlet._greenlet.get_clocks_used_doing_optional_cleanup() + self.assertEqual(used, used2) + self.assertGreater(greenlet._greenlet.CLOCKS_PER_SEC, 1) + + def _check_issue251(self, + manually_collect_background=True, + explicit_reference_to_switch=False): + # See https://github.com/python-greenlet/greenlet/issues/251 + # Killing a greenlet (probably not the main one) + # in one thread from another thread would + # result in leaking a list (the ts_delkey list). + # We no longer use lists to hold that stuff, though. + + # For the test to be valid, even empty lists have to be tracked by the + # GC + + assert gc.is_tracked([]) + HasFinalizerTracksInstances.reset() + greenlet.getcurrent() + greenlets_before = self.count_objects(greenlet.greenlet, exact_kind=False) + + background_glet_running = threading.Event() + background_glet_killed = threading.Event() + background_greenlets = [] + + # XXX: Switching this to a greenlet subclass that overrides + # run results in all callers failing the leaktest; that + # greenlet instance is leaked. There's a bound method for + # run() living on the stack of the greenlet in g_initialstub, + # and since we don't manually switch back to the background + # greenlet to let it "fall off the end" and exit the + # g_initialstub function, it never gets cleaned up. Making the + # garbage collector aware of this bound method (making it an + # attribute of the greenlet structure and traversing into it) + # doesn't help, for some reason. + def background_greenlet(): + # Throw control back to the main greenlet. 
+ jd = HasFinalizerTracksInstances("DELETING STACK OBJECT") + greenlet._greenlet.set_thread_local( + 'test_leaks_key', + HasFinalizerTracksInstances("DELETING THREAD STATE")) + # Explicitly keeping 'switch' in a local variable + # breaks this test in all versions + if explicit_reference_to_switch: + s = greenlet.getcurrent().parent.switch + s([jd]) + else: + greenlet.getcurrent().parent.switch([jd]) + + bg_main_wrefs = [] + + def background_thread(): + glet = greenlet.greenlet(background_greenlet) + bg_main_wrefs.append(weakref.ref(glet.parent)) + + background_greenlets.append(glet) + glet.switch() # Be sure it's active. + # Control is ours again. + del glet # Delete one reference from the thread it runs in. + background_glet_running.set() + background_glet_killed.wait(10) + + # To trigger the background collection of the dead + # greenlet, thus clearing out the contents of the list, we + # need to run some APIs. See issue 252. + if manually_collect_background: + greenlet.getcurrent() + + + t = threading.Thread(target=background_thread) + t.start() + background_glet_running.wait(10) + greenlet.getcurrent() + lists_before = self.count_objects(list, exact_kind=True) + + assert len(background_greenlets) == 1 + self.assertFalse(background_greenlets[0].dead) + # Delete the last reference to the background greenlet + # from a different thread. This puts it in the background thread's + # ts_delkey list. + del background_greenlets[:] + background_glet_killed.set() + + # Now wait for the background thread to die. + t.join(10) + del t + # As part of the fix for 252, we need to cycle the ceval.c + # interpreter loop to be sure it has had a chance to process + # the pending call. + self.wait_for_pending_cleanups() + + lists_after = self.count_objects(list, exact_kind=True) + greenlets_after = self.count_objects(greenlet.greenlet, exact_kind=False) + + # On 2.7, we observe that lists_after is smaller than + # lists_before. No idea what lists got cleaned up. All the + # Python 3 versions match exactly. + self.assertLessEqual(lists_after, lists_before) + # On versions after 3.6, we've successfully cleaned up the + # greenlet references thanks to the internal "vectorcall" + # protocol; prior to that, there is a reference path through + # the ``greenlet.switch`` method still on the stack that we + # can't reach to clean up. The C code goes through terrific + # lengths to clean that up. + if not explicit_reference_to_switch and greenlet._greenlet.get_clocks_used_doing_optional_cleanup() is not None: + # If cleanup was disabled, though, we may not find it. + self.assertEqual(greenlets_after, greenlets_before) + if manually_collect_background: + # TODO: Figure out how to make this work! + # The one on the stack is still leaking somehow + # in the non-manually-collect state. + self.assertEqual(HasFinalizerTracksInstances.EXTANT_INSTANCES, set()) + else: + # The explicit reference prevents us from collecting it + # and it isn't always found by the GC either for some + # reason. The entire frame is leaked somehow, on some + # platforms (e.g., MacPorts builds of Python (all + # versions!)), but not on other platforms (the linux and + # windows builds on GitHub actions and Appveyor). So we'd + # like to write a test that proves that the main greenlet + # sticks around, and we can on my machine (macOS 11.6, + # MacPorts builds of everything) but we can't write that + # same test on other platforms. However, hopefully iteration + # done by leakcheck will find it. 
+ pass
+
+ if greenlet._greenlet.get_clocks_used_doing_optional_cleanup() is not None:
+ self.assertClocksUsed()
+
+ def test_issue251_killing_cross_thread_leaks_list(self):
+ self._check_issue251()
+
+ def test_issue251_with_cleanup_disabled(self):
+ greenlet._greenlet.enable_optional_cleanup(False)
+ try:
+ self._check_issue251()
+ finally:
+ greenlet._greenlet.enable_optional_cleanup(True)
+
+ @fails_leakcheck
+ def test_issue251_issue252_need_to_collect_in_background(self):
+ # Between greenlet 1.1.2 and the next version, this was still
+ # failing because the leak of the list still exists when we
+ # don't call a greenlet API before exiting the thread. The
+ # proximate cause is that neither of the two greenlets from
+ # the background thread are actually being destroyed, even
+ # though the GC is in fact visiting both objects. It's not
+ # clear where that leak is? For some reason the thread-local
+ # dict holding it isn't being cleaned up.
+ #
+ # The leak, I think, is in the CPython internal function that
+ # calls into green_switch(). The argument tuple is still on
+ # the C stack somewhere and can't be reached? That doesn't
+ # make sense, because the tuple should be collectable when
+ # this object goes away.
+ #
+ # Note that this test sometimes spuriously passes on Linux,
+ # for some reason, but I've never seen it pass on macOS.
+ self._check_issue251(manually_collect_background=False)
+
+ @fails_leakcheck
+ def test_issue251_issue252_need_to_collect_in_background_cleanup_disabled(self):
+ self.expect_greenlet_leak = True
+ greenlet._greenlet.enable_optional_cleanup(False)
+ try:
+ self._check_issue251(manually_collect_background=False)
+ finally:
+ greenlet._greenlet.enable_optional_cleanup(True)
+
+ @fails_leakcheck
+ def test_issue251_issue252_explicit_reference_not_collectable(self):
+ self._check_issue251(
+ manually_collect_background=False,
+ explicit_reference_to_switch=True)
+
+ UNTRACK_ATTEMPTS = 100
+
+ def _only_test_some_versions(self):
+ # We're only looking for this problem specifically on 3.11,
+ # and this set of tests is relatively fragile, depending on
+ # OS and memory management details. So we want to run it on 3.11+
+ # (obviously) but not every older 3.x version in order to reduce
+ # false negatives.
+ if sys.version_info[0] >= 3 and sys.version_info[:2] < (3, 8):
+ self.skipTest('Only observed on 3.11')
+ if sys.version_info[0] == 2 and RUNNING_ON_GITHUB_ACTIONS:
+ self.skipTest('Hard to get a stable pattern here')
+ if RUNNING_ON_MANYLINUX:
+ self.skipTest("Slow and not worth repeating here")
+
+ @ignores_leakcheck
+ # Because we're just trying to track raw memory, not objects, and running
+ # the leakcheck makes an already slow test slower.
+ def test_untracked_memory_doesnt_increase(self):
+ # See https://github.com/gevent/gevent/issues/1924
+ # and https://github.com/python-greenlet/greenlet/issues/328
+ self._only_test_some_versions()
+ def f():
+ return 1
+
+ ITER = 10000
+ def run_it():
+ for _ in range(ITER):
+ greenlet.greenlet(f).switch()
+
+ # Establish baseline
+ for _ in range(3):
+ run_it()
+
+ # uss: (Linux, macOS, Windows): aka "Unique Set Size", this is
+ # the memory which is unique to a process and which would be
+ # freed if the process was terminated right now.
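+        # (Usage sketch: psutil exposes this as
+        #   psutil.Process().memory_full_info().uss   # bytes
+        # where ``memory_full_info()`` is the slower, more detailed
+        # variant of ``memory_info()``.)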
+ uss_before = psutil.Process().memory_full_info().uss + + for count in range(self.UNTRACK_ATTEMPTS): + uss_before = max(uss_before, psutil.Process().memory_full_info().uss) + run_it() + + uss_after = psutil.Process().memory_full_info().uss + if uss_after <= uss_before and count > 1: + break + + self.assertLessEqual(uss_after, uss_before) + + def _check_untracked_memory_thread(self, deallocate_in_thread=True): + self._only_test_some_versions() + # Like the above test, but what if there are a bunch of + # unfinished greenlets in a thread that dies? + # Does it matter if we deallocate in the thread or not? + EXIT_COUNT = [0] + + def f(): + try: + greenlet.getcurrent().parent.switch() + except greenlet.GreenletExit: + EXIT_COUNT[0] += 1 + raise + return 1 + + ITER = 10000 + def run_it(): + glets = [] + for _ in range(ITER): + # Greenlet starts, switches back to us. + # We keep a strong reference to the greenlet though so it doesn't + # get a GreenletExit exception. + g = greenlet.greenlet(f) + glets.append(g) + g.switch() + + return glets + + test = self + + class ThreadFunc: + uss_before = uss_after = 0 + glets = () + ITER = 2 + def __call__(self): + self.uss_before = psutil.Process().memory_full_info().uss + + for _ in range(self.ITER): + self.glets += tuple(run_it()) + + for g in self.glets: + test.assertIn('suspended active', str(g)) + # Drop them. + if deallocate_in_thread: + self.glets = () + self.uss_after = psutil.Process().memory_full_info().uss + + # Establish baseline + uss_before = uss_after = None + for count in range(self.UNTRACK_ATTEMPTS): + EXIT_COUNT[0] = 0 + thread_func = ThreadFunc() + t = threading.Thread(target=thread_func) + t.start() + t.join(30) + self.assertFalse(t.is_alive()) + + if uss_before is None: + uss_before = thread_func.uss_before + + uss_before = max(uss_before, thread_func.uss_before) + if deallocate_in_thread: + self.assertEqual(thread_func.glets, ()) + self.assertEqual(EXIT_COUNT[0], ITER * thread_func.ITER) + + del thread_func # Deallocate the greenlets; but this won't raise into them + del t + if not deallocate_in_thread: + self.assertEqual(EXIT_COUNT[0], 0) + if deallocate_in_thread: + self.wait_for_pending_cleanups() + + uss_after = psutil.Process().memory_full_info().uss + # See if we achieve a non-growth state at some point. Break when we do. + if uss_after <= uss_before and count > 1: + break + + self.wait_for_pending_cleanups() + uss_after = psutil.Process().memory_full_info().uss + self.assertLessEqual(uss_after, uss_before, "after attempts %d" % (count,)) + + @ignores_leakcheck + # Because we're just trying to track raw memory, not objects, and running + # the leakcheck makes an already slow test slower. + def test_untracked_memory_doesnt_increase_unfinished_thread_dealloc_in_thread(self): + self._check_untracked_memory_thread(deallocate_in_thread=True) + + @ignores_leakcheck + # Because the main greenlets from the background threads do not exit in a timely fashion, + # we fail the object-based leakchecks. 
+ def test_untracked_memory_doesnt_increase_unfinished_thread_dealloc_in_main(self): + self._check_untracked_memory_thread(deallocate_in_thread=False) + +if __name__ == '__main__': + __import__('unittest').main() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_stack_saved.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_stack_saved.py new file mode 100644 index 00000000..b362bf95 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_stack_saved.py @@ -0,0 +1,19 @@ +import greenlet +from . import TestCase + + +class Test(TestCase): + + def test_stack_saved(self): + main = greenlet.getcurrent() + self.assertEqual(main._stack_saved, 0) + + def func(): + main.switch(main._stack_saved) + + g = greenlet.greenlet(func) + x = g.switch() + self.assertGreater(x, 0) + self.assertGreater(g._stack_saved, 0) + g.switch() + self.assertEqual(g._stack_saved, 0) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_throw.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_throw.py new file mode 100644 index 00000000..90d657a2 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_throw.py @@ -0,0 +1,129 @@ +import sys + + +from greenlet import greenlet +from . import TestCase + +def switch(*args): + return greenlet.getcurrent().parent.switch(*args) + + +class ThrowTests(TestCase): + def test_class(self): + def f(): + try: + switch("ok") + except RuntimeError: + switch("ok") + return + switch("fail") + g = greenlet(f) + res = g.switch() + self.assertEqual(res, "ok") + res = g.throw(RuntimeError) + self.assertEqual(res, "ok") + + def test_val(self): + def f(): + try: + switch("ok") + except RuntimeError: + val = sys.exc_info()[1] + if str(val) == "ciao": + switch("ok") + return + switch("fail") + + g = greenlet(f) + res = g.switch() + self.assertEqual(res, "ok") + res = g.throw(RuntimeError("ciao")) + self.assertEqual(res, "ok") + + g = greenlet(f) + res = g.switch() + self.assertEqual(res, "ok") + res = g.throw(RuntimeError, "ciao") + self.assertEqual(res, "ok") + + def test_kill(self): + def f(): + switch("ok") + switch("fail") + g = greenlet(f) + res = g.switch() + self.assertEqual(res, "ok") + res = g.throw() + self.assertTrue(isinstance(res, greenlet.GreenletExit)) + self.assertTrue(g.dead) + res = g.throw() # immediately eaten by the already-dead greenlet + self.assertTrue(isinstance(res, greenlet.GreenletExit)) + + def test_throw_goes_to_original_parent(self): + main = greenlet.getcurrent() + + def f1(): + try: + main.switch("f1 ready to catch") + except IndexError: + return "caught" + else: + return "normal exit" + + def f2(): + main.switch("from f2") + + g1 = greenlet(f1) + g2 = greenlet(f2, parent=g1) + with self.assertRaises(IndexError): + g2.throw(IndexError) + self.assertTrue(g2.dead) + self.assertTrue(g1.dead) + + g1 = greenlet(f1) + g2 = greenlet(f2, parent=g1) + res = g1.switch() + self.assertEqual(res, "f1 ready to catch") + res = g2.throw(IndexError) + self.assertEqual(res, "caught") + self.assertTrue(g2.dead) + self.assertTrue(g1.dead) + + g1 = greenlet(f1) + g2 = greenlet(f2, parent=g1) + res = g1.switch() + self.assertEqual(res, "f1 ready to catch") + res = g2.switch() + self.assertEqual(res, "from f2") + res = g2.throw(IndexError) + self.assertEqual(res, "caught") + self.assertTrue(g2.dead) + self.assertTrue(g1.dead) + + def test_non_traceback_param(self): + with 
self.assertRaises(TypeError) as exc: + greenlet.getcurrent().throw( + Exception, + Exception(), + self + ) + self.assertEqual(str(exc.exception), + "throw() third argument must be a traceback object") + + def test_instance_of_wrong_type(self): + with self.assertRaises(TypeError) as exc: + greenlet.getcurrent().throw( + Exception(), + BaseException() + ) + + self.assertEqual(str(exc.exception), + "instance exception may not have a separate value") + + def test_not_throwable(self): + with self.assertRaises(TypeError) as exc: + greenlet.getcurrent().throw( + "abc" + ) + self.assertEqual(str(exc.exception), + "exceptions must be classes, or instances, not str") diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_tracing.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_tracing.py new file mode 100644 index 00000000..de84dbca --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_tracing.py @@ -0,0 +1,278 @@ +from __future__ import print_function +import sys +import greenlet + +from . import TestCase + +class SomeError(Exception): + pass + +class GreenletTracer(object): + oldtrace = None + + def __init__(self, error_on_trace=False): + self.actions = [] + self.error_on_trace = error_on_trace + + def __call__(self, *args): + self.actions.append(args) + if self.error_on_trace: + raise SomeError + + def __enter__(self): + self.oldtrace = greenlet.settrace(self) + return self.actions + + def __exit__(self, *args): + greenlet.settrace(self.oldtrace) + + +class TestGreenletTracing(TestCase): + """ + Tests of ``greenlet.settrace()`` + """ + + def test_a_greenlet_tracing(self): + main = greenlet.getcurrent() + def dummy(): + pass + def dummyexc(): + raise SomeError() + + with GreenletTracer() as actions: + g1 = greenlet.greenlet(dummy) + g1.switch() + g2 = greenlet.greenlet(dummyexc) + self.assertRaises(SomeError, g2.switch) + + self.assertEqual(actions, [ + ('switch', (main, g1)), + ('switch', (g1, main)), + ('switch', (main, g2)), + ('throw', (g2, main)), + ]) + + def test_b_exception_disables_tracing(self): + main = greenlet.getcurrent() + def dummy(): + main.switch() + g = greenlet.greenlet(dummy) + g.switch() + with GreenletTracer(error_on_trace=True) as actions: + self.assertRaises(SomeError, g.switch) + self.assertEqual(greenlet.gettrace(), None) + + self.assertEqual(actions, [ + ('switch', (main, g)), + ]) + + def test_set_same_tracer_twice(self): + # https://github.com/python-greenlet/greenlet/issues/332 + # Our logic in asserting that the tracefunction should + # gain a reference was incorrect if the same tracefunction was set + # twice. + tracer = GreenletTracer() + with tracer: + greenlet.settrace(tracer) + + +class PythonTracer(object): + oldtrace = None + + def __init__(self): + self.actions = [] + + def __call__(self, frame, event, arg): + # Record the co_name so we have an idea what function we're in. + self.actions.append((event, frame.f_code.co_name)) + + def __enter__(self): + self.oldtrace = sys.setprofile(self) + return self.actions + + def __exit__(self, *args): + sys.setprofile(self.oldtrace) + +def tpt_callback(): + return 42 + +class TestPythonTracing(TestCase): + """ + Tests of the interaction of ``sys.settrace()`` + with greenlet facilities. + + NOTE: Most of this is probably CPython specific. 
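+
+    A rough sketch of the pattern exercised here (illustrative only;
+    note the tests below drive sys.setprofile rather than sys.settrace):
+
+        import sys
+        import greenlet
+
+        def profiler(frame, event, arg):
+            print(event, frame.f_code.co_name)
+
+        sys.setprofile(profiler)
+        greenlet.greenlet(lambda: None).switch()  # the switch is profiled
+        sys.setprofile(None)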
+ """ + + maxDiff = None + + def test_trace_events_trivial(self): + with PythonTracer() as actions: + tpt_callback() + # If we use the sys.settrace instead of setprofile, we get + # this: + + # self.assertEqual(actions, [ + # ('call', 'tpt_callback'), + # ('call', '__exit__'), + # ]) + + self.assertEqual(actions, [ + ('return', '__enter__'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) + + def _trace_switch(self, glet): + with PythonTracer() as actions: + glet.switch() + return actions + + def _check_trace_events_func_already_set(self, glet): + actions = self._trace_switch(glet) + self.assertEqual(actions, [ + ('return', '__enter__'), + ('c_call', '_trace_switch'), + ('call', 'run'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('return', 'run'), + ('c_return', '_trace_switch'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) + + def test_trace_events_into_greenlet_func_already_set(self): + def run(): + return tpt_callback() + + self._check_trace_events_func_already_set(greenlet.greenlet(run)) + + def test_trace_events_into_greenlet_subclass_already_set(self): + class X(greenlet.greenlet): + def run(self): + return tpt_callback() + self._check_trace_events_func_already_set(X()) + + def _check_trace_events_from_greenlet_sets_profiler(self, g, tracer): + g.switch() + tpt_callback() + tracer.__exit__() + self.assertEqual(tracer.actions, [ + ('return', '__enter__'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('return', 'run'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) + + + def test_trace_events_from_greenlet_func_sets_profiler(self): + tracer = PythonTracer() + def run(): + tracer.__enter__() + return tpt_callback() + + self._check_trace_events_from_greenlet_sets_profiler(greenlet.greenlet(run), + tracer) + + def test_trace_events_from_greenlet_subclass_sets_profiler(self): + tracer = PythonTracer() + class X(greenlet.greenlet): + def run(self): + tracer.__enter__() + return tpt_callback() + + self._check_trace_events_from_greenlet_sets_profiler(X(), tracer) + + + def test_trace_events_multiple_greenlets_switching(self): + tracer = PythonTracer() + + g1 = None + g2 = None + + def g1_run(): + tracer.__enter__() + tpt_callback() + g2.switch() + tpt_callback() + return 42 + + def g2_run(): + tpt_callback() + tracer.__exit__() + tpt_callback() + g1.switch() + + g1 = greenlet.greenlet(g1_run) + g2 = greenlet.greenlet(g2_run) + + x = g1.switch() + self.assertEqual(x, 42) + tpt_callback() # ensure not in the trace + self.assertEqual(tracer.actions, [ + ('return', '__enter__'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('c_call', 'g1_run'), + ('call', 'g2_run'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) + + def test_trace_events_multiple_greenlets_switching_siblings(self): + # Like the first version, but get both greenlets running first + # as "siblings" and then establish the tracing. + tracer = PythonTracer() + + g1 = None + g2 = None + + def g1_run(): + greenlet.getcurrent().parent.switch() + tracer.__enter__() + tpt_callback() + g2.switch() + tpt_callback() + return 42 + + def g2_run(): + greenlet.getcurrent().parent.switch() + + tpt_callback() + tracer.__exit__() + tpt_callback() + g1.switch() + + g1 = greenlet.greenlet(g1_run) + g2 = greenlet.greenlet(g2_run) + + # Start g1 + g1.switch() + # And it immediately returns control to us. 
+ # Start g2 + g2.switch() + # Which also returns. Now kick of the real part of the + # test. + x = g1.switch() + self.assertEqual(x, 42) + + tpt_callback() # ensure not in the trace + self.assertEqual(tracer.actions, [ + ('return', '__enter__'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('c_call', 'g1_run'), + ('call', 'tpt_callback'), + ('return', 'tpt_callback'), + ('call', '__exit__'), + ('c_call', '__exit__'), + ]) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_version.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_version.py new file mode 100644 index 00000000..96c17cf1 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_version.py @@ -0,0 +1,41 @@ +#! /usr/bin/env python +from __future__ import absolute_import +from __future__ import print_function + +import sys +import os +from unittest import TestCase as NonLeakingTestCase + +import greenlet + +# No reason to run this multiple times under leakchecks, +# it doesn't do anything. +class VersionTests(NonLeakingTestCase): + def test_version(self): + def find_dominating_file(name): + if os.path.exists(name): + return name + + tried = [] + here = os.path.abspath(os.path.dirname(__file__)) + for i in range(10): + up = ['..'] * i + path = [here] + up + [name] + fname = os.path.join(*path) + fname = os.path.abspath(fname) + tried.append(fname) + if os.path.exists(fname): + return fname + raise AssertionError("Could not find file " + name + "; checked " + str(tried)) + + try: + setup_py = find_dominating_file('setup.py') + except AssertionError as e: + self.skipTest("Unable to find setup.py; must be out of tree. " + str(e)) + + + invoke_setup = "%s %s --version" % (sys.executable, setup_py) + with os.popen(invoke_setup) as f: + sversion = f.read().strip() + + self.assertEqual(sversion, greenlet.__version__) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_weakref.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_weakref.py new file mode 100644 index 00000000..916ef8ae --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/greenlet/tests/test_weakref.py @@ -0,0 +1,35 @@ +import gc +import weakref +import unittest + +import greenlet +from . 
import TestCase + +class WeakRefTests(TestCase): + def test_dead_weakref(self): + def _dead_greenlet(): + g = greenlet.greenlet(lambda: None) + g.switch() + return g + o = weakref.ref(_dead_greenlet()) + gc.collect() + self.assertEqual(o(), None) + + def test_inactive_weakref(self): + o = weakref.ref(greenlet.greenlet()) + gc.collect() + self.assertEqual(o(), None) + + def test_dealloc_weakref(self): + seen = [] + def worker(): + try: + greenlet.getcurrent().parent.switch() + finally: + seen.append(g()) + g = greenlet.greenlet(worker) + g.switch() + g2 = greenlet.greenlet(lambda: None, g) + g = weakref.ref(g2) + g2 = None + self.assertEqual(seen, [None]) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/__init__.py new file mode 100644 index 00000000..86878f97 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/__init__.py @@ -0,0 +1,123 @@ +# Copyright (c) 2009, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""MySQL Connector/Python - MySQL driver written in Python.""" + +try: + from .connection_cext import CMySQLConnection +except ImportError: + HAVE_CEXT = False +else: + HAVE_CEXT = True + + +from . 
import version +from .connection import MySQLConnection +from .constants import CharacterSet, ClientFlag, FieldFlag, FieldType, RefreshOption +from .dbapi import ( + BINARY, + DATETIME, + NUMBER, + ROWID, + STRING, + Binary, + Date, + DateFromTicks, + Time, + TimeFromTicks, + Timestamp, + TimestampFromTicks, + apilevel, + paramstyle, + threadsafety, +) +from .errors import ( # pylint: disable=redefined-builtin + DatabaseError, + DataError, + Error, + IntegrityError, + InterfaceError, + InternalError, + NotSupportedError, + OperationalError, + PoolError, + ProgrammingError, + Warning, + custom_error_exception, +) +from .pooling import connect + +Connect = connect + +__version_info__ = version.VERSION +__version__ = version.VERSION_TEXT + +__all__ = [ + "MySQLConnection", + "Connect", + "custom_error_exception", + # Some useful constants + "FieldType", + "FieldFlag", + "ClientFlag", + "CharacterSet", + "RefreshOption", + "HAVE_CEXT", + # Error handling + "Error", + "Warning", + "InterfaceError", + "DatabaseError", + "NotSupportedError", + "DataError", + "IntegrityError", + "PoolError", + "ProgrammingError", + "OperationalError", + "InternalError", + # DBAPI PEP 249 required exports + "connect", + "apilevel", + "threadsafety", + "paramstyle", + "Date", + "Time", + "Timestamp", + "Binary", + "DateFromTicks", + "DateFromTicks", + "TimestampFromTicks", + "TimeFromTicks", + "STRING", + "BINARY", + "NUMBER", + "DATETIME", + "ROWID", + # C Extension + "CMySQLConnection", +] diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/abstracts.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/abstracts.py new file mode 100644 index 00000000..d44c9f8b --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/abstracts.py @@ -0,0 +1,1736 @@ +# Copyright (c) 2014, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +# mypy: disable-error-code="assignment,attr-defined" + +"""Module gathering all abstract base classes.""" + +from __future__ import annotations + +import importlib +import os +import re +import weakref + +from abc import ABC, abstractmethod +from datetime import date, datetime, time, timedelta +from decimal import Decimal +from inspect import signature +from time import sleep +from types import TracebackType +from typing import ( + Any, + BinaryIO, + Callable, + Dict, + Generator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +TLS_V1_3_SUPPORTED = False +try: + import ssl + + if hasattr(ssl, "HAS_TLSv1_3") and ssl.HAS_TLSv1_3: + TLS_V1_3_SUPPORTED = True +except ImportError: + # If import fails, we don't have SSL support. + pass + +from .constants import ( + CONN_ATTRS_DN, + DEFAULT_CONFIGURATION, + DEPRECATED_TLS_VERSIONS, + OPENSSL_CS_NAMES, + TLS_CIPHER_SUITES, + TLS_VERSIONS, + CharacterSet, + ClientFlag, +) +from .conversion import MySQLConverter, MySQLConverterBase +from .errors import ( + Error, + InterfaceError, + NotSupportedError, + OperationalError, + ProgrammingError, +) +from .optionfiles import read_option_files +from .types import ( + ConnAttrsType, + DescriptionType, + HandShakeType, + QueryAttrType, + StrOrBytes, + SupportedMysqlBinaryProtocolTypes, + WarningType, +) + +NAMED_TUPLE_CACHE: weakref.WeakValueDictionary[Any, Any] = weakref.WeakValueDictionary() + +DUPLICATED_IN_LIST_ERROR = ( + "The '{list}' list must not contain repeated values, the value " + "'{value}' is duplicated." +) + +TLS_VERSION_ERROR = ( + "The given tls_version: '{}' is not recognized as a valid " + "TLS protocol version (should be one of {})." +) + +TLS_VERSION_DEPRECATED_ERROR = ( + "The given tls_version: '{}' are no longer allowed (should be one of {})." +) + +TLS_VER_NO_SUPPORTED = ( + "No supported TLS protocol version found in the 'tls-versions' list '{}'. " +) + +KRB_SERVICE_PINCIPAL_ERROR = ( + 'Option "krb_service_principal" {error}, must be a string in the form ' + '"primary/instance@realm" e.g "ldap/ldapauth@MYSQL.COM" where "@realm" ' + "is optional and if it is not given will be assumed to belong to the " + "default realm, as configured in the krb5.conf file." 
+) + +MYSQL_PY_TYPES = ( + Decimal, + bytes, + date, + datetime, + float, + int, + str, + time, + timedelta, +) + + +class MySQLConnectionAbstract(ABC): + """Abstract class for classes connecting to a MySQL server""" + + def __init__(self) -> None: + """Initialize""" + self._client_flags: int = ClientFlag.get_default() + self._charset_id: int = 45 + self._sql_mode: Optional[str] = None + self._time_zone: Optional[str] = None + self._autocommit: bool = False + self._server_version: Optional[Tuple[int, ...]] = None + self._handshake: Optional[HandShakeType] = None + self._conn_attrs: ConnAttrsType = {} + + self._user: str = "" + self._password: str = "" + self._password1: str = "" + self._password2: str = "" + self._password3: str = "" + self._database: str = "" + self._host: str = "127.0.0.1" + self._port: int = 3306 + self._unix_socket: Optional[str] = None + self._client_host: str = "" + self._client_port: int = 0 + self._ssl: Dict[str, Optional[Union[str, bool, List[str]]]] = {} + self._ssl_disabled: bool = DEFAULT_CONFIGURATION["ssl_disabled"] + self._force_ipv6: bool = False + self._oci_config_file: Optional[str] = None + self._fido_callback: Optional[Union[str, Callable]] = None + self._krb_service_principal: Optional[str] = None + + self._use_unicode: bool = True + self._get_warnings: bool = False + self._raise_on_warnings: bool = False + self._connection_timeout: Optional[int] = DEFAULT_CONFIGURATION[ + "connect_timeout" + ] + self._buffered: bool = False + self._unread_result: bool = False + self._have_next_result: bool = False + self._raw: bool = False + self._in_transaction: bool = False + self._allow_local_infile: bool = DEFAULT_CONFIGURATION["allow_local_infile"] + self._allow_local_infile_in_path: Optional[str] = DEFAULT_CONFIGURATION[ + "allow_local_infile_in_path" + ] + + self._prepared_statements: Any = None + self._query_attrs: QueryAttrType = [] + + self._ssl_active: bool = False + self._auth_plugin: Optional[str] = None + self._auth_plugin_class: Optional[str] = None + self._pool_config_version: Any = None + self.converter: Optional[MySQLConverter] = None + self._converter_class: Optional[Type[MySQLConverter]] = None + self._converter_str_fallback: bool = False + self._compress: bool = False + + self._consume_results: bool = False + self._init_command: Optional[str] = None + + def __enter__(self) -> MySQLConnectionAbstract: + return self + + def __exit__( + self, + exc_type: Type[BaseException], + exc_value: BaseException, + traceback: TracebackType, + ) -> None: + self.close() + + def get_self(self) -> MySQLConnectionAbstract: + """Return self for weakref.proxy + + This method is used when the original object is needed when using + weakref.proxy. 
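+
+        Illustrative sketch (names are examples only)::
+
+            import weakref
+
+            proxied = weakref.proxy(cnx)
+            original = proxied.get_self()  # the real connection object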
+ """ + return self + + @property + def is_secure(self) -> bool: + """Return True if is a secure connection.""" + return self._ssl_active or ( + self._unix_socket is not None and os.name == "posix" + ) + + @property + def have_next_result(self) -> bool: + """Return if have next result.""" + return self._have_next_result + + @property + def query_attrs(self) -> QueryAttrType: + """Return query attributes list.""" + return self._query_attrs + + def query_attrs_append( + self, value: Tuple[str, SupportedMysqlBinaryProtocolTypes] + ) -> None: + """Add element to the query attributes list.""" + self._query_attrs.append(value) + + def query_attrs_clear(self) -> None: + """Clear query attributes list.""" + del self._query_attrs[:] + + def _validate_tls_ciphersuites(self) -> None: + """Validates the tls_ciphersuites option.""" + tls_ciphersuites = [] + tls_cs = self._ssl["tls_ciphersuites"] + + if isinstance(tls_cs, str): + if not (tls_cs.startswith("[") and tls_cs.endswith("]")): + raise AttributeError( + f"tls_ciphersuites must be a list, found: '{tls_cs}'" + ) + tls_css = tls_cs[1:-1].split(",") + if not tls_css: + raise AttributeError( + "No valid cipher suite found in 'tls_ciphersuites' list" + ) + for _tls_cs in tls_css: + _tls_cs = tls_cs.strip().upper() + if _tls_cs: + tls_ciphersuites.append(_tls_cs) + + elif isinstance(tls_cs, (list, set)): + tls_ciphersuites = [tls_cs for tls_cs in tls_cs if tls_cs] + else: + raise AttributeError( + "tls_ciphersuites should be a list with one or more " + f"ciphersuites. Found: '{tls_cs}'" + ) + + tls_versions = ( + TLS_VERSIONS[:] + if self._ssl.get("tls_versions", None) is None + else self._ssl["tls_versions"][:] # type: ignore[index] + ) + + # A newer TLS version can use a cipher introduced on + # an older version. + tls_versions.sort(reverse=True) # type: ignore[union-attr] + newer_tls_ver = tls_versions[0] + # translated_names[0] belongs to TLSv1, TLSv1.1 and TLSv1.2 + # translated_names[1] are TLSv1.3 only + translated_names: List[List[str]] = [[], []] + iani_cipher_suites_names = {} + ossl_cipher_suites_names: List[str] = [] + + # Old ciphers can work with new TLS versions. + # Find all the ciphers introduced on previous TLS versions. 
+ for tls_ver in TLS_VERSIONS[: TLS_VERSIONS.index(newer_tls_ver) + 1]: + iani_cipher_suites_names.update(TLS_CIPHER_SUITES[tls_ver]) + ossl_cipher_suites_names.extend(OPENSSL_CS_NAMES[tls_ver]) + + for name in tls_ciphersuites: + if "-" in name and name in ossl_cipher_suites_names: + if name in OPENSSL_CS_NAMES["TLSv1.3"]: + translated_names[1].append(name) + else: + translated_names[0].append(name) + elif name in iani_cipher_suites_names: + translated_name = iani_cipher_suites_names[name] + if translated_name in translated_names: + raise AttributeError( + DUPLICATED_IN_LIST_ERROR.format( + list="tls_ciphersuites", value=translated_name + ) + ) + if name in TLS_CIPHER_SUITES["TLSv1.3"]: + translated_names[1].append(iani_cipher_suites_names[name]) + else: + translated_names[0].append(iani_cipher_suites_names[name]) + else: + raise AttributeError( + f"The value '{name}' in tls_ciphersuites is not a valid " + "cipher suite" + ) + if not translated_names[0] and not translated_names[1]: + raise AttributeError( + "No valid cipher suite found in the 'tls_ciphersuites' list" + ) + + self._ssl["tls_ciphersuites"] = [ + ":".join(translated_names[0]), + ":".join(translated_names[1]), + ] + + def _validate_tls_versions(self) -> None: + """Validates the tls_versions option.""" + tls_versions = [] + tls_version = self._ssl["tls_versions"] + + if isinstance(tls_version, str): + if not (tls_version.startswith("[") and tls_version.endswith("]")): + raise AttributeError( + f"tls_versions must be a list, found: '{tls_version}'" + ) + tls_vers = tls_version[1:-1].split(",") + for tls_ver in tls_vers: + tls_version = tls_ver.strip() + if tls_version == "": + continue + if tls_version in tls_versions: + raise AttributeError( + DUPLICATED_IN_LIST_ERROR.format( + list="tls_versions", value=tls_version + ) + ) + tls_versions.append(tls_version) + if tls_vers == ["TLSv1.3"] and not TLS_V1_3_SUPPORTED: + raise AttributeError( + TLS_VER_NO_SUPPORTED.format(tls_version, TLS_VERSIONS) + ) + elif isinstance(tls_version, list): + if not tls_version: + raise AttributeError( + "At least one TLS protocol version must be specified in " + "'tls_versions' list" + ) + for tls_ver in tls_version: + if tls_ver in tls_versions: + raise AttributeError( + DUPLICATED_IN_LIST_ERROR.format( + list="tls_versions", value=tls_ver + ) + ) + tls_versions.append(tls_ver) + elif isinstance(tls_version, set): + for tls_ver in tls_version: + tls_versions.append(tls_ver) + else: + raise AttributeError( + "tls_versions should be a list with one or more of versions " + f"in {', '.join(TLS_VERSIONS)}. 
found: '{tls_versions}'" + ) + + if not tls_versions: + raise AttributeError( + "At least one TLS protocol version must be specified " + "in 'tls_versions' list when this option is given" + ) + + use_tls_versions = [] + deprecated_tls_versions = [] + invalid_tls_versions = [] + for tls_ver in tls_versions: + if tls_ver in TLS_VERSIONS: + use_tls_versions.append(tls_ver) + if tls_ver in DEPRECATED_TLS_VERSIONS: + deprecated_tls_versions.append(tls_ver) + else: + invalid_tls_versions.append(tls_ver) + + if use_tls_versions: + if use_tls_versions == ["TLSv1.3"] and not TLS_V1_3_SUPPORTED: + raise NotSupportedError( + TLS_VER_NO_SUPPORTED.format(tls_version, TLS_VERSIONS) + ) + use_tls_versions.sort() + self._ssl["tls_versions"] = use_tls_versions + elif deprecated_tls_versions: + raise NotSupportedError( + TLS_VERSION_DEPRECATED_ERROR.format( + deprecated_tls_versions, TLS_VERSIONS + ) + ) + elif invalid_tls_versions: + raise AttributeError(TLS_VERSION_ERROR.format(tls_ver, TLS_VERSIONS)) + + @property + def user(self) -> str: + """User used while connecting to MySQL""" + return self._user + + @property + def server_host(self) -> str: + """MySQL server IP address or name""" + return self._host + + @property + def server_port(self) -> int: + "MySQL server TCP/IP port" + return self._port + + @property + def unix_socket(self) -> Optional[str]: + "MySQL Unix socket file location" + return self._unix_socket + + @property + @abstractmethod + def database(self) -> str: + """Get the current database""" + + @database.setter + def database(self, value: str) -> None: + """Set the current database""" + self.cmd_query(f"USE {value}") + + @property + def can_consume_results(self) -> bool: + """Returns whether to consume results""" + return self._consume_results + + @can_consume_results.setter + def can_consume_results(self, value: bool) -> None: + """Set if can consume results.""" + assert isinstance(value, bool) + self._consume_results = value + + @property + def pool_config_version(self) -> Any: + """Return the pool configuration version""" + return self._pool_config_version + + @pool_config_version.setter + def pool_config_version(self, value: Any) -> None: + """Set the pool configuration version""" + self._pool_config_version = value + + def config(self, **kwargs: Any) -> None: + """Configure the MySQL Connection + + This method allows you to configure the MySQLConnection instance. + + Raises on errors. 
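+
+        A typical call looks like the following (values are
+        illustrative):
+
+            cnx.config(host="127.0.0.1", port=3306,
+                       user="scott", password="secret",
+                       database="employees")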
+ """ + config = kwargs.copy() + if "dsn" in config: + raise NotSupportedError("Data source name is not supported") + + # Read option files + config = read_option_files(**config) + + # Configure how we handle MySQL warnings + try: + self.get_warnings = config["get_warnings"] + del config["get_warnings"] + except KeyError: + pass # Leave what was set or default + try: + self.raise_on_warnings = config["raise_on_warnings"] + del config["raise_on_warnings"] + except KeyError: + pass # Leave what was set or default + + # Configure client flags + try: + default = ClientFlag.get_default() + self.set_client_flags(config["client_flags"] or default) + del config["client_flags"] + except KeyError: + pass # Missing client_flags-argument is OK + + try: + if config["compress"]: + self._compress = True + self.set_client_flags([ClientFlag.COMPRESS]) + except KeyError: + pass # Missing compress argument is OK + + self._allow_local_infile = config.get( + "allow_local_infile", DEFAULT_CONFIGURATION["allow_local_infile"] + ) + self._allow_local_infile_in_path = config.get( + "allow_local_infile_in_path", + DEFAULT_CONFIGURATION["allow_local_infile_in_path"], + ) + infile_in_path = None + if self._allow_local_infile_in_path: + infile_in_path = os.path.abspath(self._allow_local_infile_in_path) + if ( + infile_in_path + and os.path.exists(infile_in_path) + and not os.path.isdir(infile_in_path) + or os.path.islink(infile_in_path) + ): + raise AttributeError("allow_local_infile_in_path must be a directory") + if self._allow_local_infile or self._allow_local_infile_in_path: + self.set_client_flags([ClientFlag.LOCAL_FILES]) + else: + self.set_client_flags([-ClientFlag.LOCAL_FILES]) + + try: + if not config["consume_results"]: + self._consume_results = False + else: + self._consume_results = True + except KeyError: + self._consume_results = False + + # Configure auth_plugin + try: + self._auth_plugin = config["auth_plugin"] + del config["auth_plugin"] + except KeyError: + self._auth_plugin = "" + + # Configure character set and collation + if "charset" in config or "collation" in config: + try: + charset = config["charset"] + del config["charset"] + except KeyError: + charset = None + try: + collation = config["collation"] + del config["collation"] + except KeyError: + collation = None + self._charset_id = CharacterSet.get_charset_info(charset, collation)[0] + + # Set converter class + try: + self.set_converter_class(config["converter_class"]) + except KeyError: + pass # Using default converter class + except TypeError as err: + raise AttributeError( + "Converter class should be a subclass of " + "conversion.MySQLConverterBase" + ) from err + + # Compatible configuration with other drivers + compat_map = [ + # (,) + ("db", "database"), + ("username", "user"), + ("passwd", "password"), + ("connect_timeout", "connection_timeout"), + ("read_default_file", "option_files"), + ] + for compat, translate in compat_map: + try: + if translate not in config: + config[translate] = config[compat] + del config[compat] + except KeyError: + pass # Missing compat argument is OK + + # Configure login information + if "user" in config or "password" in config: + try: + user = config["user"] + del config["user"] + except KeyError: + user = self._user + try: + password = config["password"] + del config["password"] + except KeyError: + password = self._password + self.set_login(user, password) + + # Configure host information + if "host" in config and config["host"]: + self._host = config["host"] + + # Check network locations + try: + 
self._port = int(config["port"]) + del config["port"] + except KeyError: + pass # Missing port argument is OK + except ValueError as err: + raise InterfaceError("TCP/IP port number should be an integer") from err + + if "ssl_disabled" in config: + self._ssl_disabled = config.pop("ssl_disabled") + + # If an init_command is set, keep it, so we can execute it in _post_connection + if "init_command" in config: + self._init_command = config["init_command"] + del config["init_command"] + + # Other configuration + set_ssl_flag = False + for key, value in config.items(): + try: + DEFAULT_CONFIGURATION[key] + except KeyError: + raise AttributeError(f"Unsupported argument '{key}'") from None + # SSL Configuration + if key.startswith("ssl_"): + set_ssl_flag = True + self._ssl.update({key.replace("ssl_", ""): value}) + elif key.startswith("tls_"): + set_ssl_flag = True + self._ssl.update({key: value}) + else: + attribute = "_" + key + try: + setattr(self, attribute, value.strip()) + except AttributeError: + setattr(self, attribute, value) + + # Disable SSL for unix socket connections + if self._unix_socket and os.name == "posix": + self._ssl_disabled = True + + if self._ssl_disabled and self._auth_plugin == "mysql_clear_password": + raise InterfaceError( + "Clear password authentication is not supported over insecure channels" + ) + + if set_ssl_flag: + if "verify_cert" not in self._ssl: + self._ssl["verify_cert"] = DEFAULT_CONFIGURATION["ssl_verify_cert"] + if "verify_identity" not in self._ssl: + self._ssl["verify_identity"] = DEFAULT_CONFIGURATION[ + "ssl_verify_identity" + ] + # Make sure both ssl_key/ssl_cert are set, or neither (XOR) + if "ca" not in self._ssl or self._ssl["ca"] is None: + self._ssl["ca"] = "" + if bool("key" in self._ssl) != bool("cert" in self._ssl): + raise AttributeError( + "ssl_key and ssl_cert need to be both specified, or neither" + ) + # Make sure key/cert are set to None + if not set(("key", "cert")) <= set(self._ssl): + self._ssl["key"] = None + self._ssl["cert"] = None + elif (self._ssl["key"] is None) != (self._ssl["cert"] is None): + raise AttributeError( + "ssl_key and ssl_cert need to be both set, or neither" + ) + if "tls_versions" in self._ssl and self._ssl["tls_versions"] is not None: + self._validate_tls_versions() + + if ( + "tls_ciphersuites" in self._ssl + and self._ssl["tls_ciphersuites"] is not None + ): + self._validate_tls_ciphersuites() + + if self._conn_attrs is None: + self._conn_attrs = {} + elif not isinstance(self._conn_attrs, dict): + raise InterfaceError("conn_attrs must be of type dict") + else: + for attr_name, attr_value in self._conn_attrs.items(): + if attr_name in CONN_ATTRS_DN: + continue + # Validate name type + if not isinstance(attr_name, str): + raise InterfaceError( + "Attribute name should be a string, found: " + f"'{attr_name}' in '{self._conn_attrs}'" + ) + # Validate attribute name limit 32 characters + if len(attr_name) > 32: + raise InterfaceError( + f"Attribute name '{attr_name}' exceeds 32 characters limit size" + ) + # Validate names in connection attributes cannot start with "_" + if attr_name.startswith("_"): + raise InterfaceError( + "Key names in connection attributes cannot start with " + "'_', found: '{attr_name}'" + ) + # Validate value type + if not isinstance(attr_value, str): + raise InterfaceError( + f"Attribute '{attr_name}' value: '{attr_value}' must " + "be a string type" + ) + # Validate attribute value limit 1024 characters + if len(attr_value) > 1024: + raise InterfaceError( + f"Attribute '{attr_name}' 
value: '{attr_value}' " + "exceeds 1024 characters limit size" + ) + + if self._client_flags & ClientFlag.CONNECT_ARGS: + self._add_default_conn_attrs() + + if "kerberos_auth_mode" in config and config["kerberos_auth_mode"] is not None: + if not isinstance(config["kerberos_auth_mode"], str): + raise InterfaceError("'kerberos_auth_mode' must be of type str") + kerberos_auth_mode = config["kerberos_auth_mode"].lower() + if kerberos_auth_mode == "sspi": + if os.name != "nt": + raise InterfaceError( + "'kerberos_auth_mode=SSPI' is only available on Windows" + ) + self._auth_plugin_class = "MySQLSSPIKerberosAuthPlugin" + elif kerberos_auth_mode == "gssapi": + self._auth_plugin_class = "MySQLKerberosAuthPlugin" + else: + raise InterfaceError( + "Invalid 'kerberos_auth_mode' mode. Please use 'SSPI' or 'GSSAPI'" + ) + + if ( + "krb_service_principal" in config + and config["krb_service_principal"] is not None + ): + self._krb_service_principal = config["krb_service_principal"] + if not isinstance(self._krb_service_principal, str): + raise InterfaceError( + KRB_SERVICE_PINCIPAL_ERROR.format(error="is not a string") + ) + if self._krb_service_principal == "": + raise InterfaceError( + KRB_SERVICE_PINCIPAL_ERROR.format( + error="can not be an empty string" + ) + ) + if "/" not in self._krb_service_principal: + raise InterfaceError( + KRB_SERVICE_PINCIPAL_ERROR.format(error="is incorrectly formatted") + ) + + if self._fido_callback: + # Import the callable if it's a str + if isinstance(self._fido_callback, str): + try: + module, callback = self._fido_callback.rsplit(".", 1) + except ValueError: + raise ProgrammingError( + f"No callable named '{self._fido_callback}'" + ) from None + try: + module = importlib.import_module(module) + self._fido_callback = getattr(module, callback) + except (AttributeError, ModuleNotFoundError) as err: + raise ProgrammingError(f"{err}") from err + # Check if it's a callable + if not callable(self._fido_callback): + raise ProgrammingError("Expected a callable for 'fido_callback'") + # Check the callable signature if has only 1 positional argument + params = len(signature(self._fido_callback).parameters) + if params != 1: + raise ProgrammingError( + "'fido_callback' requires 1 positional argument, but the " + f"callback provided has {params}" + ) + + def _add_default_conn_attrs(self) -> Any: + """Add the default connection attributes.""" + + @staticmethod + def _check_server_version(server_version: StrOrBytes) -> Tuple[int, ...]: + """Check the MySQL version + + This method will check the MySQL version and raise an InterfaceError + when it is not supported or invalid. It will return the version + as a tuple with major, minor and patch. + + Raises InterfaceError if invalid server version. + + Returns tuple + """ + if isinstance(server_version, (bytearray, bytes)): + server_version = server_version.decode() + + regex_ver = re.compile(r"^(\d{1,2})\.(\d{1,2})\.(\d{1,3})(.*)") + match = regex_ver.match(server_version) + if not match: + raise InterfaceError("Failed parsing MySQL version") + + version = tuple(int(v) for v in match.groups()[0:3]) + if version < (4, 1): + raise InterfaceError(f"MySQL Version '{server_version}' is not supported") + + return version + + def get_server_version(self) -> Tuple[int, ...]: + """Get the MySQL version + + This method returns the MySQL server version as a tuple. If not + previously connected, it will return None. + + Returns a tuple or None. 
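+
+        For example (the value shown is illustrative):
+            >>> cnx.get_server_version()
+            (8, 0, 33)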
+ """ + return self._server_version + + def get_server_info(self) -> Optional[str]: + """Get the original MySQL version information + + This method returns the original MySQL server as text. If not + previously connected, it will return None. + + Returns a string or None. + """ + try: + return self._handshake["server_version_original"] # type: ignore[return-value] + except (TypeError, KeyError): + return None + + @property + @abstractmethod + def in_transaction(self) -> Any: + """MySQL session has started a transaction""" + + def set_client_flags(self, flags: Union[int, Sequence[int]]) -> int: + """Set the client flags + + The flags-argument can be either an int or a list (or tuple) of + ClientFlag-values. If it is an integer, it will set client_flags + to flags as is. + If flags is a list (or tuple), each flag will be set or unset + when it's negative. + + set_client_flags([ClientFlag.FOUND_ROWS,-ClientFlag.LONG_FLAG]) + + Raises ProgrammingError when the flags argument is not a set or + an integer bigger than 0. + + Returns self.client_flags + """ + if isinstance(flags, int) and flags > 0: + self._client_flags = flags + elif isinstance(flags, (tuple, list)): + for flag in flags: + if flag < 0: + self._client_flags &= ~abs(flag) + else: + self._client_flags |= flag + else: + raise ProgrammingError("set_client_flags expect integer (>0) or set") + return self._client_flags + + def isset_client_flag(self, flag: int) -> bool: + """Check if a client flag is set""" + if (self._client_flags & flag) > 0: + return True + return False + + @property + def time_zone(self) -> str: + """Get the current time zone""" + return self.info_query("SELECT @@session.time_zone")[0] + + @time_zone.setter + def time_zone(self, value: str) -> None: + """Set the time zone""" + self.cmd_query(f"SET @@session.time_zone = '{value}'") + self._time_zone = value + + @property + def sql_mode(self) -> str: + """Get the SQL mode""" + return self.info_query("SELECT @@session.sql_mode")[0] + + @sql_mode.setter + def sql_mode(self, value: Union[str, Sequence[int]]) -> None: + """Set the SQL mode + + This method sets the SQL Mode for the current connection. The value + argument can be either a string with comma separate mode names, or + a sequence of mode names. + + It is good practice to use the constants class SQLMode: + from mysql.connector.constants import SQLMode + cnx.sql_mode = [SQLMode.NO_ZERO_DATE, SQLMode.REAL_AS_FLOAT] + """ + if isinstance(value, (list, tuple)): + value = ",".join(value) + self.cmd_query(f"SET @@session.sql_mode = '{value}'") + self._sql_mode = value + + @abstractmethod + def info_query(self, query: Any) -> Any: + """Send a query which only returns 1 row""" + + def set_login( + self, username: Optional[str] = None, password: Optional[str] = None + ) -> None: + """Set login information for MySQL + + Set the username and/or password for the user connecting to + the MySQL Server. + """ + if username is not None: + self._user = username.strip() + else: + self._user = "" + if password is not None: + self._password = password + else: + self._password = "" + + def set_unicode(self, value: bool = True) -> None: + """Toggle unicode mode + + Set whether we return string fields as unicode or not. + Default is True. 
+ """ + self._use_unicode = value + if self.converter: + self.converter.set_unicode(value) + + @property + def autocommit(self) -> bool: + """Get whether autocommit is on or off""" + value = self.info_query("SELECT @@session.autocommit")[0] + return value == 1 + + @autocommit.setter + def autocommit(self, value: bool) -> None: + """Toggle autocommit""" + switch = "ON" if value else "OFF" + self.cmd_query(f"SET @@session.autocommit = {switch}") + self._autocommit = value + + @property + def get_warnings(self) -> bool: + """Get whether this connection retrieves warnings automatically + + This method returns whether this connection retrieves warnings + automatically. + + Returns True, or False when warnings are not retrieved. + """ + return self._get_warnings + + @get_warnings.setter + def get_warnings(self, value: bool) -> None: + """Set whether warnings should be automatically retrieved + + The toggle-argument must be a boolean. When True, cursors for this + connection will retrieve information about warnings (if any). + + Raises ValueError on error. + """ + if not isinstance(value, bool): + raise ValueError("Expected a boolean type") + self._get_warnings = value + + @property + def raise_on_warnings(self) -> bool: + """Get whether this connection raises an error on warnings + + This method returns whether this connection will raise errors when + MySQL reports warnings. + + Returns True or False. + """ + return self._raise_on_warnings + + @raise_on_warnings.setter + def raise_on_warnings(self, value: bool) -> None: + """Set whether warnings raise an error + + The toggle-argument must be a boolean. When True, cursors for this + connection will raise an error when MySQL reports warnings. + + Raising on warnings implies retrieving warnings automatically. In + other words: warnings will be set to True. If set to False, warnings + will be also set to False. + + Raises ValueError on error. + """ + if not isinstance(value, bool): + raise ValueError("Expected a boolean type") + self._raise_on_warnings = value + # Don't disable warning retrieval if raising explicitly disabled + if value: + self._get_warnings = value + + @property + def unread_result(self) -> bool: + """Get whether there is an unread result + + This method is used by cursors to check whether another cursor still + needs to retrieve its result set. + + Returns True, or False when there is no unread result. + """ + return self._unread_result + + @unread_result.setter + def unread_result(self, value: bool) -> None: + """Set whether there is an unread result + + This method is used by cursors to let other cursors know there is + still a result set that needs to be retrieved. + + Raises ValueError on errors. + """ + if not isinstance(value, bool): + raise ValueError("Expected a boolean type") + self._unread_result = value + + @property + def charset(self) -> str: + """Returns the character set for current connection + + This property returns the character set name of the current connection. + The server is queried when the connection is active. If not connected, + the configured character set name is returned. + + Returns a string. + """ + return CharacterSet.get_info(self._charset_id)[0] + + @property + def python_charset(self) -> str: + """Returns the Python character set for current connection + + This property returns the character set name of the current connection. 
+ Note that, unlike property charset, this checks if the previously set + character set is supported by Python and if not, it returns the + equivalent character set that Python supports. + + Returns a string. + """ + encoding = CharacterSet.get_info(self._charset_id)[0] + if encoding in ("utf8mb4", "utf8mb3", "binary"): + return "utf8" + return encoding + + def set_charset_collation( + self, charset: Optional[Union[int, str]] = None, collation: Optional[str] = None + ) -> None: + """Sets the character set and collation for the current connection + + This method sets the character set and collation to be used for + the current connection. The charset argument can be either the + name of a character set as a string, or the numerical equivalent + as defined in constants.CharacterSet. + + When the collation is not given, the default will be looked up and + used. + + For example, the following will set the collation for the latin1 + character set to latin1_general_ci: + + set_charset('latin1','latin1_general_ci') + + """ + err_msg = "{} should be either integer, string or None" + if not isinstance(charset, (int, str)) and charset is not None: + raise ValueError(err_msg.format("charset")) + if not isinstance(collation, str) and collation is not None: + raise ValueError("collation should be either string or None") + + if charset: + if isinstance(charset, int): + ( + self._charset_id, + charset_name, + collation_name, + ) = CharacterSet.get_charset_info(charset) + elif isinstance(charset, str): + ( + self._charset_id, + charset_name, + collation_name, + ) = CharacterSet.get_charset_info(charset, collation) + else: + raise ValueError(err_msg.format("charset")) + elif collation: + ( + self._charset_id, + charset_name, + collation_name, + ) = CharacterSet.get_charset_info(collation=collation) + else: + charset = DEFAULT_CONFIGURATION["charset"] + ( + self._charset_id, + charset_name, + collation_name, + ) = CharacterSet.get_charset_info(charset, collation=None) + + self._execute_query(f"SET NAMES '{charset_name}' COLLATE '{collation_name}'") + + try: + # Required for C Extension + self.set_character_set_name(charset_name) + except AttributeError: + # Not required for pure Python connection + pass + + if self.converter: + self.converter.set_charset(charset_name) + + @property + def collation(self) -> str: + """Returns the collation for current connection + + This property returns the collation name of the current connection. + The server is queried when the connection is active. If not connected, + the configured collation name is returned. + + Returns a string. + """ + return CharacterSet.get_charset_info(self._charset_id)[2] + + @abstractmethod + def _do_handshake(self) -> Any: + """Gather information of the MySQL server before authentication""" + + @abstractmethod + def _open_connection(self) -> Any: + """Open the connection to the MySQL server""" + + def _post_connection(self) -> None: + """Executes commands after connection has been established + + This method executes commands after the connection has been + established. Some setting like autocommit, character set, and SQL mode + are set using this method. 
+ """ + self.set_charset_collation(self._charset_id) + self.autocommit = self._autocommit + if self._time_zone: + self.time_zone = self._time_zone + if self._sql_mode: + self.sql_mode = self._sql_mode + if self._init_command: + self._execute_query(self._init_command) + + @abstractmethod + def disconnect(self) -> Any: + """Disconnect from the MySQL server""" + + close: Callable[[], Any] = disconnect + + def connect(self, **kwargs: Any) -> None: + """Connect to the MySQL server + + This method sets up the connection to the MySQL server. If no + arguments are given, it will use the already configured or default + values. + """ + if kwargs: + self.config(**kwargs) + + self.disconnect() + self._open_connection() + # Server does not allow to run any other statement different from ALTER + # when user's password has been expired. + if not self._client_flags & ClientFlag.CAN_HANDLE_EXPIRED_PASSWORDS: + self._post_connection() + + def reconnect(self, attempts: int = 1, delay: int = 0) -> None: + """Attempt to reconnect to the MySQL server + + The argument attempts should be the number of times a reconnect + is tried. The delay argument is the number of seconds to wait between + each retry. + + You may want to set the number of attempts higher and use delay when + you expect the MySQL server to be down for maintenance or when you + expect the network to be temporary unavailable. + + Raises InterfaceError on errors. + """ + counter = 0 + while counter != attempts: + counter = counter + 1 + try: + self.disconnect() + self.connect() + if self.is_connected(): + break + except (Error, IOError) as err: + if counter == attempts: + msg = ( + f"Can not reconnect to MySQL after {attempts} " + f"attempt(s): {err}" + ) + raise InterfaceError(msg) from err + if delay > 0: + sleep(delay) + + @abstractmethod + def is_connected(self) -> Any: + """Reports whether the connection to MySQL Server is available""" + + @abstractmethod + def ping(self, reconnect: bool = False, attempts: int = 1, delay: int = 0) -> Any: + """Check availability of the MySQL server""" + + @abstractmethod + def commit(self) -> Any: + """Commit current transaction""" + + @abstractmethod + def cursor( + self, + buffered: Optional[bool] = None, + raw: Optional[bool] = None, + prepared: Optional[bool] = None, + cursor_class: Optional[type] = None, + dictionary: Optional[bool] = None, + named_tuple: Optional[bool] = None, + ) -> "MySQLCursorAbstract": + """Instantiates and returns a cursor""" + + @abstractmethod + def _execute_query(self, query: Any) -> Any: + """Execute a query""" + + @abstractmethod + def rollback(self) -> Any: + """Rollback current transaction""" + + def start_transaction( + self, + consistent_snapshot: bool = False, + isolation_level: Optional[str] = None, + readonly: Optional[bool] = None, + ) -> None: + """Start a transaction + + This method explicitly starts a transaction sending the + START TRANSACTION statement to the MySQL server. You can optionally + set whether there should be a consistent snapshot, which + isolation level you need or which access mode i.e. READ ONLY or + READ WRITE. + + For example, to start a transaction with isolation level SERIALIZABLE, + you would do the following: + >>> cnx = mysql.connector.connect(..) + >>> cnx.start_transaction(isolation_level='SERIALIZABLE') + + Raises ProgrammingError when a transaction is already in progress + and when ValueError when isolation_level specifies an Unknown + level. 
+ """ + if self.in_transaction: + raise ProgrammingError("Transaction already in progress") + + if isolation_level: + level = isolation_level.strip().replace("-", " ").upper() + levels = [ + "READ UNCOMMITTED", + "READ COMMITTED", + "REPEATABLE READ", + "SERIALIZABLE", + ] + + if level not in levels: + raise ValueError(f'Unknown isolation level "{isolation_level}"') + + self._execute_query(f"SET TRANSACTION ISOLATION LEVEL {level}") + + if readonly is not None: + if self._server_version < (5, 6, 5): + raise ValueError( + f"MySQL server version {self._server_version} does not " + "support this feature" + ) + + if readonly: + access_mode = "READ ONLY" + else: + access_mode = "READ WRITE" + self._execute_query(f"SET TRANSACTION {access_mode}") + + query = "START TRANSACTION" + if consistent_snapshot: + query += " WITH CONSISTENT SNAPSHOT" + self.cmd_query(query) + + def reset_session( + self, + user_variables: Optional[Dict[str, Any]] = None, + session_variables: Optional[Dict[str, Any]] = None, + ) -> None: + """Clears the current active session + + This method resets the session state, if the MySQL server is 5.7.3 + or later active session will be reset without re-authenticating. + For other server versions session will be reset by re-authenticating. + + It is possible to provide a sequence of variables and their values to + be set after clearing the session. This is possible for both user + defined variables and session variables. + This method takes two arguments user_variables and session_variables + which are dictionaries. + + Raises OperationalError if not connected, InternalError if there are + unread results and InterfaceError on errors. + """ + if not self.is_connected(): + raise OperationalError("MySQL Connection not available") + + try: + self.cmd_reset_connection() + except (NotSupportedError, NotImplementedError): + if self._compress: + raise NotSupportedError( + "Reset session is not supported with compression for " + "MySQL server version 5.7.2 or earlier" + ) from None + self.cmd_change_user( + self._user, + self._password, + self._database, + self._charset_id, + ) + + if user_variables or session_variables: + cur = self.cursor() + if user_variables: + for key, value in user_variables.items(): + cur.execute(f"SET @`{key}` = {value}") + if session_variables: + for key, value in session_variables.items(): + cur.execute(f"SET SESSION `{key}` = {value}") + cur.close() + + def set_converter_class(self, convclass: Optional[Type[MySQLConverter]]) -> None: + """ + Set the converter class to be used. This should be a class overloading + methods and members of conversion.MySQLConverter. + """ + if convclass and issubclass(convclass, MySQLConverterBase): + charset_name = CharacterSet.get_info(self._charset_id)[0] + self._converter_class = convclass + self.converter = convclass(charset_name, self._use_unicode) + self.converter.str_fallback = self._converter_str_fallback + else: + raise TypeError( + "Converter class should be a subclass of conversion.MySQLConverterBase." 
+ ) + + @abstractmethod + def get_rows( + self, + count: Optional[int] = None, + binary: bool = False, + columns: Optional[List[DescriptionType]] = None, + raw: Optional[bool] = None, + prep_stmt: Any = None, + ) -> Tuple[List[Any], Optional[Mapping[str, Any]]]: + """Get all rows returned by the MySQL server""" + + def cmd_init_db(self, database: str) -> Optional[Mapping[str, Any]]: + """Change the current database""" + raise NotImplementedError + + def cmd_query( + self, + query: Any, + raw: bool = False, + buffered: bool = False, + raw_as_string: bool = False, + ) -> Optional[Mapping[str, Any]]: + """Send a query to the MySQL server""" + raise NotImplementedError + + def cmd_query_iter( + self, statements: Any + ) -> Generator[Mapping[str, Any], None, None]: + """Send one or more statements to the MySQL server""" + raise NotImplementedError + + def cmd_refresh(self, options: int) -> Optional[Mapping[str, Any]]: + """Send the Refresh command to the MySQL server""" + raise NotImplementedError + + def cmd_quit(self) -> Any: + """Close the current connection with the server""" + raise NotImplementedError + + def cmd_shutdown( + self, shutdown_type: Optional[int] = None + ) -> Optional[Mapping[str, Any]]: + """Shut down the MySQL Server""" + raise NotImplementedError + + def cmd_statistics(self) -> Optional[Mapping[str, Any]]: + """Send the statistics command to the MySQL Server""" + raise NotImplementedError + + @staticmethod + def cmd_process_info() -> Any: + """Get the process list of the MySQL Server + + This method is a placeholder to notify that the PROCESS_INFO command + is not supported by raising the NotSupportedError. The command + "SHOW PROCESSLIST" should be send using the cmd_query()-method or + using the INFORMATION_SCHEMA database. + + Raises NotSupportedError exception + """ + raise NotSupportedError( + "Not implemented. 
Use SHOW PROCESSLIST or INFORMATION_SCHEMA"
+        )
+
+    def cmd_process_kill(self, mysql_pid: int) -> Optional[Mapping[str, Any]]:
+        """Kill a MySQL process"""
+        raise NotImplementedError
+
+    def cmd_debug(self) -> Optional[Mapping[str, Any]]:
+        """Send the DEBUG command"""
+        raise NotImplementedError
+
+    def cmd_ping(self) -> Optional[Mapping[str, Any]]:
+        """Send the PING command"""
+        raise NotImplementedError
+
+    def cmd_change_user(
+        self,
+        username: str = "",
+        password: str = "",
+        database: str = "",
+        charset: int = 45,
+        password1: str = "",
+        password2: str = "",
+        password3: str = "",
+        oci_config_file: str = "",
+    ) -> Optional[Mapping[str, Any]]:
+        """Change the currently logged-in user"""
+        raise NotImplementedError
+
+    def cmd_stmt_prepare(self, statement: Any) -> Optional[Mapping[str, Any]]:
+        """Prepare a MySQL statement"""
+        raise NotImplementedError
+
+    def cmd_stmt_execute(
+        self,
+        statement_id: Any,
+        data: Sequence[Any] = (),
+        parameters: Sequence[Any] = (),
+        flags: int = 0,
+    ) -> Any:
+        """Execute a prepared MySQL statement"""
+        raise NotImplementedError
+
+    def cmd_stmt_close(self, statement_id: Any) -> Any:
+        """Deallocate a prepared MySQL statement"""
+        raise NotImplementedError
+
+    def cmd_stmt_send_long_data(
+        self, statement_id: Any, param_id: int, data: BinaryIO
+    ) -> Any:
+        """Send data for a column"""
+        raise NotImplementedError
+
+    def cmd_stmt_reset(self, statement_id: Any) -> Any:
+        """Reset data for a prepared statement sent as long data"""
+        raise NotImplementedError
+
+    def cmd_reset_connection(self) -> Any:
+        """Resets the session state without re-authenticating"""
+        raise NotImplementedError
+
+
+class MySQLCursorAbstract(ABC):
+    """Abstract cursor class
+
+    Abstract class defining a cursor class with the methods and members
+    required by the Python Database API Specification v2.0.
+    """
+
+    def __init__(self) -> None:
+        """Initialization"""
+        self._description: Optional[List[DescriptionType]] = None
+        self._rowcount: int = -1
+        self._last_insert_id: Optional[int] = None
+        self._warnings: Optional[List[WarningType]] = None
+        self._warning_count: int = 0
+        self._executed: Optional[StrOrBytes] = None
+        self._executed_list: List[StrOrBytes] = []
+        self._stored_results: List[Any] = []
+        self.arraysize: int = 1
+
+    def __enter__(self) -> MySQLCursorAbstract:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Type[BaseException],
+        exc_value: BaseException,
+        traceback: TracebackType,
+    ) -> None:
+        self.close()
+
+    @abstractmethod
+    def callproc(self, procname: str, args: Sequence[Any] = ()) -> Any:
+        """Calls a stored procedure with the given arguments
+
+        The arguments will be set during this session, meaning
+        they will be called like _<procname>__arg<nr> where
+        <nr> is an enumeration (+1) of the arguments.
+
+        Coding Example:
+          1) Defining the Stored Routine in MySQL:
+             CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
+             BEGIN
+                 SET pProd := pFac1 * pFac2;
+             END
+
+          2) Executing in Python:
+             args = (5, 5, 0)  # 0 is a placeholder to hold pProd
+             cursor.callproc('multiply', args)
+             print(cursor.fetchone())
+
+        Does not return a value, but a result set will be
+        available when the CALL statement executes successfully.
+        Raises exceptions when something is wrong.
+ """ + + @abstractmethod + def close(self) -> Any: + """Close the cursor.""" + + @abstractmethod + def execute( + self, + operation: Any, + params: Union[Sequence[Any], Dict[str, Any]] = (), + multi: bool = False, + ) -> Any: + """Executes the given operation + + Executes the given operation substituting any markers with + the given parameters. + + For example, getting all rows where id is 5: + cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,)) + + The multi argument should be set to True when executing multiple + statements in one operation. If not set and multiple results are + found, an InterfaceError will be raised. + + If warnings where generated, and connection.get_warnings is True, then + self._warnings will be a list containing these warnings. + + Returns an iterator when multi is True, otherwise None. + """ + + @abstractmethod + def executemany( + self, operation: Any, seq_params: Sequence[Union[Sequence[Any], Dict[str, Any]]] + ) -> Any: + """Execute the given operation multiple times + + The executemany() method will execute the operation iterating + over the list of parameters in seq_params. + + Example: Inserting 3 new employees and their phone number + + data = [ + ('Jane','555-001'), + ('Joe', '555-001'), + ('John', '555-003') + ] + stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s')" + cursor.executemany(stmt, data) + + INSERT statements are optimized by batching the data, that is + using the MySQL multiple rows syntax. + + Results are discarded. If they are needed, consider looping over + data using the execute() method. + """ + + @abstractmethod + def fetchone(self) -> Optional[Sequence[Any]]: + """Returns next row of a query result set + + Returns a tuple or None. + """ + + @abstractmethod + def fetchmany(self, size: int = 1) -> List[Sequence[Any]]: + """Returns the next set of rows of a query result, returning a + list of tuples. When no more rows are available, it returns an + empty list. + + The number of rows returned can be specified using the size argument, + which defaults to one + """ + + @abstractmethod + def fetchall(self) -> Sequence[Any]: + """Returns all rows of a query result set + + Returns a list of tuples. + """ + + def nextset(self) -> Any: + """Not Implemented.""" + + def setinputsizes(self, sizes: Any) -> Any: + """Not Implemented.""" + + def setoutputsize(self, size: Any, column: Any = None) -> Any: + """Not Implemented.""" + + def reset(self, free: bool = True) -> Any: + """Reset the cursor to default""" + + @property + @abstractmethod + def description( + self, + ) -> Optional[List[DescriptionType]]: + """Returns description of columns in a result + + This property returns a list of tuples describing the columns in + in a result set. A tuple is described as follows:: + + (column_name, + type, + None, + None, + None, + None, + null_ok, + column_flags) # Addition to PEP-249 specs + + Returns a list of tuples. + """ + return self._description + + @property + @abstractmethod + def rowcount(self) -> int: + """Returns the number of rows produced or affected + + This property returns the number of rows produced by queries + such as a SELECT, or affected rows when executing DML statements + like INSERT or UPDATE. + + Note that for non-buffered cursors it is impossible to know the + number of rows produced before having fetched them all. For those, + the number of rows will be -1 right after execution, and + incremented when fetching rows. + + Returns an integer. 
+ """ + return self._rowcount + + @property + def lastrowid(self) -> Optional[int]: + """Returns the value generated for an AUTO_INCREMENT column + + Returns the value generated for an AUTO_INCREMENT column by + the previous INSERT or UPDATE statement or None when there is + no such value available. + + Returns a long value or None. + """ + return self._last_insert_id + + @property + def warnings(self) -> Optional[List[WarningType]]: + """Return warnings.""" + return self._warnings + + @property + def warning_count(self) -> int: + """Returns the number of warnings + + This property returns the number of warnings generated by the + previously executed operation. + + Returns an integer value. + """ + return self._warning_count + + def fetchwarnings(self) -> Optional[List[WarningType]]: + """Returns Warnings.""" + return self._warnings + + def get_attributes(self) -> Optional[List[Tuple[Any, Any]]]: + """Get the added query attributes so far.""" + if hasattr(self, "_cnx"): + return self._cnx.query_attrs + if hasattr(self, "_connection"): + return self._connection.query_attrs + return None + + def add_attribute(self, name: str, value: Any) -> None: + """Add a query attribute and his value.""" + if not isinstance(name, str): + raise ProgrammingError("Parameter `name` must be a string type") + if value is not None and not isinstance(value, MYSQL_PY_TYPES): + raise ProgrammingError( + f"Object {value} cannot be converted to a MySQL type" + ) + if hasattr(self, "_cnx"): + self._cnx.query_attrs_append((name, value)) + elif hasattr(self, "_connection"): + self._connection.query_attrs_append((name, value)) + + def clear_attributes(self) -> None: + """Remove all the query attributes.""" + if hasattr(self, "_cnx"): + self._cnx.query_attrs_clear() + elif hasattr(self, "_connection"): + self._connection.query_attrs_clear() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/authentication.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/authentication.py new file mode 100644 index 00000000..9d0202d5 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/authentication.py @@ -0,0 +1,81 @@ +# Copyright (c) 2014, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""Implementing support for MySQL Authentication Plugins""" + +import importlib +import logging + +from functools import lru_cache +from typing import Optional, Type + +from .errors import NotSupportedError, ProgrammingError +from .plugins import BaseAuthPlugin + +logging.getLogger(__name__).addHandler(logging.NullHandler()) + +_LOGGER = logging.getLogger(__name__) + +DEFAULT_PLUGINS_PKG = "mysql.connector.plugins" + + +@lru_cache(maxsize=10, typed=False) +def get_auth_plugin( + plugin_name: str, + auth_plugin_class: Optional[str] = None, +) -> Type[BaseAuthPlugin]: # AUTH_PLUGIN_CLASS_TYPES: + """Return authentication class based on plugin name + + This function returns the class for the authentication plugin plugin_name. + The returned class is a subclass of BaseAuthPlugin. + + Args: + plugin_name (str): Authentication plugin name. + auth_plugin_class (str): Authentication plugin class name. + + Raises: + NotSupportedError: When plugin_name is not supported. + + Returns: + Subclass of `BaseAuthPlugin`. + """ + package = DEFAULT_PLUGINS_PKG + if plugin_name: + try: + _LOGGER.info("package: %s", package) + _LOGGER.info("plugin_name: %s", plugin_name) + plugin_module = importlib.import_module(f".{plugin_name}", package) + if not auth_plugin_class or not hasattr(plugin_module, auth_plugin_class): + auth_plugin_class = plugin_module.AUTHENTICATION_PLUGIN_CLASS + _LOGGER.info("AUTHENTICATION_PLUGIN_CLASS: %s", auth_plugin_class) + return getattr(plugin_module, auth_plugin_class) + except ModuleNotFoundError as err: + _LOGGER.warning("Requested Module was not found: %s", err) + except ValueError as err: + raise ProgrammingError(f"Invalid module name: {err}") from err + raise NotSupportedError(f"Authentication plugin '{plugin_name}' is not supported") diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/charsets.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/charsets.py new file mode 100644 index 00000000..5ce9b1a0 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/charsets.py @@ -0,0 +1,620 @@ +# -*- coding: utf-8 -*- # pylint: disable=missing-module-docstring + +# Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +from typing import List, Optional, Tuple + +"""This module contains the MySQL Server Character Sets.""" # pylint: disable=pointless-string-statement + +# This file was auto-generated. +_GENERATED_ON: str = "2022-05-09" +_MYSQL_VERSION: Tuple[int, int, int] = (8, 0, 30) + +MYSQL_CHARACTER_SETS: List[Optional[Tuple[str, str, bool]]] = [ + # (character set name, collation, default) + None, + ("big5", "big5_chinese_ci", True), # 1 + ("latin2", "latin2_czech_cs", False), # 2 + ("dec8", "dec8_swedish_ci", True), # 3 + ("cp850", "cp850_general_ci", True), # 4 + ("latin1", "latin1_german1_ci", False), # 5 + ("hp8", "hp8_english_ci", True), # 6 + ("koi8r", "koi8r_general_ci", True), # 7 + ("latin1", "latin1_swedish_ci", True), # 8 + ("latin2", "latin2_general_ci", True), # 9 + ("swe7", "swe7_swedish_ci", True), # 10 + ("ascii", "ascii_general_ci", True), # 11 + ("ujis", "ujis_japanese_ci", True), # 12 + ("sjis", "sjis_japanese_ci", True), # 13 + ("cp1251", "cp1251_bulgarian_ci", False), # 14 + ("latin1", "latin1_danish_ci", False), # 15 + ("hebrew", "hebrew_general_ci", True), # 16 + None, + ("tis620", "tis620_thai_ci", True), # 18 + ("euckr", "euckr_korean_ci", True), # 19 + ("latin7", "latin7_estonian_cs", False), # 20 + ("latin2", "latin2_hungarian_ci", False), # 21 + ("koi8u", "koi8u_general_ci", True), # 22 + ("cp1251", "cp1251_ukrainian_ci", False), # 23 + ("gb2312", "gb2312_chinese_ci", True), # 24 + ("greek", "greek_general_ci", True), # 25 + ("cp1250", "cp1250_general_ci", True), # 26 + ("latin2", "latin2_croatian_ci", False), # 27 + ("gbk", "gbk_chinese_ci", True), # 28 + ("cp1257", "cp1257_lithuanian_ci", False), # 29 + ("latin5", "latin5_turkish_ci", True), # 30 + ("latin1", "latin1_german2_ci", False), # 31 + ("armscii8", "armscii8_general_ci", True), # 32 + ("utf8mb3", "utf8mb3_general_ci", True), # 33 + ("cp1250", "cp1250_czech_cs", False), # 34 + ("ucs2", "ucs2_general_ci", True), # 35 + ("cp866", "cp866_general_ci", True), # 36 + ("keybcs2", "keybcs2_general_ci", True), # 37 + ("macce", "macce_general_ci", True), # 38 + ("macroman", "macroman_general_ci", True), # 39 + ("cp852", "cp852_general_ci", True), # 40 + ("latin7", "latin7_general_ci", True), # 41 + ("latin7", "latin7_general_cs", False), # 42 + ("macce", "macce_bin", False), # 43 + ("cp1250", "cp1250_croatian_ci", False), # 44 + ("utf8mb4", "utf8mb4_general_ci", False), # 45 + ("utf8mb4", "utf8mb4_bin", False), # 46 + ("latin1", "latin1_bin", False), # 47 + ("latin1", "latin1_general_ci", False), # 48 + ("latin1", "latin1_general_cs", False), # 49 + ("cp1251", "cp1251_bin", False), # 50 + ("cp1251", "cp1251_general_ci", True), # 51 + ("cp1251", "cp1251_general_cs", False), # 52 + ("macroman", "macroman_bin", False), # 53 + ("utf16", "utf16_general_ci", True), # 54 + ("utf16", "utf16_bin", False), # 55 + ("utf16le", "utf16le_general_ci", True), # 56 + ("cp1256", "cp1256_general_ci", True), # 57 + ("cp1257", "cp1257_bin", False), # 58 + ("cp1257", "cp1257_general_ci", True), # 59 + ("utf32", "utf32_general_ci", True), # 60 + ("utf32", "utf32_bin", False), # 
61 + ("utf16le", "utf16le_bin", False), # 62 + ("binary", "binary", True), # 63 + ("armscii8", "armscii8_bin", False), # 64 + ("ascii", "ascii_bin", False), # 65 + ("cp1250", "cp1250_bin", False), # 66 + ("cp1256", "cp1256_bin", False), # 67 + ("cp866", "cp866_bin", False), # 68 + ("dec8", "dec8_bin", False), # 69 + ("greek", "greek_bin", False), # 70 + ("hebrew", "hebrew_bin", False), # 71 + ("hp8", "hp8_bin", False), # 72 + ("keybcs2", "keybcs2_bin", False), # 73 + ("koi8r", "koi8r_bin", False), # 74 + ("koi8u", "koi8u_bin", False), # 75 + ("utf8mb3", "utf8mb3_tolower_ci", False), # 76 + ("latin2", "latin2_bin", False), # 77 + ("latin5", "latin5_bin", False), # 78 + ("latin7", "latin7_bin", False), # 79 + ("cp850", "cp850_bin", False), # 80 + ("cp852", "cp852_bin", False), # 81 + ("swe7", "swe7_bin", False), # 82 + ("utf8mb3", "utf8mb3_bin", False), # 83 + ("big5", "big5_bin", False), # 84 + ("euckr", "euckr_bin", False), # 85 + ("gb2312", "gb2312_bin", False), # 86 + ("gbk", "gbk_bin", False), # 87 + ("sjis", "sjis_bin", False), # 88 + ("tis620", "tis620_bin", False), # 89 + ("ucs2", "ucs2_bin", False), # 90 + ("ujis", "ujis_bin", False), # 91 + ("geostd8", "geostd8_general_ci", True), # 92 + ("geostd8", "geostd8_bin", False), # 93 + ("latin1", "latin1_spanish_ci", False), # 94 + ("cp932", "cp932_japanese_ci", True), # 95 + ("cp932", "cp932_bin", False), # 96 + ("eucjpms", "eucjpms_japanese_ci", True), # 97 + ("eucjpms", "eucjpms_bin", False), # 98 + ("cp1250", "cp1250_polish_ci", False), # 99 + None, + ("utf16", "utf16_unicode_ci", False), # 101 + ("utf16", "utf16_icelandic_ci", False), # 102 + ("utf16", "utf16_latvian_ci", False), # 103 + ("utf16", "utf16_romanian_ci", False), # 104 + ("utf16", "utf16_slovenian_ci", False), # 105 + ("utf16", "utf16_polish_ci", False), # 106 + ("utf16", "utf16_estonian_ci", False), # 107 + ("utf16", "utf16_spanish_ci", False), # 108 + ("utf16", "utf16_swedish_ci", False), # 109 + ("utf16", "utf16_turkish_ci", False), # 110 + ("utf16", "utf16_czech_ci", False), # 111 + ("utf16", "utf16_danish_ci", False), # 112 + ("utf16", "utf16_lithuanian_ci", False), # 113 + ("utf16", "utf16_slovak_ci", False), # 114 + ("utf16", "utf16_spanish2_ci", False), # 115 + ("utf16", "utf16_roman_ci", False), # 116 + ("utf16", "utf16_persian_ci", False), # 117 + ("utf16", "utf16_esperanto_ci", False), # 118 + ("utf16", "utf16_hungarian_ci", False), # 119 + ("utf16", "utf16_sinhala_ci", False), # 120 + ("utf16", "utf16_german2_ci", False), # 121 + ("utf16", "utf16_croatian_ci", False), # 122 + ("utf16", "utf16_unicode_520_ci", False), # 123 + ("utf16", "utf16_vietnamese_ci", False), # 124 + None, + None, + None, + ("ucs2", "ucs2_unicode_ci", False), # 128 + ("ucs2", "ucs2_icelandic_ci", False), # 129 + ("ucs2", "ucs2_latvian_ci", False), # 130 + ("ucs2", "ucs2_romanian_ci", False), # 131 + ("ucs2", "ucs2_slovenian_ci", False), # 132 + ("ucs2", "ucs2_polish_ci", False), # 133 + ("ucs2", "ucs2_estonian_ci", False), # 134 + ("ucs2", "ucs2_spanish_ci", False), # 135 + ("ucs2", "ucs2_swedish_ci", False), # 136 + ("ucs2", "ucs2_turkish_ci", False), # 137 + ("ucs2", "ucs2_czech_ci", False), # 138 + ("ucs2", "ucs2_danish_ci", False), # 139 + ("ucs2", "ucs2_lithuanian_ci", False), # 140 + ("ucs2", "ucs2_slovak_ci", False), # 141 + ("ucs2", "ucs2_spanish2_ci", False), # 142 + ("ucs2", "ucs2_roman_ci", False), # 143 + ("ucs2", "ucs2_persian_ci", False), # 144 + ("ucs2", "ucs2_esperanto_ci", False), # 145 + ("ucs2", "ucs2_hungarian_ci", False), # 146 + ("ucs2", "ucs2_sinhala_ci", False), 
# 147 + ("ucs2", "ucs2_german2_ci", False), # 148 + ("ucs2", "ucs2_croatian_ci", False), # 149 + ("ucs2", "ucs2_unicode_520_ci", False), # 150 + ("ucs2", "ucs2_vietnamese_ci", False), # 151 + None, + None, + None, + None, + None, + None, + None, + ("ucs2", "ucs2_general_mysql500_ci", False), # 159 + ("utf32", "utf32_unicode_ci", False), # 160 + ("utf32", "utf32_icelandic_ci", False), # 161 + ("utf32", "utf32_latvian_ci", False), # 162 + ("utf32", "utf32_romanian_ci", False), # 163 + ("utf32", "utf32_slovenian_ci", False), # 164 + ("utf32", "utf32_polish_ci", False), # 165 + ("utf32", "utf32_estonian_ci", False), # 166 + ("utf32", "utf32_spanish_ci", False), # 167 + ("utf32", "utf32_swedish_ci", False), # 168 + ("utf32", "utf32_turkish_ci", False), # 169 + ("utf32", "utf32_czech_ci", False), # 170 + ("utf32", "utf32_danish_ci", False), # 171 + ("utf32", "utf32_lithuanian_ci", False), # 172 + ("utf32", "utf32_slovak_ci", False), # 173 + ("utf32", "utf32_spanish2_ci", False), # 174 + ("utf32", "utf32_roman_ci", False), # 175 + ("utf32", "utf32_persian_ci", False), # 176 + ("utf32", "utf32_esperanto_ci", False), # 177 + ("utf32", "utf32_hungarian_ci", False), # 178 + ("utf32", "utf32_sinhala_ci", False), # 179 + ("utf32", "utf32_german2_ci", False), # 180 + ("utf32", "utf32_croatian_ci", False), # 181 + ("utf32", "utf32_unicode_520_ci", False), # 182 + ("utf32", "utf32_vietnamese_ci", False), # 183 + None, + None, + None, + None, + None, + None, + None, + None, + ("utf8mb3", "utf8mb3_unicode_ci", False), # 192 + ("utf8mb3", "utf8mb3_icelandic_ci", False), # 193 + ("utf8mb3", "utf8mb3_latvian_ci", False), # 194 + ("utf8mb3", "utf8mb3_romanian_ci", False), # 195 + ("utf8mb3", "utf8mb3_slovenian_ci", False), # 196 + ("utf8mb3", "utf8mb3_polish_ci", False), # 197 + ("utf8mb3", "utf8mb3_estonian_ci", False), # 198 + ("utf8mb3", "utf8mb3_spanish_ci", False), # 199 + ("utf8mb3", "utf8mb3_swedish_ci", False), # 200 + ("utf8mb3", "utf8mb3_turkish_ci", False), # 201 + ("utf8mb3", "utf8mb3_czech_ci", False), # 202 + ("utf8mb3", "utf8mb3_danish_ci", False), # 203 + ("utf8mb3", "utf8mb3_lithuanian_ci", False), # 204 + ("utf8mb3", "utf8mb3_slovak_ci", False), # 205 + ("utf8mb3", "utf8mb3_spanish2_ci", False), # 206 + ("utf8mb3", "utf8mb3_roman_ci", False), # 207 + ("utf8mb3", "utf8mb3_persian_ci", False), # 208 + ("utf8mb3", "utf8mb3_esperanto_ci", False), # 209 + ("utf8mb3", "utf8mb3_hungarian_ci", False), # 210 + ("utf8mb3", "utf8mb3_sinhala_ci", False), # 211 + ("utf8mb3", "utf8mb3_german2_ci", False), # 212 + ("utf8mb3", "utf8mb3_croatian_ci", False), # 213 + ("utf8mb3", "utf8mb3_unicode_520_ci", False), # 214 + ("utf8mb3", "utf8mb3_vietnamese_ci", False), # 215 + None, + None, + None, + None, + None, + None, + None, + ("utf8mb3", "utf8mb3_general_mysql500_ci", False), # 223 + ("utf8mb4", "utf8mb4_unicode_ci", False), # 224 + ("utf8mb4", "utf8mb4_icelandic_ci", False), # 225 + ("utf8mb4", "utf8mb4_latvian_ci", False), # 226 + ("utf8mb4", "utf8mb4_romanian_ci", False), # 227 + ("utf8mb4", "utf8mb4_slovenian_ci", False), # 228 + ("utf8mb4", "utf8mb4_polish_ci", False), # 229 + ("utf8mb4", "utf8mb4_estonian_ci", False), # 230 + ("utf8mb4", "utf8mb4_spanish_ci", False), # 231 + ("utf8mb4", "utf8mb4_swedish_ci", False), # 232 + ("utf8mb4", "utf8mb4_turkish_ci", False), # 233 + ("utf8mb4", "utf8mb4_czech_ci", False), # 234 + ("utf8mb4", "utf8mb4_danish_ci", False), # 235 + ("utf8mb4", "utf8mb4_lithuanian_ci", False), # 236 + ("utf8mb4", "utf8mb4_slovak_ci", False), # 237 + ("utf8mb4", "utf8mb4_spanish2_ci", 
False), # 238 + ("utf8mb4", "utf8mb4_roman_ci", False), # 239 + ("utf8mb4", "utf8mb4_persian_ci", False), # 240 + ("utf8mb4", "utf8mb4_esperanto_ci", False), # 241 + ("utf8mb4", "utf8mb4_hungarian_ci", False), # 242 + ("utf8mb4", "utf8mb4_sinhala_ci", False), # 243 + ("utf8mb4", "utf8mb4_german2_ci", False), # 244 + ("utf8mb4", "utf8mb4_croatian_ci", False), # 245 + ("utf8mb4", "utf8mb4_unicode_520_ci", False), # 246 + ("utf8mb4", "utf8mb4_vietnamese_ci", False), # 247 + ("gb18030", "gb18030_chinese_ci", True), # 248 + ("gb18030", "gb18030_bin", False), # 249 + ("gb18030", "gb18030_unicode_520_ci", False), # 250 + None, + None, + None, + None, + ("utf8mb4", "utf8mb4_0900_ai_ci", True), # 255 + ("utf8mb4", "utf8mb4_de_pb_0900_ai_ci", False), # 256 + ("utf8mb4", "utf8mb4_is_0900_ai_ci", False), # 257 + ("utf8mb4", "utf8mb4_lv_0900_ai_ci", False), # 258 + ("utf8mb4", "utf8mb4_ro_0900_ai_ci", False), # 259 + ("utf8mb4", "utf8mb4_sl_0900_ai_ci", False), # 260 + ("utf8mb4", "utf8mb4_pl_0900_ai_ci", False), # 261 + ("utf8mb4", "utf8mb4_et_0900_ai_ci", False), # 262 + ("utf8mb4", "utf8mb4_es_0900_ai_ci", False), # 263 + ("utf8mb4", "utf8mb4_sv_0900_ai_ci", False), # 264 + ("utf8mb4", "utf8mb4_tr_0900_ai_ci", False), # 265 + ("utf8mb4", "utf8mb4_cs_0900_ai_ci", False), # 266 + ("utf8mb4", "utf8mb4_da_0900_ai_ci", False), # 267 + ("utf8mb4", "utf8mb4_lt_0900_ai_ci", False), # 268 + ("utf8mb4", "utf8mb4_sk_0900_ai_ci", False), # 269 + ("utf8mb4", "utf8mb4_es_trad_0900_ai_ci", False), # 270 + ("utf8mb4", "utf8mb4_la_0900_ai_ci", False), # 271 + None, + ("utf8mb4", "utf8mb4_eo_0900_ai_ci", False), # 273 + ("utf8mb4", "utf8mb4_hu_0900_ai_ci", False), # 274 + ("utf8mb4", "utf8mb4_hr_0900_ai_ci", False), # 275 + None, + ("utf8mb4", "utf8mb4_vi_0900_ai_ci", False), # 277 + ("utf8mb4", "utf8mb4_0900_as_cs", False), # 278 + ("utf8mb4", "utf8mb4_de_pb_0900_as_cs", False), # 279 + ("utf8mb4", "utf8mb4_is_0900_as_cs", False), # 280 + ("utf8mb4", "utf8mb4_lv_0900_as_cs", False), # 281 + ("utf8mb4", "utf8mb4_ro_0900_as_cs", False), # 282 + ("utf8mb4", "utf8mb4_sl_0900_as_cs", False), # 283 + ("utf8mb4", "utf8mb4_pl_0900_as_cs", False), # 284 + ("utf8mb4", "utf8mb4_et_0900_as_cs", False), # 285 + ("utf8mb4", "utf8mb4_es_0900_as_cs", False), # 286 + ("utf8mb4", "utf8mb4_sv_0900_as_cs", False), # 287 + ("utf8mb4", "utf8mb4_tr_0900_as_cs", False), # 288 + ("utf8mb4", "utf8mb4_cs_0900_as_cs", False), # 289 + ("utf8mb4", "utf8mb4_da_0900_as_cs", False), # 290 + ("utf8mb4", "utf8mb4_lt_0900_as_cs", False), # 291 + ("utf8mb4", "utf8mb4_sk_0900_as_cs", False), # 292 + ("utf8mb4", "utf8mb4_es_trad_0900_as_cs", False), # 293 + ("utf8mb4", "utf8mb4_la_0900_as_cs", False), # 294 + None, + ("utf8mb4", "utf8mb4_eo_0900_as_cs", False), # 296 + ("utf8mb4", "utf8mb4_hu_0900_as_cs", False), # 297 + ("utf8mb4", "utf8mb4_hr_0900_as_cs", False), # 298 + None, + ("utf8mb4", "utf8mb4_vi_0900_as_cs", False), # 300 + None, + None, + ("utf8mb4", "utf8mb4_ja_0900_as_cs", False), # 303 + ("utf8mb4", "utf8mb4_ja_0900_as_cs_ks", False), # 304 + ("utf8mb4", "utf8mb4_0900_as_ci", False), # 305 + ("utf8mb4", "utf8mb4_ru_0900_ai_ci", False), # 306 + ("utf8mb4", "utf8mb4_ru_0900_as_cs", False), # 307 + ("utf8mb4", "utf8mb4_zh_0900_as_cs", False), # 308 + ("utf8mb4", "utf8mb4_0900_bin", False), # 309 + ("utf8mb4", "utf8mb4_nb_0900_ai_ci", False), # 310 + ("utf8mb4", "utf8mb4_nb_0900_as_cs", False), # 311 + ("utf8mb4", "utf8mb4_nn_0900_ai_ci", False), # 312 + ("utf8mb4", "utf8mb4_nn_0900_as_cs", False), # 313 + ("utf8mb4", 
"utf8mb4_sr_latn_0900_ai_ci", False), # 314 + ("utf8mb4", "utf8mb4_sr_latn_0900_as_cs", False), # 315 + ("utf8mb4", "utf8mb4_bs_0900_ai_ci", False), # 316 + ("utf8mb4", "utf8mb4_bs_0900_as_cs", False), # 317 + ("utf8mb4", "utf8mb4_bg_0900_ai_ci", False), # 318 + ("utf8mb4", "utf8mb4_bg_0900_as_cs", False), # 319 + ("utf8mb4", "utf8mb4_gl_0900_ai_ci", False), # 320 + ("utf8mb4", "utf8mb4_gl_0900_as_cs", False), # 321 + ("utf8mb4", "utf8mb4_mn_cyrl_0900_ai_ci", False), # 322 + ("utf8mb4", "utf8mb4_mn_cyrl_0900_as_cs", False), # 323 +] + +MYSQL_CHARACTER_SETS_57: List[Optional[Tuple[str, str, bool]]] = [ + # (character set name, collation, default) + None, + ("big5", "big5_chinese_ci", True), # 1 + ("latin2", "latin2_czech_cs", False), # 2 + ("dec8", "dec8_swedish_ci", True), # 3 + ("cp850", "cp850_general_ci", True), # 4 + ("latin1", "latin1_german1_ci", False), # 5 + ("hp8", "hp8_english_ci", True), # 6 + ("koi8r", "koi8r_general_ci", True), # 7 + ("latin1", "latin1_swedish_ci", True), # 8 + ("latin2", "latin2_general_ci", True), # 9 + ("swe7", "swe7_swedish_ci", True), # 10 + ("ascii", "ascii_general_ci", True), # 11 + ("ujis", "ujis_japanese_ci", True), # 12 + ("sjis", "sjis_japanese_ci", True), # 13 + ("cp1251", "cp1251_bulgarian_ci", False), # 14 + ("latin1", "latin1_danish_ci", False), # 15 + ("hebrew", "hebrew_general_ci", True), # 16 + None, + ("tis620", "tis620_thai_ci", True), # 18 + ("euckr", "euckr_korean_ci", True), # 19 + ("latin7", "latin7_estonian_cs", False), # 20 + ("latin2", "latin2_hungarian_ci", False), # 21 + ("koi8u", "koi8u_general_ci", True), # 22 + ("cp1251", "cp1251_ukrainian_ci", False), # 23 + ("gb2312", "gb2312_chinese_ci", True), # 24 + ("greek", "greek_general_ci", True), # 25 + ("cp1250", "cp1250_general_ci", True), # 26 + ("latin2", "latin2_croatian_ci", False), # 27 + ("gbk", "gbk_chinese_ci", True), # 28 + ("cp1257", "cp1257_lithuanian_ci", False), # 29 + ("latin5", "latin5_turkish_ci", True), # 30 + ("latin1", "latin1_german2_ci", False), # 31 + ("armscii8", "armscii8_general_ci", True), # 32 + ("utf8", "utf8_general_ci", True), # 33 + ("cp1250", "cp1250_czech_cs", False), # 34 + ("ucs2", "ucs2_general_ci", True), # 35 + ("cp866", "cp866_general_ci", True), # 36 + ("keybcs2", "keybcs2_general_ci", True), # 37 + ("macce", "macce_general_ci", True), # 38 + ("macroman", "macroman_general_ci", True), # 39 + ("cp852", "cp852_general_ci", True), # 40 + ("latin7", "latin7_general_ci", True), # 41 + ("latin7", "latin7_general_cs", False), # 42 + ("macce", "macce_bin", False), # 43 + ("cp1250", "cp1250_croatian_ci", False), # 44 + ("utf8mb4", "utf8mb4_general_ci", True), # 45 + ("utf8mb4", "utf8mb4_bin", False), # 46 + ("latin1", "latin1_bin", False), # 47 + ("latin1", "latin1_general_ci", False), # 48 + ("latin1", "latin1_general_cs", False), # 49 + ("cp1251", "cp1251_bin", False), # 50 + ("cp1251", "cp1251_general_ci", True), # 51 + ("cp1251", "cp1251_general_cs", False), # 52 + ("macroman", "macroman_bin", False), # 53 + ("utf16", "utf16_general_ci", True), # 54 + ("utf16", "utf16_bin", False), # 55 + ("utf16le", "utf16le_general_ci", True), # 56 + ("cp1256", "cp1256_general_ci", True), # 57 + ("cp1257", "cp1257_bin", False), # 58 + ("cp1257", "cp1257_general_ci", True), # 59 + ("utf32", "utf32_general_ci", True), # 60 + ("utf32", "utf32_bin", False), # 61 + ("utf16le", "utf16le_bin", False), # 62 + ("binary", "binary", True), # 63 + ("armscii8", "armscii8_bin", False), # 64 + ("ascii", "ascii_bin", False), # 65 + ("cp1250", "cp1250_bin", False), # 66 + 
("cp1256", "cp1256_bin", False), # 67 + ("cp866", "cp866_bin", False), # 68 + ("dec8", "dec8_bin", False), # 69 + ("greek", "greek_bin", False), # 70 + ("hebrew", "hebrew_bin", False), # 71 + ("hp8", "hp8_bin", False), # 72 + ("keybcs2", "keybcs2_bin", False), # 73 + ("koi8r", "koi8r_bin", False), # 74 + ("koi8u", "koi8u_bin", False), # 75 + None, + ("latin2", "latin2_bin", False), # 77 + ("latin5", "latin5_bin", False), # 78 + ("latin7", "latin7_bin", False), # 79 + ("cp850", "cp850_bin", False), # 80 + ("cp852", "cp852_bin", False), # 81 + ("swe7", "swe7_bin", False), # 82 + ("utf8", "utf8_bin", False), # 83 + ("big5", "big5_bin", False), # 84 + ("euckr", "euckr_bin", False), # 85 + ("gb2312", "gb2312_bin", False), # 86 + ("gbk", "gbk_bin", False), # 87 + ("sjis", "sjis_bin", False), # 88 + ("tis620", "tis620_bin", False), # 89 + ("ucs2", "ucs2_bin", False), # 90 + ("ujis", "ujis_bin", False), # 91 + ("geostd8", "geostd8_general_ci", True), # 92 + ("geostd8", "geostd8_bin", False), # 93 + ("latin1", "latin1_spanish_ci", False), # 94 + ("cp932", "cp932_japanese_ci", True), # 95 + ("cp932", "cp932_bin", False), # 96 + ("eucjpms", "eucjpms_japanese_ci", True), # 97 + ("eucjpms", "eucjpms_bin", False), # 98 + ("cp1250", "cp1250_polish_ci", False), # 99 + None, + ("utf16", "utf16_unicode_ci", False), # 101 + ("utf16", "utf16_icelandic_ci", False), # 102 + ("utf16", "utf16_latvian_ci", False), # 103 + ("utf16", "utf16_romanian_ci", False), # 104 + ("utf16", "utf16_slovenian_ci", False), # 105 + ("utf16", "utf16_polish_ci", False), # 106 + ("utf16", "utf16_estonian_ci", False), # 107 + ("utf16", "utf16_spanish_ci", False), # 108 + ("utf16", "utf16_swedish_ci", False), # 109 + ("utf16", "utf16_turkish_ci", False), # 110 + ("utf16", "utf16_czech_ci", False), # 111 + ("utf16", "utf16_danish_ci", False), # 112 + ("utf16", "utf16_lithuanian_ci", False), # 113 + ("utf16", "utf16_slovak_ci", False), # 114 + ("utf16", "utf16_spanish2_ci", False), # 115 + ("utf16", "utf16_roman_ci", False), # 116 + ("utf16", "utf16_persian_ci", False), # 117 + ("utf16", "utf16_esperanto_ci", False), # 118 + ("utf16", "utf16_hungarian_ci", False), # 119 + ("utf16", "utf16_sinhala_ci", False), # 120 + ("utf16", "utf16_german2_ci", False), # 121 + ("utf16", "utf16_croatian_ci", False), # 122 + ("utf16", "utf16_unicode_520_ci", False), # 123 + ("utf16", "utf16_vietnamese_ci", False), # 124 + None, + None, + None, + ("ucs2", "ucs2_unicode_ci", False), # 128 + ("ucs2", "ucs2_icelandic_ci", False), # 129 + ("ucs2", "ucs2_latvian_ci", False), # 130 + ("ucs2", "ucs2_romanian_ci", False), # 131 + ("ucs2", "ucs2_slovenian_ci", False), # 132 + ("ucs2", "ucs2_polish_ci", False), # 133 + ("ucs2", "ucs2_estonian_ci", False), # 134 + ("ucs2", "ucs2_spanish_ci", False), # 135 + ("ucs2", "ucs2_swedish_ci", False), # 136 + ("ucs2", "ucs2_turkish_ci", False), # 137 + ("ucs2", "ucs2_czech_ci", False), # 138 + ("ucs2", "ucs2_danish_ci", False), # 139 + ("ucs2", "ucs2_lithuanian_ci", False), # 140 + ("ucs2", "ucs2_slovak_ci", False), # 141 + ("ucs2", "ucs2_spanish2_ci", False), # 142 + ("ucs2", "ucs2_roman_ci", False), # 143 + ("ucs2", "ucs2_persian_ci", False), # 144 + ("ucs2", "ucs2_esperanto_ci", False), # 145 + ("ucs2", "ucs2_hungarian_ci", False), # 146 + ("ucs2", "ucs2_sinhala_ci", False), # 147 + ("ucs2", "ucs2_german2_ci", False), # 148 + ("ucs2", "ucs2_croatian_ci", False), # 149 + ("ucs2", "ucs2_unicode_520_ci", False), # 150 + ("ucs2", "ucs2_vietnamese_ci", False), # 151 + None, + None, + None, + None, + None, + None, + None, + 
("ucs2", "ucs2_general_mysql500_ci", False), # 159 + ("utf32", "utf32_unicode_ci", False), # 160 + ("utf32", "utf32_icelandic_ci", False), # 161 + ("utf32", "utf32_latvian_ci", False), # 162 + ("utf32", "utf32_romanian_ci", False), # 163 + ("utf32", "utf32_slovenian_ci", False), # 164 + ("utf32", "utf32_polish_ci", False), # 165 + ("utf32", "utf32_estonian_ci", False), # 166 + ("utf32", "utf32_spanish_ci", False), # 167 + ("utf32", "utf32_swedish_ci", False), # 168 + ("utf32", "utf32_turkish_ci", False), # 169 + ("utf32", "utf32_czech_ci", False), # 170 + ("utf32", "utf32_danish_ci", False), # 171 + ("utf32", "utf32_lithuanian_ci", False), # 172 + ("utf32", "utf32_slovak_ci", False), # 173 + ("utf32", "utf32_spanish2_ci", False), # 174 + ("utf32", "utf32_roman_ci", False), # 175 + ("utf32", "utf32_persian_ci", False), # 176 + ("utf32", "utf32_esperanto_ci", False), # 177 + ("utf32", "utf32_hungarian_ci", False), # 178 + ("utf32", "utf32_sinhala_ci", False), # 179 + ("utf32", "utf32_german2_ci", False), # 180 + ("utf32", "utf32_croatian_ci", False), # 181 + ("utf32", "utf32_unicode_520_ci", False), # 182 + ("utf32", "utf32_vietnamese_ci", False), # 183 + None, + None, + None, + None, + None, + None, + None, + None, + ("utf8", "utf8_unicode_ci", False), # 192 + ("utf8", "utf8_icelandic_ci", False), # 193 + ("utf8", "utf8_latvian_ci", False), # 194 + ("utf8", "utf8_romanian_ci", False), # 195 + ("utf8", "utf8_slovenian_ci", False), # 196 + ("utf8", "utf8_polish_ci", False), # 197 + ("utf8", "utf8_estonian_ci", False), # 198 + ("utf8", "utf8_spanish_ci", False), # 199 + ("utf8", "utf8_swedish_ci", False), # 200 + ("utf8", "utf8_turkish_ci", False), # 201 + ("utf8", "utf8_czech_ci", False), # 202 + ("utf8", "utf8_danish_ci", False), # 203 + ("utf8", "utf8_lithuanian_ci", False), # 204 + ("utf8", "utf8_slovak_ci", False), # 205 + ("utf8", "utf8_spanish2_ci", False), # 206 + ("utf8", "utf8_roman_ci", False), # 207 + ("utf8", "utf8_persian_ci", False), # 208 + ("utf8", "utf8_esperanto_ci", False), # 209 + ("utf8", "utf8_hungarian_ci", False), # 210 + ("utf8", "utf8_sinhala_ci", False), # 211 + ("utf8", "utf8_german2_ci", False), # 212 + ("utf8", "utf8_croatian_ci", False), # 213 + ("utf8", "utf8_unicode_520_ci", False), # 214 + ("utf8", "utf8_vietnamese_ci", False), # 215 + None, + None, + None, + None, + None, + None, + None, + ("utf8", "utf8_general_mysql500_ci", False), # 223 + ("utf8mb4", "utf8mb4_unicode_ci", False), # 224 + ("utf8mb4", "utf8mb4_icelandic_ci", False), # 225 + ("utf8mb4", "utf8mb4_latvian_ci", False), # 226 + ("utf8mb4", "utf8mb4_romanian_ci", False), # 227 + ("utf8mb4", "utf8mb4_slovenian_ci", False), # 228 + ("utf8mb4", "utf8mb4_polish_ci", False), # 229 + ("utf8mb4", "utf8mb4_estonian_ci", False), # 230 + ("utf8mb4", "utf8mb4_spanish_ci", False), # 231 + ("utf8mb4", "utf8mb4_swedish_ci", False), # 232 + ("utf8mb4", "utf8mb4_turkish_ci", False), # 233 + ("utf8mb4", "utf8mb4_czech_ci", False), # 234 + ("utf8mb4", "utf8mb4_danish_ci", False), # 235 + ("utf8mb4", "utf8mb4_lithuanian_ci", False), # 236 + ("utf8mb4", "utf8mb4_slovak_ci", False), # 237 + ("utf8mb4", "utf8mb4_spanish2_ci", False), # 238 + ("utf8mb4", "utf8mb4_roman_ci", False), # 239 + ("utf8mb4", "utf8mb4_persian_ci", False), # 240 + ("utf8mb4", "utf8mb4_esperanto_ci", False), # 241 + ("utf8mb4", "utf8mb4_hungarian_ci", False), # 242 + ("utf8mb4", "utf8mb4_sinhala_ci", False), # 243 + ("utf8mb4", "utf8mb4_german2_ci", False), # 244 + ("utf8mb4", "utf8mb4_croatian_ci", False), # 245 + ("utf8mb4", 
"utf8mb4_unicode_520_ci", False), # 246 + ("utf8mb4", "utf8mb4_vietnamese_ci", False), # 247 + ("gb18030", "gb18030_chinese_ci", True), # 248 + ("gb18030", "gb18030_bin", False), # 249 + ("gb18030", "gb18030_unicode_520_ci", False), # 250 +] diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/connection.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/connection.py new file mode 100644 index 00000000..267de899 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/connection.py @@ -0,0 +1,1711 @@ +# Copyright (c) 2009, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +# mypy: disable-error-code="arg-type,operator,attr-defined,assignment" + +"""Implementing communication with MySQL servers.""" + +import datetime +import getpass +import logging +import os +import socket +import struct +import warnings + +from decimal import Decimal +from io import IOBase +from typing import ( + Any, + BinaryIO, + Dict, + Generator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from . 
import version +from .abstracts import MySQLConnectionAbstract +from .authentication import get_auth_plugin +from .constants import ( + NET_BUFFER_LENGTH, + CharacterSet, + ClientFlag, + FieldType, + ServerCmd, + ServerFlag, + ShutdownType, + flag_is_set, +) +from .conversion import MySQLConverter +from .cursor import ( + CursorBase, + MySQLCursor, + MySQLCursorBuffered, + MySQLCursorBufferedDict, + MySQLCursorBufferedNamedTuple, + MySQLCursorBufferedRaw, + MySQLCursorDict, + MySQLCursorNamedTuple, + MySQLCursorPrepared, + MySQLCursorPreparedDict, + MySQLCursorRaw, +) +from .errors import ( + DatabaseError, + Error, + InterfaceError, + InternalError, + NotSupportedError, + OperationalError, + ProgrammingError, + get_exception, +) +from .network import MySQLTCPSocket, MySQLUnixSocket +from .plugins import BaseAuthPlugin +from .protocol import MySQLProtocol +from .types import ( + ConnAttrsType, + DescriptionType, + EofPacketType, + HandShakeType, + OkPacketType, + ResultType, + RowType, + SocketType, + StatsPacketType, + StrOrBytes, + SupportedMysqlBinaryProtocolTypes, +) +from .utils import get_platform, int1store, int4store, lc_int + +logging.getLogger(__name__).addHandler(logging.NullHandler()) + +_LOGGER = logging.getLogger(__name__) + + +class MySQLConnection(MySQLConnectionAbstract): + """Connection to a MySQL Server""" + + def __init__(self, **kwargs: Any) -> None: + self._protocol: Optional[MySQLProtocol] = None + self._socket: Optional[SocketType] = None + self._handshake: Optional[HandShakeType] = None + super().__init__() + + self._converter_class: Type[MySQLConverter] = MySQLConverter + + self._client_flags: int = ClientFlag.get_default() + self._charset_id: int = 45 + self._sql_mode: Optional[str] = None + self._time_zone: Optional[str] = None + self._autocommit: bool = False + + self._user: str = "" + self._password: str = "" + self._database: str = "" + self._host: str = "127.0.0.1" + self._port: int = 3306 + self._unix_socket: Optional[str] = None + self._client_host: str = "" + self._client_port: int = 0 + self._ssl: Dict[str, Optional[Union[str, bool, List[str]]]] = {} + self._force_ipv6: bool = False + + self._use_unicode: bool = True + self._get_warnings: bool = False + self._raise_on_warnings: bool = False + self._buffered: bool = False + self._unread_result: bool = False + self._have_next_result: bool = False + self._raw: bool = False + self._in_transaction: bool = False + + self._prepared_statements: Any = None + + self._ssl_active: bool = False + self._auth_plugin: Optional[str] = None + self._krb_service_principal: Optional[str] = None + self._pool_config_version: Any = None + self._query_attrs_supported: int = False + + self._columns_desc: List[DescriptionType] = [] + self._mfa_nfactor: int = 1 + + if kwargs: + try: + self.connect(**kwargs) + except Exception: + # Tidy-up underlying socket on failure + self.close() + self._socket = None + raise + + def _add_default_conn_attrs(self) -> None: + """Add the default connection attributes.""" + platform = get_platform() + license_chunks = version.LICENSE.split(" ") + if license_chunks[0] == "GPLv2": + client_license = "GPL-2.0" + else: + client_license = "Commercial" + default_conn_attrs = { + "_pid": str(os.getpid()), + "_platform": platform["arch"], + "_source_host": socket.gethostname(), + "_client_name": "mysql-connector-python", + "_client_license": client_license, + "_client_version": ".".join([str(x) for x in version.VERSION[0:3]]), + "_os": platform["version"], + } + + 
self._conn_attrs.update((default_conn_attrs)) + + def _do_handshake(self) -> None: + """Get the handshake from the MySQL server""" + packet = self._socket.recv() + if packet[4] == 255: + raise get_exception(packet) + + self._handshake = None + handshake = self._protocol.parse_handshake(packet) + + server_version = handshake["server_version_original"] + + self._server_version = self._check_server_version( + server_version + if isinstance(server_version, (str, bytes, bytearray)) + else "Unknown" + ) + CharacterSet.set_mysql_version(self._server_version) + + if not handshake["capabilities"] & ClientFlag.SSL: + if self._auth_plugin == "mysql_clear_password" and not self.is_secure: + raise InterfaceError( + "Clear password authentication is not supported over " + "insecure channels" + ) + if self._ssl.get("verify_cert"): + raise InterfaceError( + "SSL is required but the server doesn't support it", + errno=2026, + ) + self._client_flags &= ~ClientFlag.SSL + elif not self._ssl_disabled: + self._client_flags |= ClientFlag.SSL + + if handshake["capabilities"] & ClientFlag.PLUGIN_AUTH: + self.set_client_flags([ClientFlag.PLUGIN_AUTH]) + + if handshake["capabilities"] & ClientFlag.CLIENT_QUERY_ATTRIBUTES: + self._query_attrs_supported = True + self.set_client_flags([ClientFlag.CLIENT_QUERY_ATTRIBUTES]) + + if handshake["capabilities"] & ClientFlag.MULTI_FACTOR_AUTHENTICATION: + self.set_client_flags([ClientFlag.MULTI_FACTOR_AUTHENTICATION]) + + self._handshake = handshake + + def _do_auth( + self, + username: Optional[str] = None, + password: Optional[str] = None, + database: Optional[str] = None, + client_flags: int = 0, + charset: int = 45, + ssl_options: Optional[Dict[str, Optional[Union[str, bool, List[str]]]]] = None, + conn_attrs: Optional[ConnAttrsType] = None, + ) -> bool: + """Authenticate with the MySQL server + + Authentication happens in two parts. We first send a response to the + handshake. The MySQL server will then send either an AuthSwitchRequest + or an error packet. + + Raises NotSupportedError when we get the old, insecure password + reply back. Raises any error coming from MySQL. 
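+
+        Schematically, the exchange handled by this method together with
+        _auth_switch_request() is roughly:
+
+            client --> handshake response (make_auth)
+            server --> OK | AuthSwitchRequest | AuthMoreData | Error
+            client --> plugin-specific response(s)
+            ...        until an OK or Error packet arrives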
+ """ + self._ssl_active = False + if ssl_options is None: + ssl_options = {} + if not self._ssl_disabled and (client_flags & ClientFlag.SSL): + packet: bytes = self._protocol.make_auth_ssl( + charset=charset, client_flags=client_flags + ) + self._socket.send(packet) + if ssl_options.get("tls_ciphersuites") is not None: + tls_ciphersuites = ":".join(ssl_options.get("tls_ciphersuites")) + else: + tls_ciphersuites = "" + self._socket.switch_to_ssl( + ssl_options.get("ca"), + ssl_options.get("cert"), + ssl_options.get("key"), + ssl_options.get("verify_cert") or False, + ssl_options.get("verify_identity") or False, + tls_ciphersuites, + ssl_options.get("tls_versions"), + ) + self._ssl_active = True + + if self._password1 and password != self._password1: + password = self._password1 + + _LOGGER.debug("# _do_auth(): self._auth_plugin: %s", self._auth_plugin) + if ( + self._auth_plugin.startswith("authentication_oci") + or ( + self._auth_plugin.startswith("authentication_kerberos") + and os.name == "nt" + ) + ) and not username: + username = getpass.getuser() + _LOGGER.debug( + "MySQL user is empty, OS user: %s will be used for %s", + username, + self._auth_plugin, + ) + + packet = self._protocol.make_auth( + handshake=self._handshake, + username=username, + password=password, + database=database, + charset=charset, + client_flags=client_flags, + ssl_enabled=self._ssl_active, + auth_plugin=self._auth_plugin, + conn_attrs=conn_attrs, + auth_plugin_class=self._auth_plugin_class, + ) + self._socket.send(packet) + self._auth_switch_request(username, password) + + if not (client_flags & ClientFlag.CONNECT_WITH_DB) and database: + self.cmd_init_db(database) + + return True + + def _auth_switch_request( + self, username: Optional[str] = None, password: Optional[str] = None + ) -> Optional[OkPacketType]: + """Handle second part of authentication + + Raises NotSupportedError when we get the old, insecure password + reply back. Raises any error coming from MySQL. + """ + auth = None + new_auth_plugin: Optional[str] = ( + self._auth_plugin or self._handshake["auth_plugin"] + ) + _LOGGER.debug("new_auth_plugin: %s", new_auth_plugin) + packet = self._socket.recv() + if packet[4] == 254 and len(packet) == 5: + raise NotSupportedError( + "Authentication with old (insecure) passwords " + "is not supported. 
For more information, lookup " + "Password Hashing in the latest MySQL manual" + ) + if packet[4] == 254: + # AuthSwitchRequest + ( + new_auth_plugin, + auth_data, + ) = self._protocol.parse_auth_switch_request(packet) + auth = get_auth_plugin(new_auth_plugin, self._auth_plugin_class)( + auth_data, + username=username or self._user, + password=password, + ssl_enabled=self.is_secure, + ) + packet = self._auth_continue(auth, new_auth_plugin, auth_data) + + if packet[4] == 1: + auth_data = self._protocol.parse_auth_more_data(packet) + auth = get_auth_plugin(new_auth_plugin, self._auth_plugin_class)( + auth_data, password=password, ssl_enabled=self.is_secure + ) + if new_auth_plugin == "caching_sha2_password": + response = auth.auth_response() + if response: + self._socket.send(response) + packet = self._socket.recv() + + if packet[4] == 0: + return self._handle_ok(packet) + if packet[4] == 2: + return self._handle_mfa(packet) + if packet[4] == 255: + raise get_exception(packet) + return None + + def _handle_mfa(self, packet: bytes) -> Optional[OkPacketType]: + """Handle Multi Factor Authentication.""" + self._mfa_nfactor += 1 + if self._mfa_nfactor == 2: + password = self._password2 + elif self._mfa_nfactor == 3: + password = self._password3 + else: + raise InterfaceError( + "Failed Multi Factor Authentication (invalid N factor)" + ) + + _LOGGER.debug("# MFA N Factor #%d", self._mfa_nfactor) + + packet, auth_plugin = self._protocol.parse_auth_next_factor(packet[4:]) + auth = get_auth_plugin(auth_plugin, self._auth_plugin_class)( + None, + username=self._user, + password=password, + ssl_enabled=self.is_secure, + ) + packet = self._auth_continue(auth, auth_plugin, packet) + + if packet[4] == 1: + auth_data = self._protocol.parse_auth_more_data(packet) + auth = get_auth_plugin(auth_plugin, self._auth_plugin_class)( + auth_data, password=password, ssl_enabled=self.is_secure + ) + if auth_plugin == "caching_sha2_password": + response = auth.auth_response() + if response: + self._socket.send(response) + packet = self._socket.recv() + + if packet[4] == 0: + return self._handle_ok(packet) + if packet[4] == 2: + return self._handle_mfa(packet) + if packet[4] == 255: + raise get_exception(packet) + return None + + def _auth_continue( + self, auth: BaseAuthPlugin, auth_plugin: str, auth_data: bytes + ) -> bytearray: + """Continue with the authentication.""" + if auth_plugin == "authentication_ldap_sasl_client": + _LOGGER.debug("# auth_data: %s", auth_data) + response = auth.auth_response(self._krb_service_principal) + elif auth_plugin == "authentication_kerberos_client": + _LOGGER.debug("# auth_data: %s", auth_data) + response = auth.auth_response(auth_data) + elif auth_plugin == "authentication_oci_client": + _LOGGER.debug("# oci configuration file path: %s", self._oci_config_file) + response = auth.auth_response(self._oci_config_file) + else: + response = auth.auth_response() + + _LOGGER.debug("# request: %s size: %s", response, len(response)) + self._socket.send(response) + packet = self._socket.recv() + _LOGGER.debug("# server response packet: %s", packet) + if ( + auth_plugin == "authentication_ldap_sasl_client" + and len(packet) >= 6 + and packet[5] == 114 + and packet[6] == 61 + ): # 'r' and '=' + # Continue with sasl authentication + dec_response = packet[5:] + cresponse = auth.auth_continue(dec_response) + self._socket.send(cresponse) + packet = self._socket.recv() + if packet[5] == 118 and packet[6] == 61: # 'v' and '=' + if auth.auth_finalize(packet[5:]): + # receive packed OK + packet = 
self._socket.recv() + elif ( + auth_plugin == "authentication_ldap_sasl_client" + and auth_data == b"GSSAPI" + and packet[4] != 255 + ): + rcode_size = 5 # header size for the response status code. + _LOGGER.debug("# Continue with sasl GSSAPI authentication") + _LOGGER.debug("# response header: %s", packet[: rcode_size + 1]) + _LOGGER.debug("# response size: %s", len(packet)) + + _LOGGER.debug("# Negotiate a service request") + complete = False + tries = 0 # To avoid a infinite loop attempt no more than feedback messages + while not complete and tries < 5: + _LOGGER.debug("%s Attempt %s %s", "-" * 20, tries + 1, "-" * 20) + _LOGGER.debug("<< server response: %s", packet) + _LOGGER.debug("# response code: %s", packet[: rcode_size + 1]) + step, complete = auth.auth_continue_krb(packet[rcode_size:]) + _LOGGER.debug(" >> response to server: %s", step) + self._socket.send(step or b"") + packet = self._socket.recv() + tries += 1 + if not complete: + raise InterfaceError( + f"Unable to fulfill server request after {tries} " + f"attempts. Last server response: {packet}" + ) + _LOGGER.debug( + " last GSSAPI response from server: %s length: %d", + packet, + len(packet), + ) + last_step = auth.auth_accept_close_handshake(packet[rcode_size:]) + _LOGGER.debug( + " >> last response to server: %s length: %d", + last_step, + len(last_step), + ) + self._socket.send(last_step) + # Receive final handshake from server + packet = self._socket.recv() + _LOGGER.debug("<< final handshake from server: %s", packet) + + # receive OK packet from server. + packet = self._socket.recv() + _LOGGER.debug("<< ok packet from server: %s", packet) + elif auth_plugin == "authentication_kerberos_client" and packet[4] != 255: + rcode_size = 5 # Reader size for the response status code + _LOGGER.debug("# Continue with GSSAPI authentication") + _LOGGER.debug("# Response header: %s", packet[: rcode_size + 1]) + _LOGGER.debug("# Response size: %s", len(packet)) + _LOGGER.debug("# Negotiate a service request") + complete = False + tries = 0 + + while not complete and tries < 5: + _LOGGER.debug("%s Attempt %s %s", "-" * 20, tries + 1, "-" * 20) + _LOGGER.debug("<< Server response: %s", packet) + _LOGGER.debug("# Response code: %s", packet[: rcode_size + 1]) + token, complete = auth.auth_continue(packet[rcode_size:]) + if token: + self._socket.send(token) + if complete: + break + packet = self._socket.recv() + + _LOGGER.debug(">> Response to server: %s", token) + tries += 1 + + if not complete: + raise InterfaceError( + f"Unable to fulfill server request after {tries} " + f"attempts. Last server response: {packet}" + ) + + _LOGGER.debug( + "Last response from server: %s length: %d", + packet, + len(packet), + ) + + # Receive OK packet from server. + packet = self._socket.recv() + _LOGGER.debug("<< Ok packet from server: %s", packet) + + return packet + + def _get_connection(self) -> SocketType: + """Get connection based on configuration + + This method will return the appropriated connection object using + the connection parameters. + + Returns subclass of MySQLBaseSocket. 
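+
+        For example, with placeholder connection values:
+
+            MySQLConnection(unix_socket="/tmp/mysql.sock")  # MySQLUnixSocket (POSIX only)
+            MySQLConnection(host="127.0.0.1", port=3306)    # MySQLTCPSocket otherwise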
+ """ + conn: Optional[SocketType] = None + if self._unix_socket and os.name == "posix": + conn = MySQLUnixSocket(unix_socket=self.unix_socket) + else: + conn = MySQLTCPSocket( + host=self.server_host, + port=self.server_port, + force_ipv6=self._force_ipv6, + ) + + conn.set_connection_timeout(self._connection_timeout) + return conn + + def _open_connection(self) -> None: + """Open the connection to the MySQL server + + This method sets up and opens the connection to the MySQL server. + + Raises on errors. + """ + if self._auth_plugin == "authentication_kerberos_client" and not self._user: + cls = get_auth_plugin(self._auth_plugin, self._auth_plugin_class) + self._user = cls.get_user_from_credentials() + + self._protocol = MySQLProtocol() + self._socket = self._get_connection() + try: + self._socket.open_connection() + self._do_handshake() + self._do_auth( + self._user, + self._password, + self._database, + self._client_flags, + self._charset_id, + self._ssl, + self._conn_attrs, + ) + self.set_converter_class(self._converter_class) + if self._client_flags & ClientFlag.COMPRESS: + self._socket.recv = self._socket.recv_compressed + self._socket.send = self._socket.send_compressed + self._socket.set_connection_timeout(None) + except Exception: + # close socket + self.close() + raise + + if ( + not self._ssl_disabled + and hasattr(self._socket.sock, "version") + and callable(self._socket.sock.version) + ): + # Raise a deprecation warning if TLSv1 or TLSv1.1 is being used + tls_version = self._socket.sock.version() + if tls_version in ("TLSv1", "TLSv1.1"): + warn_msg = ( + f"This connection is using {tls_version} which is now " + "deprecated and will be removed in a future release of " + "MySQL Connector/Python" + ) + warnings.warn(warn_msg, DeprecationWarning) + + def shutdown(self) -> None: + """Shut down connection to MySQL Server.""" + if not self._socket: + return + + try: + self._socket.shutdown() + except (AttributeError, Error): + pass # Getting an exception would mean we are disconnected. + + def close(self) -> None: + """Disconnect from the MySQL server""" + if not self._socket: + return + + try: + self.cmd_quit() + except (AttributeError, Error): + pass # Getting an exception would mean we are disconnected. + self._socket.close_connection() + self._handshake = None + + disconnect = close + + def _send_cmd( + self, + command: int, + argument: Optional[bytes] = None, + packet_number: int = 0, + packet: Optional[bytes] = None, + expect_response: bool = True, + compressed_packet_number: int = 0, + ) -> Optional[bytearray]: + """Send a command to the MySQL server + + This method sends a command with an optional argument. + If packet is not None, it will be sent and the argument will be + ignored. + + The packet_number is optional and should usually not be used. + + Some commands might not result in the MySQL server returning + a response. If a command does not return anything, you should + set expect_response to False. The _send_cmd method will then + return None instead of a MySQL packet. + + Returns a MySQL packet or None. 
+ """ + self.handle_unread_result() + + try: + self._socket.send( + self._protocol.make_command(command, packet or argument), + packet_number, + compressed_packet_number, + ) + except AttributeError as err: + raise OperationalError("MySQL Connection not available") from err + + if not expect_response: + return None + return self._socket.recv() + + def _send_data( + self, data_file: BinaryIO, send_empty_packet: bool = False + ) -> bytearray: + """Send data to the MySQL server + + This method accepts a file-like object and sends its data + as is to the MySQL server. If the send_empty_packet is + True, it will send an extra empty package (for example + when using LOAD LOCAL DATA INFILE). + + Returns a MySQL packet. + """ + self.handle_unread_result() + + if not hasattr(data_file, "read"): + raise ValueError("expecting a file-like object") + + try: + buf = data_file.read(NET_BUFFER_LENGTH - 16) + while buf: + self._socket.send(buf) + buf = data_file.read(NET_BUFFER_LENGTH - 16) + except AttributeError as err: + raise OperationalError("MySQL Connection not available") from err + + if send_empty_packet: + try: + self._socket.send(b"") + except AttributeError as err: + raise OperationalError("MySQL Connection not available") from err + + return self._socket.recv() + + def _handle_server_status(self, flags: int) -> None: + """Handle the server flags found in MySQL packets + + This method handles the server flags send by MySQL OK and EOF + packets. It, for example, checks whether there exists more result + sets or whether there is an ongoing transaction. + """ + self._have_next_result = flag_is_set(ServerFlag.MORE_RESULTS_EXISTS, flags) + self._in_transaction = flag_is_set(ServerFlag.STATUS_IN_TRANS, flags) + + @property + def in_transaction(self) -> bool: + """MySQL session has started a transaction""" + return self._in_transaction + + def _handle_ok(self, packet: bytes) -> OkPacketType: + """Handle a MySQL OK packet + + This method handles a MySQL OK packet. When the packet is found to + be an Error packet, an error will be raised. If the packet is neither + an OK or an Error packet, InterfaceError will be raised. + + Returns a dict() + """ + if packet[4] == 0: + ok_pkt = self._protocol.parse_ok(packet) + self._handle_server_status(ok_pkt["status_flag"]) + return ok_pkt + if packet[4] == 255: + raise get_exception(packet) + raise InterfaceError("Expected OK packet") + + def _handle_eof(self, packet: bytes) -> EofPacketType: + """Handle a MySQL EOF packet + + This method handles a MySQL EOF packet. When the packet is found to + be an Error packet, an error will be raised. If the packet is neither + and OK or an Error packet, InterfaceError will be raised. + + Returns a dict() + """ + if packet[4] == 254: + eof = self._protocol.parse_eof(packet) + self._handle_server_status(eof["status_flag"]) + return eof + if packet[4] == 255: + raise get_exception(packet) + raise InterfaceError("Expected EOF packet") + + def _handle_load_data_infile(self, filename: str) -> OkPacketType: + """Handle a LOAD DATA INFILE LOCAL request""" + file_name = os.path.abspath(filename) + if os.path.islink(file_name): + raise OperationalError("Use of symbolic link is not allowed") + if not self._allow_local_infile and not self._allow_local_infile_in_path: + raise DatabaseError( + "LOAD DATA LOCAL INFILE file request rejected due to " + "restrictions on access." + ) + if not self._allow_local_infile and self._allow_local_infile_in_path: + # validate filename is inside of allow_local_infile_in_path path. 
+ infile_path = os.path.abspath(self._allow_local_infile_in_path) + c_path = None + try: + c_path = os.path.commonpath([infile_path, file_name]) + except ValueError as err: + err_msg = ( + "{} while loading file `{}` and path `{}` given" + " in allow_local_infile_in_path" + ) + raise InterfaceError( + err_msg.format(str(err), file_name, infile_path) + ) from err + + if c_path != infile_path: + err_msg = ( + "The file `{}` is not found in the given " + "allow_local_infile_in_path {}" + ) + raise DatabaseError(err_msg.format(file_name, infile_path)) + + try: + data_file = open(file_name, "rb") # pylint: disable=consider-using-with + return self._handle_ok(self._send_data(data_file, send_empty_packet=True)) + except IOError: + # Send a empty packet to cancel the operation + try: + self._socket.send(b"") + except AttributeError as err: + raise OperationalError("MySQL Connection not available") from err + raise InterfaceError(f"File '{file_name}' could not be read") from None + finally: + try: + data_file.close() + except (IOError, NameError): + pass + + def _handle_result(self, packet: bytes) -> ResultType: + """Handle a MySQL Result + + This method handles a MySQL result, for example, after sending the + query command. OK and EOF packets will be handled and returned. If + the packet is an Error packet, an Error-exception will be + raised. + + The dictionary returned of: + - columns: column information + - eof: the EOF-packet information + + Returns a dict() + """ + if not packet or len(packet) < 4: + raise InterfaceError("Empty response") + if packet[4] == 0: + return self._handle_ok(packet) + if packet[4] == 251: + filename = packet[5:].decode() + return self._handle_load_data_infile(filename) + if packet[4] == 254: + return self._handle_eof(packet) + if packet[4] == 255: + raise get_exception(packet) + + # We have a text result set + column_count = self._protocol.parse_column_count(packet) + if not column_count or not isinstance(column_count, int): + raise InterfaceError("Illegal result set") + + self._columns_desc = [ + None, + ] * column_count + for i in range(0, column_count): + self._columns_desc[i] = self._protocol.parse_column( + self._socket.recv(), self.python_charset + ) + + eof = self._handle_eof(self._socket.recv()) + self.unread_result = True + return {"columns": self._columns_desc, "eof": eof} + + def get_row( + self, + binary: bool = False, + columns: Optional[List[DescriptionType]] = None, + raw: Optional[bool] = None, + ) -> Tuple[Optional[RowType], Optional[EofPacketType]]: + """Get the next rows returned by the MySQL server + + This method gets one row from the result set after sending, for + example, the query command. The result is a tuple consisting of the + row and the EOF packet. + If no row was available in the result set, the row data will be None. + + Returns a tuple. + """ + (rows, eof) = self.get_rows(count=1, binary=binary, columns=columns, raw=raw) + if rows: + return (rows[0], eof) + return (None, eof) + + def get_rows( + self, + count: Optional[int] = None, + binary: bool = False, + columns: Optional[List[DescriptionType]] = None, + raw: Optional[bool] = None, + prep_stmt: Any = None, + ) -> Tuple[List[RowType], Optional[EofPacketType]]: + """Get all rows returned by the MySQL server + + This method gets all rows returned by the MySQL server after sending, + for example, the query command. The result is a tuple consisting of + a list of rows and the EOF packet. 
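# Sketch of the low-level result flow get_row()/get_rows() implement,
# bypassing the cursor layer; the table is hypothetical and `cnx` is assumed
# to be an open connection.
result = cnx.cmd_query("SELECT id, name FROM users")  # column metadata
rows, eof = cnx.get_rows()  # drains the result set and clears unread_result
print(result["columns"], len(rows), eof)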
+ + Returns a tuple() + """ + if raw is None: + raw = self._raw + + if not self.unread_result: + raise InternalError("No result set available") + + rows: Tuple[List[Tuple[Any, ...]], Optional[EofPacketType]] = ([], None) + try: + if binary: + charset = self.charset + if charset == "utf8mb4": + charset = "utf8" + rows = self._protocol.read_binary_result( + self._socket, columns, count, charset + ) + else: + rows = self._protocol.read_text_result( + self._socket, self._server_version, count=count + ) + except Error as err: + self.unread_result = False + raise err + + rows, eof_p = rows + if ( + not (binary or raw) + and self._columns_desc is not None + and rows + and hasattr(self, "converter") + ): + row_to_python = self.converter.row_to_python + rows = [row_to_python(row, self._columns_desc) for row in rows] + + if eof_p is not None: + self._handle_server_status( + eof_p["status_flag"] + if "status_flag" in eof_p + else eof_p["server_status"] + ) + self.unread_result = False + + return rows, eof_p + + def consume_results(self) -> None: + """Consume results""" + if self.unread_result: + self.get_rows() + + def cmd_init_db(self, database: str) -> OkPacketType: + """Change the current database + + This method changes the current (default) database by sending the + INIT_DB command. The result is a dictionary containing the OK packet + information. + + Returns a dict() + """ + return self._handle_ok( + self._send_cmd(ServerCmd.INIT_DB, database.encode("utf-8")) + ) + + def cmd_query( + self, + query: StrOrBytes, + raw: bool = False, + buffered: bool = False, + raw_as_string: bool = False, + ) -> ResultType: + """Send a query to the MySQL server + + This method send the query to the MySQL server and returns the result. + + If there was a text result, a tuple will be returned consisting of + the number of columns and a list containing information about these + columns. + + When the query doesn't return a text result, the OK or EOF packet + information as dictionary will be returned. In case the result was + an error, exception Error will be raised. 
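# Sketch of the unread-result contract enforced by handle_unread_result():
# a pending result set must be drained before the next command, otherwise
# InternalError("Unread result found") is raised (unless the connection was
# created with consume_results=True). Assumes an open connection `cnx`.
cnx.cmd_query("SELECT 1")
cnx.get_rows()             # or cnx.consume_results()
cnx.cmd_query("SELECT 2")  # safe now that the previous result is consumed
cnx.consume_results()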
+ + Returns a tuple() + """ + if not isinstance(query, bytearray): + if isinstance(query, str): + query = query.encode("utf-8") + query = bytearray(query) + # Prepare query attrs + charset = self.charset if self.charset != "utf8mb4" else "utf8" + packet = bytearray() + if not self._query_attrs_supported and self._query_attrs: + warnings.warn( + "This version of the server does not support Query Attributes", + category=Warning, + ) + if self._client_flags & ClientFlag.CLIENT_QUERY_ATTRIBUTES: + names = [] + types = [] + values: List[bytes] = [] + null_bitmap = [0] * ((len(self._query_attrs) + 7) // 8) + for pos, attr_tuple in enumerate(self._query_attrs): + value = attr_tuple[1] + flags = 0 + if value is None: + null_bitmap[(pos // 8)] |= 1 << (pos % 8) + types.append(int1store(FieldType.NULL) + int1store(flags)) + continue + if isinstance(value, int): + ( + packed, + field_type, + flags, + ) = self._protocol.prepare_binary_integer(value) + values.append(packed) + elif isinstance(value, str): + value = value.encode(charset) + values.append(lc_int(len(value)) + value) + field_type = FieldType.VARCHAR + elif isinstance(value, bytes): + values.append(lc_int(len(value)) + value) + field_type = FieldType.BLOB + elif isinstance(value, Decimal): + values.append( + lc_int(len(str(value).encode(charset))) + + str(value).encode(charset) + ) + field_type = FieldType.DECIMAL + elif isinstance(value, float): + values.append(struct.pack("<d", value)) + field_type = FieldType.DOUBLE + elif isinstance(value, (datetime.datetime, datetime.date)): + ( + packed, + field_type, + ) = self._protocol.prepare_binary_timestamp(value) + values.append(packed) + elif isinstance(value, (datetime.timedelta, datetime.time)): + ( + packed, + field_type, + ) = self._protocol.prepare_binary_time(value) + values.append(packed) + else: + raise ProgrammingError( + "MySQL binary protocol can not handle " + f"'{value.__class__.__name__}' objects" + ) + types.append(int1store(field_type) + int1store(flags)) + name = attr_tuple[0].encode(charset) + names.append(lc_int(len(name)) + name) + + # int<lenenc> parameter_count Number of parameters + packet.extend(lc_int(len(self._query_attrs))) + # int<lenenc> parameter_set_count Number of parameter sets. + # Currently always 1 + packet.extend(lc_int(1)) + if values: + packet.extend( + b"".join([struct.pack("B", bit) for bit in null_bitmap]) + + int1store(1) + ) + for _type, name in zip(types, names): + packet.extend(_type) + packet.extend(name) + + for value in values: + packet.extend(value) + + packet.extend(query) + query = bytes(packet) + try: + result = self._handle_result(self._send_cmd(ServerCmd.QUERY, query)) + except ProgrammingError as err: + if err.errno == 3948 and "Loading local data is disabled" in err.msg: + err_msg = ( + "LOAD DATA LOCAL INFILE file request rejected due " + "to restrictions on access." + ) + raise DatabaseError(err_msg) from err + raise + if self._have_next_result: + raise InterfaceError( + "Use cmd_query_iter for statements with multiple queries." + ) + + return result + + def cmd_query_iter( + self, statements: StrOrBytes + ) -> Generator[ResultType, None, None]: + """Send one or more statements to the MySQL server + + Similar to the cmd_query method, but instead returns a generator + object to iterate through results. It sends the statements to the + MySQL server, and through the iterator you can get the results. + + statement = 'SELECT 1; INSERT INTO t1 VALUES (); SELECT 2' + for result in cnx.cmd_query_iter(statement): + if 'columns' in result: + columns = result['columns'] + rows = cnx.get_rows() + else: + # do something useful with INSERT result + + Returns a generator. + """ + packet = bytearray() + if not isinstance(statements, bytearray): + if isinstance(statements, str): + statements = statements.encode("utf8") + statements = bytearray(statements) + + if self._client_flags & ClientFlag.CLIENT_QUERY_ATTRIBUTES: + # int<lenenc> parameter_count Number of parameters + packet.extend(lc_int(0)) + # int<lenenc> parameter_set_count Number of parameter sets.
+ # Currently always 1 + packet.extend(lc_int(1)) + + packet.extend(statements) + query = bytes(packet) + # Handle the first query result + yield self._handle_result(self._send_cmd(ServerCmd.QUERY, query)) + + # Handle next results, if any + while self._have_next_result: + self.handle_unread_result() + yield self._handle_result(self._socket.recv()) + + def cmd_refresh(self, options: int) -> OkPacketType: + """Send the Refresh command to the MySQL server + + This method sends the Refresh command to the MySQL server. The options + argument should be a bitwise value using constants.RefreshOption. + Usage example: + RefreshOption = mysql.connector.RefreshOption + refresh = RefreshOption.LOG | RefreshOption.THREADS + cnx.cmd_refresh(refresh) + + The result is a dictionary with the OK packet information. + + Returns a dict() + """ + return self._handle_ok(self._send_cmd(ServerCmd.REFRESH, int4store(options))) + + def cmd_quit(self) -> bytearray: + """Close the current connection with the server + + This method sends the QUIT command to the MySQL server, closing the + current connection. Since no response can be returned to the + client, cmd_quit() returns the packet it sent. + + Returns a bytearray() + """ + self.handle_unread_result() + + packet = self._protocol.make_command(ServerCmd.QUIT) + self._socket.send(packet, 0, 0) + return packet + + def cmd_shutdown(self, shutdown_type: Optional[int] = None) -> EofPacketType: + """Shut down the MySQL Server + + This method sends the SHUTDOWN command to the MySQL server and is only + possible if the current user has SUPER privileges. The result is a + dictionary containing the OK packet information. + + Note: Most applications and scripts do not need the SUPER privilege. + + Returns a dict() + """ + if shutdown_type: + if not ShutdownType.get_info(shutdown_type): + raise InterfaceError("Invalid shutdown type") + atype = shutdown_type + else: + atype = ShutdownType.SHUTDOWN_DEFAULT + return self._handle_eof(self._send_cmd(ServerCmd.SHUTDOWN, int4store(atype))) + + def cmd_statistics(self) -> StatsPacketType: + """Send the statistics command to the MySQL Server + + This method sends the STATISTICS command to the MySQL server. The + result is a dictionary with various statistical information. + + Returns a dict() + """ + self.handle_unread_result() + + packet = self._protocol.make_command(ServerCmd.STATISTICS) + self._socket.send(packet, 0, 0) + return self._protocol.parse_statistics(self._socket.recv()) + + def cmd_process_kill(self, mysql_pid: int) -> OkPacketType: + """Kill a MySQL process + + This method sends the PROCESS_KILL command to the server along with + the process ID. The result is a dictionary with the OK packet + information. + + Returns a dict() + """ + return self._handle_ok( + self._send_cmd(ServerCmd.PROCESS_KILL, int4store(mysql_pid)) + ) + + def cmd_debug(self) -> EofPacketType: + """Send the DEBUG command + + This method sends the DEBUG command to the MySQL server, which + requires the MySQL user to have SUPER privilege. The output will go + to the MySQL server error log and the result of this method is a + dictionary with EOF packet information. + + Returns a dict() + """ + return self._handle_eof(self._send_cmd(ServerCmd.DEBUG)) + + def cmd_ping(self) -> OkPacketType: + """Send the PING command + + This method sends the PING command to the MySQL server. It is used to + check if the connection is still valid. The result of this + method is a dictionary with OK packet information.
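# Runnable form of the cmd_query_iter() example given in its docstring above;
# the statements are illustrative and `cnx` is assumed to be an open
# connection.
for result in cnx.cmd_query_iter("SELECT 1; SELECT 2"):
    if "columns" in result:
        rows, _ = cnx.get_rows()  # fetch the rows of this result set
        print(rows)
    else:
        print(result)  # an OK packet, e.g. from an INSERT or UPDATE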
+ + Returns a dict() + """ + return self._handle_ok(self._send_cmd(ServerCmd.PING)) + + def cmd_change_user( + self, + username: str = "", + password: str = "", + database: str = "", + charset: int = 45, + password1: str = "", + password2: str = "", + password3: str = "", + oci_config_file: str = "", + ) -> Optional[OkPacketType]: + """Change the current logged in user + + This method allows to change the current logged in user information. + The result is a dictionary with OK packet information. + + Returns a dict() + """ + if not isinstance(charset, int): + raise ValueError("charset must be an integer") + if charset < 0: + raise ValueError("charset should be either zero or a postive integer") + + self._mfa_nfactor = 1 + self._user = username + self._password = password + self._password1 = password1 + self._password2 = password2 + self._password3 = password3 + + if self._password1 and password != self._password1: + password = self._password1 + + self.handle_unread_result() + + if self._compress: + raise NotSupportedError("Change user is not supported with compression") + packet = self._protocol.make_change_user( + handshake=self._handshake, + username=username, + password=password, + database=database, + charset=charset, + client_flags=self._client_flags, + ssl_enabled=self._ssl_active, + auth_plugin=self._auth_plugin, + conn_attrs=self._conn_attrs, + ) + self._socket.send(packet, 0, 0) + + if oci_config_file: + self._oci_config_file = oci_config_file + + ok_packet = self._auth_switch_request(username, password) + + if not (self._client_flags & ClientFlag.CONNECT_WITH_DB) and database: + self.cmd_init_db(database) + + self._charset_id = charset + self._post_connection() + + return ok_packet + + @property + def database(self) -> str: + """Get the current database""" + return self.info_query("SELECT DATABASE()")[0] # type: ignore[return-value] + + @database.setter + def database(self, value: str) -> None: + """Set the current database""" + self.cmd_query(f"USE {value}") + + def is_connected(self) -> bool: + """Reports whether the connection to MySQL Server is available + + This method checks whether the connection to MySQL is available. + It is similar to ping(), but unlike the ping()-method, either True + or False is returned and no exception is raised. + + Returns True or False. + """ + try: + self.cmd_ping() + except Error: + return False # This method does not raise + return True + + def set_allow_local_infile_in_path(self, path: str) -> None: + """Set the path that user can upload files. + + Args: + path (str): Path that user can upload files. + """ + self._allow_local_infile_in_path = path + + def reset_session( + self, + user_variables: Optional[Dict[str, Any]] = None, + session_variables: Optional[Dict[str, Any]] = None, + ) -> None: + """Clears the current active session + + This method resets the session state, if the MySQL server is 5.7.3 + or later active session will be reset without re-authenticating. + For other server versions session will be reset by re-authenticating. + + It is possible to provide a sequence of variables and their values to + be set after clearing the session. This is possible for both user + defined variables and session variables. + This method takes two arguments user_variables and session_variables + which are dictionaries. + + Raises OperationalError if not connected, InternalError if there are + unread results and InterfaceError on errors. 
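# Usage sketch for reset_session(): user variables become SET @`name` = %s
# statements and session variables SET SESSION `name` = %s statements, as in
# the loops that follow. Names and values are illustrative; assumes an open
# connection `cnx`.
cnx.reset_session(
    user_variables={"app_stage": "etl"},
    session_variables={"sql_mode": "TRADITIONAL"},
)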
+ """ + if not self.is_connected(): + raise OperationalError("MySQL Connection not available.") + + if not self.cmd_reset_connection(): + try: + self.cmd_change_user( + self._user, + self._password, + self._database, + self._charset_id, + self._password1, + self._password2, + self._password3, + self._oci_config_file, + ) + except ProgrammingError: + self.reconnect() + + cur = self.cursor() + if user_variables: + for key, value in user_variables.items(): + cur.execute(f"SET @`{key}` = %s", (value,)) + if session_variables: + for key, value in session_variables.items(): + cur.execute(f"SET SESSION `{key}` = %s", (value,)) + + def ping(self, reconnect: bool = False, attempts: int = 1, delay: int = 0) -> None: + """Check availability of the MySQL server + + When reconnect is set to True, one or more attempts are made to try + to reconnect to the MySQL server using the reconnect()-method. + + delay is the number of seconds to wait between each retry. + + When the connection is not available, an InterfaceError is raised. Use + the is_connected()-method if you just want to check the connection + without raising an error. + + Raises InterfaceError on errors. + """ + try: + self.cmd_ping() + except Error as err: + if reconnect: + self.reconnect(attempts=attempts, delay=delay) + else: + raise InterfaceError("Connection to MySQL is not available") from err + + @property + def connection_id(self) -> Optional[int]: + """MySQL connection ID""" + if self._handshake: + return self._handshake.get("server_threadid") # type: ignore[return-value] + return None + + def cursor( + self, + buffered: Optional[bool] = None, + raw: Optional[bool] = None, + prepared: Optional[bool] = None, + cursor_class: Optional[Type[MySQLCursor]] = None, + dictionary: Optional[bool] = None, + named_tuple: Optional[bool] = None, + ) -> MySQLCursor: + """Instantiates and returns a cursor + + By default, MySQLCursor is returned. Depending on the options + while connecting, a buffered and/or raw cursor is instantiated + instead. Also depending upon the cursor options, rows can be + returned as dictionary or named tuple. + + Dictionary and namedtuple based cursors are available with buffered + output but not raw. + + It is possible to also give a custom cursor through the + cursor_class parameter, but it needs to be a subclass of + mysql.connector.cursor.CursorBase. + + Raises ProgrammingError when cursor_class is not a subclass of + CursorBase. Raises ValueError when cursor is not available. 
+ + Returns a cursor-object + """ + self.handle_unread_result() + + if not self.is_connected(): + raise OperationalError("MySQL Connection not available") + if cursor_class is not None: + if not issubclass(cursor_class, CursorBase): + raise ProgrammingError( + "Cursor class needs to be a subclass of cursor.CursorBase" + ) + return (cursor_class)(self) + + buffered = buffered if buffered is not None else self._buffered + raw = raw if raw is not None else self._raw + + cursor_type = 0 + if buffered is True: + cursor_type |= 1 + if raw is True: + cursor_type |= 2 + if dictionary is True: + cursor_type |= 4 + if named_tuple is True: + cursor_type |= 8 + if prepared is True: + cursor_type |= 16 + + types = { + 0: MySQLCursor, # 0 + 1: MySQLCursorBuffered, + 2: MySQLCursorRaw, + 3: MySQLCursorBufferedRaw, + 4: MySQLCursorDict, + 5: MySQLCursorBufferedDict, + 8: MySQLCursorNamedTuple, + 9: MySQLCursorBufferedNamedTuple, + 16: MySQLCursorPrepared, + 20: MySQLCursorPreparedDict, + } + try: + return (types[cursor_type])(self) + except KeyError: + args = ("buffered", "raw", "dictionary", "named_tuple", "prepared") + raise ValueError( + "Cursor not available with given criteria: " + + ", ".join([args[i] for i in range(5) if cursor_type & (1 << i) != 0]) + ) from None + + def commit(self) -> None: + """Commit current transaction""" + self._execute_query("COMMIT") + + def rollback(self) -> None: + """Rollback current transaction""" + if self.unread_result: + self.get_rows() + + self._execute_query("ROLLBACK") + + def _execute_query(self, query: StrOrBytes) -> None: + """Execute a query + + This method simply calls cmd_query() after checking for unread + results. If there are still unread results, an InternalError + is raised; the result of cmd_query() itself is discarded. + + Returns None + """ + self.handle_unread_result() + self.cmd_query(query) + + def info_query(self, query: StrOrBytes) -> Optional[RowType]: + """Send a query which only returns 1 row""" + cursor = self.cursor(buffered=True) + cursor.execute(query) + return cursor.fetchone() + + def _handle_binary_ok(self, packet: bytes) -> Dict[str, int]: + """Handle a MySQL Binary Protocol OK packet + + This method handles a MySQL Binary Protocol OK packet. When the + packet is found to be an Error packet, an error will be raised. If + the packet is neither an OK nor an Error packet, InterfaceError + will be raised. + + Returns a dict() + """ + if packet[4] == 0: + return self._protocol.parse_binary_prepare_ok(packet) + if packet[4] == 255: + raise get_exception(packet) + raise InterfaceError("Expected Binary OK packet") + + def _handle_binary_result( + self, packet: bytes + ) -> Union[OkPacketType, Tuple[int, List[DescriptionType], EofPacketType]]: + """Handle a MySQL Result + + This method handles a MySQL result, for example, after sending the + query command. OK and EOF packets will be handled and returned. If + the packet is an Error packet, an Error exception will be raised. + + The tuple returned by this method consists of: + - the number of columns in the result, + - a list of tuples with information about the columns, + - the EOF packet information as a dictionary.
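# Sketch of the binary-protocol round trip these handlers parse, driven
# through the prepared-cursor front end (cursor(prepared=True) issues
# cmd_stmt_prepare/cmd_stmt_execute/cmd_stmt_close internally). Assumes an
# open connection `cnx`.
cur = cnx.cursor(prepared=True)
cur.execute("SELECT %s + %s", (1, 2))  # first call prepares, later calls reuse
print(cur.fetchall())  # [(3,)]
cur.close()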
+ + Returns tuple() or dict() + """ + if not packet or len(packet) < 4: + raise InterfaceError("Empty response") + if packet[4] == 0: + return self._handle_ok(packet) + if packet[4] == 254: + return self._handle_eof(packet) + if packet[4] == 255: + raise get_exception(packet) + + # We have a binary result set + column_count = self._protocol.parse_column_count(packet) + if not column_count or not isinstance(column_count, int): + raise InterfaceError("Illegal result set.") + + columns: List[DescriptionType] = [None] * column_count + for i in range(0, column_count): + columns[i] = self._protocol.parse_column( + self._socket.recv(), self.python_charset + ) + + eof = self._handle_eof(self._socket.recv()) + return (column_count, columns, eof) + + def cmd_stmt_fetch(self, statement_id: int, rows: int = 1) -> None: + """Fetch a MySQL statement Result Set + + This method will send the FETCH command to MySQL together with the + given statement id and the number of rows to fetch. + """ + packet = self._protocol.make_stmt_fetch(statement_id, rows) + self.unread_result = False + self._send_cmd(ServerCmd.STMT_FETCH, packet, expect_response=False) + self.unread_result = True + + def cmd_stmt_prepare( + self, statement: bytes + ) -> Mapping[str, Union[int, List[DescriptionType]]]: + """Prepare a MySQL statement + + This method will send the PREPARE command to MySQL together with the + given statement. + + Returns a dict() + """ + packet = self._send_cmd(ServerCmd.STMT_PREPARE, statement) + result = self._handle_binary_ok(packet) + + result["columns"] = [] + result["parameters"] = [] + if result["num_params"] > 0: + for _ in range(0, result["num_params"]): + result["parameters"].append( + self._protocol.parse_column( + self._socket.recv(), self.python_charset + ) + ) + self._handle_eof(self._socket.recv()) + if result["num_columns"] > 0: + for _ in range(0, result["num_columns"]): + result["columns"].append( + self._protocol.parse_column( + self._socket.recv(), self.python_charset + ) + ) + self._handle_eof(self._socket.recv()) + + return result + + def cmd_stmt_execute( + self, + statement_id: int, + data: Sequence[SupportedMysqlBinaryProtocolTypes] = (), + parameters: Sequence[Any] = (), + flags: int = 0, + ) -> Union[OkPacketType, Tuple[int, List[DescriptionType], EofPacketType]]: + """Execute a prepared MySQL statement""" + parameters = list(parameters) + long_data_used = {} + + if data: + for param_id, _ in enumerate(parameters): + if isinstance(data[param_id], IOBase): + binary = True + try: + binary = "b" not in data[param_id].mode # type: ignore[union-attr] + except AttributeError: + pass + self.cmd_stmt_send_long_data(statement_id, param_id, data[param_id]) + long_data_used[param_id] = (binary,) + if not self._query_attrs_supported and self._query_attrs: + warnings.warn( + "This version of the server does not support Query Attributes", + category=Warning, + ) + if self._client_flags & ClientFlag.CLIENT_QUERY_ATTRIBUTES: + execute_packet = self._protocol.make_stmt_execute( + statement_id, + data, + tuple(parameters), + flags, + long_data_used, + self.charset, + self._query_attrs, + self._converter_str_fallback, + ) + else: + execute_packet = self._protocol.make_stmt_execute( + statement_id, + data, + tuple(parameters), + flags, + long_data_used, + self.charset, + converter_str_fallback=self._converter_str_fallback, + ) + packet = self._send_cmd(ServerCmd.STMT_EXECUTE, packet=execute_packet) + result = self._handle_binary_result(packet) + return result + + def cmd_stmt_close(self, statement_id: int) 
-> None: + """Deallocate a prepared MySQL statement + + This method deallocates the prepared statement using the + statement_id. Note that the MySQL server does not return + anything. + """ + self._send_cmd( + ServerCmd.STMT_CLOSE, + int4store(statement_id), + expect_response=False, + ) + + def cmd_stmt_send_long_data( + self, statement_id: int, param_id: int, data: BinaryIO + ) -> int: + """Send data for a column + + This method sends data for a column (for example a BLOB) for the + statement identified by statement_id. The param_id indicates which + parameter the data belongs to. + The data argument should be a file-like object. + + Since MySQL does not send anything back, no error is raised. When + the MySQL server is not reachable, an OperationalError is raised. + + cmd_stmt_send_long_data should be called before cmd_stmt_execute. + + The total number of bytes sent is returned. + + Returns int. + """ + chunk_size = 8192 + total_sent = 0 + try: + buf = data.read(chunk_size) + while buf: + packet = self._protocol.prepare_stmt_send_long_data( + statement_id, param_id, buf + ) + self._send_cmd( + ServerCmd.STMT_SEND_LONG_DATA, + packet=packet, + expect_response=False, + ) + total_sent += len(buf) + buf = data.read(chunk_size) + except AttributeError as err: + raise OperationalError("MySQL Connection not available") from err + + return total_sent + + def cmd_stmt_reset(self, statement_id: int) -> None: + """Reset data for a prepared statement sent as long data + + The OK packet returned by the server is validated; nothing is + returned. + + Returns None + """ + self._handle_ok(self._send_cmd(ServerCmd.STMT_RESET, int4store(statement_id))) + + def cmd_reset_connection(self) -> bool: + """Resets the session state without re-authenticating + + The Reset command only works on MySQL server 5.7.3 or later. + The result is True for a successful reset, otherwise False. + + Returns bool + """ + try: + self._handle_ok(self._send_cmd(ServerCmd.RESET_CONNECTION)) + self._post_connection() + return True + except (NotSupportedError, OperationalError): + return False + + def handle_unread_result(self) -> None: + """Check whether there is an unread result""" + if self.can_consume_results: + self.consume_results() + elif self.unread_result: + raise InternalError("Unread result found") diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/connection_cext.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/connection_cext.py new file mode 100644 index 00000000..1de87318 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/connection_cext.py @@ -0,0 +1,969 @@ +# Copyright (c) 2014, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL.
+# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +# mypy: disable-error-code="arg-type,index" + +"""Connection class using the C Extension.""" + +import os +import platform +import socket + +from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union + +from . import version +from .abstracts import MySQLConnectionAbstract +from .constants import CharacterSet, ClientFlag, FieldFlag, ServerFlag, ShutdownType +from .conversion import MySQLConverter +from .errors import ( + InterfaceError, + InternalError, + OperationalError, + ProgrammingError, + get_mysql_exception, +) +from .protocol import MySQLProtocol +from .types import ( + CextEofPacketType, + CextResultType, + DescriptionType, + ParamsSequenceOrDictType, + RowType, + StatsPacketType, + StrOrBytes, +) + +HAVE_CMYSQL = False + +try: + import _mysql_connector + + from _mysql_connector import MySQLInterfaceError, MySQLPrepStmt + + from .cursor_cext import ( + CMySQLCursor, + CMySQLCursorBuffered, + CMySQLCursorBufferedDict, + CMySQLCursorBufferedNamedTuple, + CMySQLCursorBufferedRaw, + CMySQLCursorDict, + CMySQLCursorNamedTuple, + CMySQLCursorPrepared, + CMySQLCursorPreparedDict, + CMySQLCursorRaw, + ) +except ImportError as exc: + raise ImportError( + f"MySQL Connector/Python C Extension not available ({exc})" + ) from exc +else: + HAVE_CMYSQL = True + + +class CMySQLConnection(MySQLConnectionAbstract): + """Class initiating a MySQL Connection using Connector/C.""" + + def __init__(self, **kwargs: Any) -> None: + """Initialization""" + if not HAVE_CMYSQL: + raise RuntimeError("MySQL Connector/Python C Extension not available") + self._cmysql: Optional[ + _mysql_connector.MySQL # pylint: disable=c-extension-no-member + ] = None + self._columns: List[DescriptionType] = [] + self._plugin_dir: str = os.path.join( + os.path.dirname(os.path.abspath(_mysql_connector.__file__)), + "mysql", + "vendor", + "plugin", + ) + if platform.system() == "Linux": + # Use the authentication plugins from system if they aren't bundled + if not os.path.exists(self._plugin_dir): + self._plugin_dir = ( + "/usr/lib64/mysql/plugin" + if os.path.exists("/usr/lib64/mysql/plugin") + else "/usr/lib/mysql/plugin" + ) + + self.converter: Optional[MySQLConverter] = None + super().__init__() + + if kwargs: + self.connect(**kwargs) + + def _add_default_conn_attrs(self) -> None: + """Add default connection attributes""" + license_chunks = version.LICENSE.split(" ") + if license_chunks[0] == "GPLv2": + client_license = "GPL-2.0" + else: + client_license = "Commercial" + + self._conn_attrs.update( + { + "_connector_name": "mysql-connector-python", + "_connector_license": client_license, + "_connector_version": ".".join([str(x) for x in version.VERSION[0:3]]), + "_source_host": socket.gethostname(), + } + ) + + def _do_handshake(self) -> None: + """Gather information of the 
MySQL server before authentication""" + self._handshake = { + "protocol": self._cmysql.get_proto_info(), + "server_version_original": self._cmysql.get_server_info(), + "server_threadid": self._cmysql.thread_id(), + "charset": None, + "server_status": None, + "auth_plugin": None, + "auth_data": None, + "capabilities": self._cmysql.st_server_capabilities(), + } + + self._server_version = self._check_server_version( + self._handshake["server_version_original"] + ) + CharacterSet.set_mysql_version(self._server_version) + + @property + def _server_status(self) -> int: + """Returns the server status attribute of MYSQL structure""" + return self._cmysql.st_server_status() + + def set_allow_local_infile_in_path(self, path: str) -> None: + """set local_infile_in_path + + Set allow_local_infile_in_path. + """ + + if self._cmysql: + self._cmysql.set_load_data_local_infile_option(path) + + def set_unicode(self, value: bool = True) -> None: + """Toggle unicode mode + + Set whether we return string fields as unicode or not. + Default is True. + """ + self._use_unicode = value + if self._cmysql: + self._cmysql.use_unicode(value) + if self.converter: + self.converter.set_unicode(value) + + @property + def autocommit(self) -> bool: + """Get whether autocommit is on or off""" + value = self.info_query("SELECT @@session.autocommit")[0] + return value == 1 + + @autocommit.setter + def autocommit(self, value: bool) -> None: + """Toggle autocommit""" + try: + self._cmysql.autocommit(value) + self._autocommit = value + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + @property + def database(self) -> str: + """Get the current database""" + return self.info_query("SELECT DATABASE()")[0] # type: ignore[return-value] + + @database.setter + def database(self, value: str) -> None: + """Set the current database""" + try: + self._cmysql.select_db(value) + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + @property + def in_transaction(self) -> int: + """MySQL session has started a transaction""" + return self._server_status & ServerFlag.STATUS_IN_TRANS + + def _open_connection(self) -> None: + charset_name = CharacterSet.get_info(self._charset_id)[0] + # pylint: disable=c-extension-no-member + self._cmysql = _mysql_connector.MySQL( + buffered=self._buffered, + raw=self._raw, + charset_name=charset_name, + connection_timeout=(self._connection_timeout or 0), + use_unicode=self._use_unicode, + auth_plugin=self._auth_plugin, + plugin_dir=self._plugin_dir, + ) + # pylint: enable=c-extension-no-member + if not self.isset_client_flag(ClientFlag.CONNECT_ARGS): + self._conn_attrs = {} + cnx_kwargs = { + "host": self._host, + "user": self._user, + "password": self._password, + "password1": self._password1, + "password2": self._password2, + "password3": self._password3, + "database": self._database, + "port": self._port, + "client_flags": self._client_flags, + "unix_socket": self._unix_socket, + "compress": self.isset_client_flag(ClientFlag.COMPRESS), + "ssl_disabled": True, + "conn_attrs": self._conn_attrs, + "local_infile": self._allow_local_infile, + "load_data_local_dir": self._allow_local_infile_in_path, + "oci_config_file": self._oci_config_file, + "fido_callback": self._fido_callback, + } + + tls_versions = self._ssl.get("tls_versions") + if tls_versions is not None: + tls_versions.sort(reverse=True) # type: ignore[union-attr] + tls_versions = 
",".join(tls_versions) + if self._ssl.get("tls_ciphersuites") is not None: + ssl_ciphersuites = self._ssl.get("tls_ciphersuites")[0] + tls_ciphersuites = self._ssl.get("tls_ciphersuites")[1] + else: + ssl_ciphersuites = None + tls_ciphersuites = None + if ( + tls_versions is not None + and "TLSv1.3" in tls_versions + and not tls_ciphersuites + ): + tls_ciphersuites = "TLS_AES_256_GCM_SHA384" + if not self._ssl_disabled: + cnx_kwargs.update( + { + "ssl_ca": self._ssl.get("ca"), + "ssl_cert": self._ssl.get("cert"), + "ssl_key": self._ssl.get("key"), + "ssl_cipher_suites": ssl_ciphersuites, + "tls_versions": tls_versions, + "tls_cipher_suites": tls_ciphersuites, + "ssl_verify_cert": self._ssl.get("verify_cert") or False, + "ssl_verify_identity": self._ssl.get("verify_identity") or False, + "ssl_disabled": self._ssl_disabled, + } + ) + + if os.name == "nt" and self._auth_plugin_class == "MySQLKerberosAuthPlugin": + cnx_kwargs["use_kerberos_gssapi"] = True + + try: + self._cmysql.connect(**cnx_kwargs) + self._cmysql.converter_str_fallback = self._converter_str_fallback + if self.converter: + self.converter.str_fallback = self._converter_str_fallback + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + self._do_handshake() + + def close(self) -> None: + """Disconnect from the MySQL server""" + if self._cmysql: + try: + self.free_result() + self._cmysql.close() + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + disconnect = close + + def is_closed(self) -> bool: + """Return True if the connection to MySQL Server is closed.""" + return not self._cmysql.connected() + + def is_connected(self) -> bool: + """Reports whether the connection to MySQL Server is available""" + if self._cmysql: + self.handle_unread_result() + return self._cmysql.ping() + + return False + + def ping(self, reconnect: bool = False, attempts: int = 1, delay: int = 0) -> None: + """Check availability of the MySQL server + + When reconnect is set to True, one or more attempts are made to try + to reconnect to the MySQL server using the reconnect()-method. + + delay is the number of seconds to wait between each retry. + + When the connection is not available, an InterfaceError is raised. Use + the is_connected()-method if you just want to check the connection + without raising an error. + + Raises InterfaceError on errors. 
+ """ + self.handle_unread_result() + + try: + connected = self._cmysql.ping() + except AttributeError: + pass # Raise or reconnect later + else: + if connected: + return + + if reconnect: + self.reconnect(attempts=attempts, delay=delay) + else: + raise InterfaceError("Connection to MySQL is not available") + + def set_character_set_name(self, charset: str) -> None: + """Sets the default character set name for current connection.""" + self._cmysql.set_character_set(charset) + + def info_query(self, query: StrOrBytes) -> Optional[RowType]: + """Send a query which only returns 1 row""" + first_row = () + try: + self._cmysql.query(query) + if self._cmysql.have_result_set: + first_row = self._cmysql.fetch_row() + if self._cmysql.fetch_row(): + self._cmysql.free_result() + raise InterfaceError("Query should not return more than 1 row") + self._cmysql.free_result() + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + return first_row + + @property + def connection_id(self) -> Optional[int]: + """MySQL connection ID""" + try: + return self._cmysql.thread_id() + except MySQLInterfaceError: + pass # Just return None + + return None + + def get_rows( + self, + count: Optional[int] = None, + binary: bool = False, + columns: Optional[List[DescriptionType]] = None, + raw: Optional[bool] = None, + prep_stmt: Optional[MySQLPrepStmt] = None, + ) -> Tuple[List[RowType], Optional[CextEofPacketType]]: + """Get all or a subset of rows returned by the MySQL server""" + unread_result = prep_stmt.have_result_set if prep_stmt else self.unread_result + if not (self._cmysql and unread_result): + raise InternalError("No result set available") + + if raw is None: + raw = self._raw + + rows: List[Tuple[Any, ...]] = [] + if count is not None and count <= 0: + raise AttributeError("count should be 1 or higher, or None") + + counter = 0 + try: + fetch_row = prep_stmt.fetch_row if prep_stmt else self._cmysql.fetch_row + if self.converter: + # When using a converter class, the C extension should not + # convert the values. This can be accomplished by setting + # the raw option to True. 
+ self._cmysql.raw(True) + row = fetch_row() + while row: + if not self._raw and self.converter: + row = list(row) + for i, _ in enumerate(row): + if not raw: + row[i] = self.converter.to_python(self._columns[i], row[i]) + row = tuple(row) + rows.append(row) + counter += 1 + if count and counter == count: + break + row = fetch_row() + if not row: + _eof: Optional[CextEofPacketType] = self.fetch_eof_columns(prep_stmt)[ + "eof" + ] # type: ignore[assignment] + if prep_stmt: + prep_stmt.free_result() + self._unread_result = False + else: + self.free_result() + else: + _eof = None + except MySQLInterfaceError as err: + if prep_stmt: + prep_stmt.free_result() + raise InterfaceError(str(err)) from err + self.free_result() + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + return rows, _eof + + def get_row( + self, + binary: bool = False, + columns: Optional[List[DescriptionType]] = None, + raw: Optional[bool] = None, + prep_stmt: Optional[MySQLPrepStmt] = None, + ) -> Tuple[Optional[RowType], CextEofPacketType]: + """Get the next rows returned by the MySQL server""" + try: + rows, eof = self.get_rows( + count=1, + binary=binary, + columns=columns, + raw=raw, + prep_stmt=prep_stmt, + ) + if rows: + return (rows[0], eof) + return (None, eof) + except IndexError: + # No row available + return (None, None) + + def next_result(self) -> Optional[bool]: + """Reads the next result""" + if self._cmysql: + self._cmysql.consume_result() + return self._cmysql.next_result() + return None + + def free_result(self) -> None: + """Frees the result""" + if self._cmysql: + self._cmysql.free_result() + + def commit(self) -> None: + """Commit current transaction""" + if self._cmysql: + self.handle_unread_result() + self._cmysql.commit() + + def rollback(self) -> None: + """Rollback current transaction""" + if self._cmysql: + self._cmysql.consume_result() + self._cmysql.rollback() + + def cmd_init_db(self, database: str) -> None: + """Change the current database""" + try: + self._cmysql.select_db(database) + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + def fetch_eof_columns( + self, prep_stmt: Optional[MySQLPrepStmt] = None + ) -> CextResultType: + """Fetch EOF and column information""" + have_result_set = ( + prep_stmt.have_result_set if prep_stmt else self._cmysql.have_result_set + ) + if not have_result_set: + raise InterfaceError("No result set") + + fields = prep_stmt.fetch_fields() if prep_stmt else self._cmysql.fetch_fields() + self._columns = [] + for col in fields: + self._columns.append( + ( + col[4], + int(col[8]), + None, + None, + None, + None, + ~int(col[9]) & FieldFlag.NOT_NULL, + int(col[9]), + int(col[6]), + ) + ) + + return { + "eof": { + "status_flag": self._server_status, + "warning_count": self._cmysql.st_warning_count(), + }, + "columns": self._columns, + } + + def fetch_eof_status(self) -> Optional[CextEofPacketType]: + """Fetch EOF and status information""" + if self._cmysql: + return { + "warning_count": self._cmysql.st_warning_count(), + "field_count": self._cmysql.st_field_count(), + "insert_id": self._cmysql.insert_id(), + "affected_rows": self._cmysql.affected_rows(), + "server_status": self._server_status, + } + + return None + + def cmd_stmt_prepare(self, statement: bytes) -> MySQLPrepStmt: + """Prepares the SQL statement""" + if not self._cmysql: + raise OperationalError("MySQL Connection not available") + + try: + stmt = 
self._cmysql.stmt_prepare(statement) + stmt.converter_str_fallback = self._converter_str_fallback + return stmt + except MySQLInterfaceError as err: + raise InterfaceError(str(err)) from err + + def cmd_stmt_execute( + self, statement_id: MySQLPrepStmt, *args: Any + ) -> Optional[Union[CextEofPacketType, CextResultType]]: + """Executes the prepared statement""" + try: + statement_id.stmt_execute(*args) + except MySQLInterfaceError as err: + raise InterfaceError(str(err)) from err + + self._columns = [] + if not statement_id.have_result_set: + # No result + self._unread_result = False + return self.fetch_eof_status() + + self._unread_result = True + return self.fetch_eof_columns(statement_id) + + def cmd_stmt_close(self, statement_id: MySQLPrepStmt) -> None: + """Closes the prepared statement""" + if self._unread_result: + raise InternalError("Unread result found") + statement_id.stmt_close() + + def cmd_stmt_reset(self, statement_id: MySQLPrepStmt) -> None: + """Resets the prepared statement""" + if self._unread_result: + raise InternalError("Unread result found") + statement_id.stmt_reset() + + def cmd_query( + self, + query: StrOrBytes, + raw: Optional[bool] = None, + buffered: bool = False, + raw_as_string: bool = False, + ) -> Optional[Union[CextEofPacketType, CextResultType]]: + """Send a query to the MySQL server""" + self.handle_unread_result() + if raw is None: + raw = self._raw + try: + if not isinstance(query, bytes): + query = query.encode("utf-8") + self._cmysql.query( + query, + raw=raw, + buffered=buffered, + raw_as_string=raw_as_string, + query_attrs=self._query_attrs, + ) + except MySQLInterfaceError as err: + raise get_mysql_exception( + err.errno, msg=err.msg, sqlstate=err.sqlstate + ) from err + except AttributeError as err: + addr = ( + self._unix_socket if self._unix_socket else f"{self._host}:{self._port}" + ) + raise OperationalError( + errno=2055, values=(addr, "Connection not available.") + ) from err + + self._columns = [] + if not self._cmysql.have_result_set: + # No result + return self.fetch_eof_status() + + return self.fetch_eof_columns() + + _execute_query = cmd_query + + def cursor( + self, + buffered: Optional[bool] = None, + raw: Optional[bool] = None, + prepared: Optional[bool] = None, + cursor_class: Optional[Type[CMySQLCursor]] = None, + dictionary: Optional[bool] = None, + named_tuple: Optional[bool] = None, + ) -> CMySQLCursor: + """Instantiates and returns a cursor using C Extension + + By default, CMySQLCursor is returned. Depending on the options + while connecting, a buffered and/or raw cursor is instantiated + instead. Also depending upon the cursor options, rows can be + returned as dictionary or named tuple. + + Dictionary and namedtuple based cursors are available with buffered + output but not raw. + + It is possible to also give a custom cursor through the + cursor_class parameter, but it needs to be a subclass of + mysql.connector.cursor_cext.CMySQLCursor. + + Raises ProgrammingError when cursor_class is not a subclass of + CursorBase. Raises ValueError when cursor is not available. + + Returns instance of CMySQLCursor or subclass. 
+ + :param buffered: Return a buffered cursor + :param raw: Return a raw cursor + :param prepared: Return a cursor which uses prepared statements + :param cursor_class: Use a custom cursor class + :param dictionary: Rows are returned as dictionaries + :param named_tuple: Rows are returned as named tuples + :return: Subclass of CMySQLCursor + :rtype: CMySQLCursor or subclass + """ + self.handle_unread_result(prepared) + if not self.is_connected(): + raise OperationalError("MySQL Connection not available.") + if cursor_class is not None: + if not issubclass(cursor_class, CMySQLCursor): + raise ProgrammingError( + "Cursor class needs to be a subclass of cursor_cext.CMySQLCursor" + ) + return (cursor_class)(self) + + buffered = buffered or self._buffered + raw = raw or self._raw + + cursor_type = 0 + if buffered is True: + cursor_type |= 1 + if raw is True: + cursor_type |= 2 + if dictionary is True: + cursor_type |= 4 + if named_tuple is True: + cursor_type |= 8 + if prepared is True: + cursor_type |= 16 + + types = { + 0: CMySQLCursor, # 0 + 1: CMySQLCursorBuffered, + 2: CMySQLCursorRaw, + 3: CMySQLCursorBufferedRaw, + 4: CMySQLCursorDict, + 5: CMySQLCursorBufferedDict, + 8: CMySQLCursorNamedTuple, + 9: CMySQLCursorBufferedNamedTuple, + 16: CMySQLCursorPrepared, + 20: CMySQLCursorPreparedDict, + } + try: + return (types[cursor_type])(self) + except KeyError: + args = ("buffered", "raw", "dictionary", "named_tuple", "prepared") + raise ValueError( + "Cursor not available with given criteria: " + + ", ".join([args[i] for i in range(5) if cursor_type & (1 << i) != 0]) + ) from None + + @property + def num_rows(self) -> int: + """Returns number of rows of current result set""" + if not self._cmysql.have_result_set: + raise InterfaceError("No result set") + + return self._cmysql.num_rows() + + @property + def warning_count(self) -> int: + """Returns number of warnings""" + if not self._cmysql: + return 0 + + return self._cmysql.warning_count() + + @property + def result_set_available(self) -> bool: + """Check if a result set is available""" + if not self._cmysql: + return False + + return self._cmysql.have_result_set + + @property # type: ignore[misc] + def unread_result(self) -> bool: + """Check if there are unread results or rows""" + return self.result_set_available + + @property + def more_results(self) -> bool: + """Check if there are more results""" + return self._cmysql.more_results() + + def prepare_for_mysql( + self, params: ParamsSequenceOrDictType + ) -> Union[Sequence[bytes], Dict[str, bytes],]: + """Prepare parameters for statements + + This method is used by cursors to prepare the parameters found in + the list/tuple or dict params. + + Returns a sequence or a dict.
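# Sketch of the two parameter shapes accepted by prepare_for_mysql(), whose
# body follows: sequences yield a sequence of escaped, quoted byte strings,
# dicts a mapping of the same. Values are illustrative; assumes an open
# C-extension connection `cnx`.
print(cnx.prepare_for_mysql((1, "O'Brien")))       # e.g. (b'1', b"'O\\'Brien'")
print(cnx.prepare_for_mysql({"name": "O'Brien"}))  # e.g. {'name': b"'O\\'Brien'"}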
+ """ + result: Union[List[Any], Dict[str, Any]] = [] + if isinstance(params, (list, tuple)): + if self.converter: + result = [ + self.converter.quote( + self.converter.escape(self.converter.to_mysql(value)) + ) + for value in params + ] + else: + result = self._cmysql.convert_to_mysql(*params) + elif isinstance(params, dict): + result = {} + if self.converter: + for key, value in params.items(): + result[key] = self.converter.quote( + self.converter.escape(self.converter.to_mysql(value)) + ) + else: + for key, value in params.items(): + result[key] = self._cmysql.convert_to_mysql(value)[0] + else: + raise ProgrammingError( + f"Could not process parameters: {type(params).__name__}({params})," + " it must be of type list, tuple or dict" + ) + + return result + + def consume_results(self) -> None: + """Consume the current result + + This method consume the result by reading (consuming) all rows. + """ + self._cmysql.consume_result() + + def cmd_change_user( + self, + username: str = "", + password: str = "", + database: str = "", + charset: int = 45, + password1: str = "", + password2: str = "", + password3: str = "", + oci_config_file: Optional[str] = None, + ) -> None: + """Change the current logged in user""" + try: + self._cmysql.change_user( + username, + password, + database, + password1, + password2, + password3, + oci_config_file, + ) + + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + self._charset_id = charset + self._post_connection() + + def cmd_reset_connection(self) -> bool: + """Resets the session state without re-authenticating + + Reset command only works on MySQL server 5.7.3 or later. + The result is True for a successful reset otherwise False. + + Returns bool + """ + res = self._cmysql.reset_connection() + if res: + self._post_connection() + return res + + def cmd_refresh(self, options: int) -> Optional[CextEofPacketType]: + """Send the Refresh command to the MySQL server""" + try: + self.handle_unread_result() + self._cmysql.refresh(options) + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + return self.fetch_eof_status() + + def cmd_quit(self) -> None: + """Close the current connection with the server""" + self.close() + + def cmd_shutdown(self, shutdown_type: Optional[int] = None) -> None: + """Shut down the MySQL Server""" + if not self._cmysql: + raise OperationalError("MySQL Connection not available") + + if shutdown_type: + if not ShutdownType.get_info(shutdown_type): + raise InterfaceError("Invalid shutdown type") + level = shutdown_type + else: + level = ShutdownType.SHUTDOWN_DEFAULT + + try: + self._cmysql.shutdown(level) + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + self.close() + + def cmd_statistics(self) -> StatsPacketType: + """Return statistics from the MySQL server""" + self.handle_unread_result() + + try: + stat = self._cmysql.stat() + return MySQLProtocol().parse_statistics(stat, with_header=False) + except (MySQLInterfaceError, InterfaceError) as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + def cmd_process_kill(self, mysql_pid: int) -> None: + """Kill a MySQL process""" + if not isinstance(mysql_pid, int): + raise ValueError("MySQL PID must be int") + self.info_query(f"KILL {mysql_pid}") + + def cmd_debug(self) -> Any: + """Send the 
+        raise NotImplementedError
+
+    def cmd_ping(self) -> Any:
+        """Send the PING command"""
+        raise NotImplementedError
+
+    def cmd_query_iter(self, statements: Any) -> Any:
+        """Send one or more statements to the MySQL server"""
+        raise NotImplementedError
+
+    def cmd_stmt_send_long_data(
+        self, statement_id: Any, param_id: Any, data: Any
+    ) -> Any:
+        """Send data for a column"""
+        raise NotImplementedError
+
+    def handle_unread_result(self, prepared: bool = False) -> None:
+        """Check whether there is an unread result"""
+        unread_result = self._unread_result if prepared is True else self.unread_result
+        if self.can_consume_results:
+            self.consume_results()
+        elif unread_result:
+            raise InternalError("Unread result found")
+
+    def reset_session(
+        self,
+        user_variables: Optional[Dict[str, Any]] = None,
+        session_variables: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Clears the current active session
+
+        This method resets the session state. If the MySQL server is 5.7.3
+        or later, the active session is reset without re-authenticating;
+        for older server versions the session is reset by re-authenticating.
+
+        It is possible to provide a sequence of variables and their values to
+        be set after clearing the session. This is possible for both user
+        defined variables and session variables.
+        This method takes two arguments, user_variables and session_variables,
+        which are dictionaries.
+
+        Raises OperationalError if not connected, InternalError if there are
+        unread results and InterfaceError on errors.
+        """
+        if not self.is_connected():
+            raise OperationalError("MySQL Connection not available.")
+
+        if not self.cmd_reset_connection():
+            try:
+                self.cmd_change_user(
+                    self._user,
+                    self._password,
+                    self._database,
+                    self._charset_id,
+                    self._password1,
+                    self._password2,
+                    self._password3,
+                    self._oci_config_file,
+                )
+            except ProgrammingError:
+                self.reconnect()
+
+        if user_variables or session_variables:
+            cur = self.cursor()
+            if user_variables:
+                for key, value in user_variables.items():
+                    cur.execute(f"SET @`{key}` = %s", (value,))
+            if session_variables:
+                for key, value in session_variables.items():
+                    cur.execute(f"SET SESSION `{key}` = %s", (value,))
+            cur.close()
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/constants.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/constants.py
new file mode 100644
index 00000000..c9fcda7d
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/constants.py
@@ -0,0 +1,1111 @@
+# Copyright (c) 2009, 2022, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2.0, as
+# published by the Free Software Foundation.
+#
+# This program is also distributed with certain software (including
+# but not limited to OpenSSL) that is licensed under separate terms,
+# as designated in a particular file or component or in included license
+# documentation. The authors of MySQL hereby grant you an
+# additional permission to link the program and your derivative works
+# with the separately licensed software that they have included with
+# MySQL.
+# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""Various MySQL constants and character sets.""" + +import warnings + +from abc import ABC, ABCMeta +from typing import Dict, List, Optional, Sequence, Tuple, Union, ValuesView + +from .charsets import MYSQL_CHARACTER_SETS, MYSQL_CHARACTER_SETS_57 +from .errors import ProgrammingError + +MAX_PACKET_LENGTH: int = 16777215 +NET_BUFFER_LENGTH: int = 8192 +MAX_MYSQL_TABLE_COLUMNS: int = 4096 +# Flag used to send the Query Attributes with 0 (or more) parameters. +PARAMETER_COUNT_AVAILABLE: int = 8 + +DEFAULT_CONFIGURATION: Dict[str, Optional[Union[str, bool, int]]] = { + "database": None, + "user": "", + "password": "", + "password1": "", + "password2": "", + "password3": "", + "host": "127.0.0.1", + "port": 3306, + "unix_socket": None, + "use_unicode": True, + "charset": "utf8mb4", + "collation": None, + "converter_class": None, + "converter_str_fallback": False, + "autocommit": False, + "time_zone": None, + "sql_mode": None, + "get_warnings": False, + "raise_on_warnings": False, + "connection_timeout": None, + "client_flags": 0, + "compress": False, + "buffered": False, + "raw": False, + "ssl_ca": None, + "ssl_cert": None, + "ssl_key": None, + "ssl_verify_cert": False, + "ssl_verify_identity": False, + "ssl_cipher": None, + "tls_ciphersuites": None, + "ssl_disabled": False, + "tls_versions": None, + "passwd": None, + "db": None, + "connect_timeout": None, + "dsn": None, + "force_ipv6": False, + "auth_plugin": None, + "allow_local_infile": False, + "allow_local_infile_in_path": None, + "consume_results": False, + "conn_attrs": None, + "dns_srv": False, + "use_pure": False, + "krb_service_principal": None, + "oci_config_file": None, + "fido_callback": None, + "kerberos_auth_mode": None, + "init_command": None, +} + +CNX_POOL_ARGS: Tuple[str, str, str] = ("pool_name", "pool_size", "pool_reset_session") + +TLS_VERSIONS: List[str] = ["TLSv1.2", "TLSv1.3"] + +DEPRECATED_TLS_VERSIONS: List[str] = ["TLSv1", "TLSv1.1"] + + +def flag_is_set(flag: int, flags: int) -> bool: + """Checks if the flag is set + + Returns boolean""" + if (flags & flag) > 0: + return True + return False + + +def _obsolete_option(name: str, new_name: str, value: int) -> int: + warnings.warn( + f'The option "{name}" has been deprecated, use "{new_name}" instead.', + category=DeprecationWarning, + ) + return value + + +class _Constants(ABC): + """Base class for constants.""" + + prefix: str = "" + desc: Dict[str, Tuple[int, str]] = {} + + @classmethod + def get_desc(cls, name: str) -> Optional[str]: + """Get description of given constant""" + try: + return cls.desc[name][1] + except (IndexError, KeyError): + return None + + @classmethod + def get_info(cls, setid: int) -> Union[Optional[str], Tuple[str, str]]: + """Get information about given constant""" + for name, info in cls.desc.items(): + if 
info[0] == setid: + return name + return None + + @classmethod + def get_full_info(cls) -> Union[str, Sequence[str]]: + """get full information about given constant""" + res: Union[str, List[str]] = [] + try: + res = [f"{k} : {v[1]}" for k, v in cls.desc.items()] + except (AttributeError, IndexError) as err: + res = f"No information found in constant class. {err}" + + return res + + +class _Flags(_Constants): + """Base class for classes describing flags""" + + @classmethod + def get_bit_info(cls, value: int) -> List[str]: + """Get the name of all bits set + + Returns a list of strings.""" + res = [] + for name, info in cls.desc.items(): + if value & info[0]: + res.append(name) + return res + + +class FieldType(_Constants): + """MySQL Field Types""" + + prefix: str = "FIELD_TYPE_" + DECIMAL: int = 0x00 + TINY: int = 0x01 + SHORT: int = 0x02 + LONG: int = 0x03 + FLOAT: int = 0x04 + DOUBLE: int = 0x05 + NULL: int = 0x06 + TIMESTAMP: int = 0x07 + LONGLONG: int = 0x08 + INT24: int = 0x09 + DATE: int = 0x0A + TIME: int = 0x0B + DATETIME: int = 0x0C + YEAR: int = 0x0D + NEWDATE: int = 0x0E + VARCHAR: int = 0x0F + BIT: int = 0x10 + JSON: int = 0xF5 + NEWDECIMAL: int = 0xF6 + ENUM: int = 0xF7 + SET: int = 0xF8 + TINY_BLOB: int = 0xF9 + MEDIUM_BLOB: int = 0xFA + LONG_BLOB: int = 0xFB + BLOB: int = 0xFC + VAR_STRING: int = 0xFD + STRING: int = 0xFE + GEOMETRY: int = 0xFF + + desc: Dict[str, Tuple[int, str]] = { + "DECIMAL": (0x00, "DECIMAL"), + "TINY": (0x01, "TINY"), + "SHORT": (0x02, "SHORT"), + "LONG": (0x03, "LONG"), + "FLOAT": (0x04, "FLOAT"), + "DOUBLE": (0x05, "DOUBLE"), + "NULL": (0x06, "NULL"), + "TIMESTAMP": (0x07, "TIMESTAMP"), + "LONGLONG": (0x08, "LONGLONG"), + "INT24": (0x09, "INT24"), + "DATE": (0x0A, "DATE"), + "TIME": (0x0B, "TIME"), + "DATETIME": (0x0C, "DATETIME"), + "YEAR": (0x0D, "YEAR"), + "NEWDATE": (0x0E, "NEWDATE"), + "VARCHAR": (0x0F, "VARCHAR"), + "BIT": (0x10, "BIT"), + "JSON": (0xF5, "JSON"), + "NEWDECIMAL": (0xF6, "NEWDECIMAL"), + "ENUM": (0xF7, "ENUM"), + "SET": (0xF8, "SET"), + "TINY_BLOB": (0xF9, "TINY_BLOB"), + "MEDIUM_BLOB": (0xFA, "MEDIUM_BLOB"), + "LONG_BLOB": (0xFB, "LONG_BLOB"), + "BLOB": (0xFC, "BLOB"), + "VAR_STRING": (0xFD, "VAR_STRING"), + "STRING": (0xFE, "STRING"), + "GEOMETRY": (0xFF, "GEOMETRY"), + } + + @classmethod + def get_string_types(cls) -> List[int]: + """Get the list of all string types""" + return [ + cls.VARCHAR, + cls.ENUM, + cls.VAR_STRING, + cls.STRING, + ] + + @classmethod + def get_binary_types(cls) -> List[int]: + """Get the list of all binary types""" + return [ + cls.TINY_BLOB, + cls.MEDIUM_BLOB, + cls.LONG_BLOB, + cls.BLOB, + ] + + @classmethod + def get_number_types(cls) -> List[int]: + """Get the list of all number types""" + return [ + cls.DECIMAL, + cls.NEWDECIMAL, + cls.TINY, + cls.SHORT, + cls.LONG, + cls.FLOAT, + cls.DOUBLE, + cls.LONGLONG, + cls.INT24, + cls.BIT, + cls.YEAR, + ] + + @classmethod + def get_timestamp_types(cls) -> List[int]: + """Get the list of all timestamp types""" + return [ + cls.DATETIME, + cls.TIMESTAMP, + ] + + +class FieldFlag(_Flags): + """MySQL Field Flags + + Field flags as found in MySQL sources mysql-src/include/mysql_com.h + """ + + _prefix: str = "" + NOT_NULL: int = 1 << 0 + PRI_KEY: int = 1 << 1 + UNIQUE_KEY: int = 1 << 2 + MULTIPLE_KEY: int = 1 << 3 + BLOB: int = 1 << 4 + UNSIGNED: int = 1 << 5 + ZEROFILL: int = 1 << 6 + BINARY: int = 1 << 7 + + ENUM: int = 1 << 8 + AUTO_INCREMENT: int = 1 << 9 + TIMESTAMP: int = 1 << 10 + SET: int = 1 << 11 + + NO_DEFAULT_VALUE: int = 1 << 12 + ON_UPDATE_NOW: 
int = 1 << 13 + NUM: int = 1 << 14 + PART_KEY: int = 1 << 15 + GROUP: int = 1 << 14 # SAME AS NUM !!!!!!!???? + UNIQUE: int = 1 << 16 + BINCMP: int = 1 << 17 + + GET_FIXED_FIELDS: int = 1 << 18 + FIELD_IN_PART_FUNC: int = 1 << 19 + FIELD_IN_ADD_INDEX: int = 1 << 20 + FIELD_IS_RENAMED: int = 1 << 21 + + desc: Dict[str, Tuple[int, str]] = { + "NOT_NULL": (1 << 0, "Field can't be NULL"), + "PRI_KEY": (1 << 1, "Field is part of a primary key"), + "UNIQUE_KEY": (1 << 2, "Field is part of a unique key"), + "MULTIPLE_KEY": (1 << 3, "Field is part of a key"), + "BLOB": (1 << 4, "Field is a blob"), + "UNSIGNED": (1 << 5, "Field is unsigned"), + "ZEROFILL": (1 << 6, "Field is zerofill"), + "BINARY": (1 << 7, "Field is binary "), + "ENUM": (1 << 8, "field is an enum"), + "AUTO_INCREMENT": (1 << 9, "field is a autoincrement field"), + "TIMESTAMP": (1 << 10, "Field is a timestamp"), + "SET": (1 << 11, "field is a set"), + "NO_DEFAULT_VALUE": (1 << 12, "Field doesn't have default value"), + "ON_UPDATE_NOW": (1 << 13, "Field is set to NOW on UPDATE"), + "NUM": (1 << 14, "Field is num (for clients)"), + "PART_KEY": (1 << 15, "Intern; Part of some key"), + "GROUP": (1 << 14, "Intern: Group field"), # Same as NUM + "UNIQUE": (1 << 16, "Intern: Used by sql_yacc"), + "BINCMP": (1 << 17, "Intern: Used by sql_yacc"), + "GET_FIXED_FIELDS": (1 << 18, "Used to get fields in item tree"), + "FIELD_IN_PART_FUNC": (1 << 19, "Field part of partition func"), + "FIELD_IN_ADD_INDEX": (1 << 20, "Intern: Field used in ADD INDEX"), + "FIELD_IS_RENAMED": (1 << 21, "Intern: Field is being renamed"), + } + + +class ServerCmd(_Constants): + """MySQL Server Commands""" + + _prefix: str = "COM_" + SLEEP: int = 0 + QUIT: int = 1 + INIT_DB: int = 2 + QUERY: int = 3 + FIELD_LIST: int = 4 + CREATE_DB: int = 5 + DROP_DB: int = 6 + REFRESH: int = 7 + SHUTDOWN: int = 8 + STATISTICS: int = 9 + PROCESS_INFO: int = 10 + CONNECT: int = 11 + PROCESS_KILL: int = 12 + DEBUG: int = 13 + PING: int = 14 + TIME: int = 15 + DELAYED_INSERT: int = 16 + CHANGE_USER: int = 17 + BINLOG_DUMP: int = 18 + TABLE_DUMP: int = 19 + CONNECT_OUT: int = 20 + REGISTER_REPLICA: int = 21 + STMT_PREPARE: int = 22 + STMT_EXECUTE: int = 23 + STMT_SEND_LONG_DATA: int = 24 + STMT_CLOSE: int = 25 + STMT_RESET: int = 26 + SET_OPTION: int = 27 + STMT_FETCH: int = 28 + DAEMON: int = 29 + BINLOG_DUMP_GTID: int = 30 + RESET_CONNECTION: int = 31 + + desc: Dict[str, Tuple[int, str]] = { + "SLEEP": (0, "SLEEP"), + "QUIT": (1, "QUIT"), + "INIT_DB": (2, "INIT_DB"), + "QUERY": (3, "QUERY"), + "FIELD_LIST": (4, "FIELD_LIST"), + "CREATE_DB": (5, "CREATE_DB"), + "DROP_DB": (6, "DROP_DB"), + "REFRESH": (7, "REFRESH"), + "SHUTDOWN": (8, "SHUTDOWN"), + "STATISTICS": (9, "STATISTICS"), + "PROCESS_INFO": (10, "PROCESS_INFO"), + "CONNECT": (11, "CONNECT"), + "PROCESS_KILL": (12, "PROCESS_KILL"), + "DEBUG": (13, "DEBUG"), + "PING": (14, "PING"), + "TIME": (15, "TIME"), + "DELAYED_INSERT": (16, "DELAYED_INSERT"), + "CHANGE_USER": (17, "CHANGE_USER"), + "BINLOG_DUMP": (18, "BINLOG_DUMP"), + "TABLE_DUMP": (19, "TABLE_DUMP"), + "CONNECT_OUT": (20, "CONNECT_OUT"), + "REGISTER_REPLICA": (21, "REGISTER_REPLICA"), + "STMT_PREPARE": (22, "STMT_PREPARE"), + "STMT_EXECUTE": (23, "STMT_EXECUTE"), + "STMT_SEND_LONG_DATA": (24, "STMT_SEND_LONG_DATA"), + "STMT_CLOSE": (25, "STMT_CLOSE"), + "STMT_RESET": (26, "STMT_RESET"), + "SET_OPTION": (27, "SET_OPTION"), + "STMT_FETCH": (28, "STMT_FETCH"), + "DAEMON": (29, "DAEMON"), + "BINLOG_DUMP_GTID": (30, "BINLOG_DUMP_GTID"), + "RESET_CONNECTION": (31, 
"RESET_CONNECTION"), + } + + +class ClientFlag(_Flags): + """MySQL Client Flags + + Client options as found in the MySQL sources mysql-src/include/mysql_com.h + """ + + LONG_PASSWD: int = 1 << 0 + FOUND_ROWS: int = 1 << 1 + LONG_FLAG: int = 1 << 2 + CONNECT_WITH_DB: int = 1 << 3 + NO_SCHEMA: int = 1 << 4 + COMPRESS: int = 1 << 5 + ODBC: int = 1 << 6 + LOCAL_FILES: int = 1 << 7 + IGNORE_SPACE: int = 1 << 8 + PROTOCOL_41: int = 1 << 9 + INTERACTIVE: int = 1 << 10 + SSL: int = 1 << 11 + IGNORE_SIGPIPE: int = 1 << 12 + TRANSACTIONS: int = 1 << 13 + RESERVED: int = 1 << 14 + SECURE_CONNECTION: int = 1 << 15 + MULTI_STATEMENTS: int = 1 << 16 + MULTI_RESULTS: int = 1 << 17 + PS_MULTI_RESULTS: int = 1 << 18 + PLUGIN_AUTH: int = 1 << 19 + CONNECT_ARGS: int = 1 << 20 + PLUGIN_AUTH_LENENC_CLIENT_DATA: int = 1 << 21 + CAN_HANDLE_EXPIRED_PASSWORDS: int = 1 << 22 + SESION_TRACK: int = 1 << 23 + DEPRECATE_EOF: int = 1 << 24 + CLIENT_QUERY_ATTRIBUTES: int = 1 << 27 + SSL_VERIFY_SERVER_CERT: int = 1 << 30 + REMEMBER_OPTIONS: int = 1 << 31 + MULTI_FACTOR_AUTHENTICATION: int = 1 << 28 + + desc: Dict[str, Tuple[int, str]] = { + "LONG_PASSWD": (1 << 0, "New more secure passwords"), + "FOUND_ROWS": (1 << 1, "Found instead of affected rows"), + "LONG_FLAG": (1 << 2, "Get all column flags"), + "CONNECT_WITH_DB": (1 << 3, "One can specify db on connect"), + "NO_SCHEMA": (1 << 4, "Don't allow database.table.column"), + "COMPRESS": (1 << 5, "Can use compression protocol"), + "ODBC": (1 << 6, "ODBC client"), + "LOCAL_FILES": (1 << 7, "Can use LOAD DATA LOCAL"), + "IGNORE_SPACE": (1 << 8, "Ignore spaces before ''"), + "PROTOCOL_41": (1 << 9, "New 4.1 protocol"), + "INTERACTIVE": (1 << 10, "This is an interactive client"), + "SSL": (1 << 11, "Switch to SSL after handshake"), + "IGNORE_SIGPIPE": (1 << 12, "IGNORE sigpipes"), + "TRANSACTIONS": (1 << 13, "Client knows about transactions"), + "RESERVED": (1 << 14, "Old flag for 4.1 protocol"), + "SECURE_CONNECTION": (1 << 15, "New 4.1 authentication"), + "MULTI_STATEMENTS": (1 << 16, "Enable/disable multi-stmt support"), + "MULTI_RESULTS": (1 << 17, "Enable/disable multi-results"), + "PS_MULTI_RESULTS": (1 << 18, "Multi-results in PS-protocol"), + "PLUGIN_AUTH": (1 << 19, "Client supports plugin authentication"), + "CONNECT_ARGS": (1 << 20, "Client supports connection attributes"), + "PLUGIN_AUTH_LENENC_CLIENT_DATA": ( + 1 << 21, + "Enable authentication response packet to be larger than 255 bytes", + ), + "CAN_HANDLE_EXPIRED_PASSWORDS": ( + 1 << 22, + "Don't close the connection for a connection with expired password", + ), + "SESION_TRACK": ( + 1 << 23, + "Capable of handling server state change information", + ), + "DEPRECATE_EOF": (1 << 24, "Client no longer needs EOF packet"), + "CLIENT_QUERY_ATTRIBUTES": ( + 1 << 27, + "Support optional extension for query parameters", + ), + "SSL_VERIFY_SERVER_CERT": (1 << 30, ""), + "REMEMBER_OPTIONS": (1 << 31, ""), + } + + default: List[int] = [ + LONG_PASSWD, + LONG_FLAG, + CONNECT_WITH_DB, + PROTOCOL_41, + TRANSACTIONS, + SECURE_CONNECTION, + MULTI_STATEMENTS, + MULTI_RESULTS, + CONNECT_ARGS, + ] + + @classmethod + def get_default(cls) -> int: + """Get the default client options set + + Returns a flag with all the default client options set""" + flags = 0 + for option in cls.default: + flags |= option + return flags + + +class ServerFlag(_Flags): + """MySQL Server Flags + + Server flags as found in the MySQL sources mysql-src/include/mysql_com.h + """ + + _prefix: str = "SERVER_" + STATUS_IN_TRANS: int = 1 << 0 + 
STATUS_AUTOCOMMIT: int = 1 << 1 + MORE_RESULTS_EXISTS: int = 1 << 3 + QUERY_NO_GOOD_INDEX_USED: int = 1 << 4 + QUERY_NO_INDEX_USED: int = 1 << 5 + STATUS_CURSOR_EXISTS: int = 1 << 6 + STATUS_LAST_ROW_SENT: int = 1 << 7 + STATUS_DB_DROPPED: int = 1 << 8 + STATUS_NO_BACKSLASH_ESCAPES: int = 1 << 9 + SERVER_STATUS_METADATA_CHANGED: int = 1 << 10 + SERVER_QUERY_WAS_SLOW: int = 1 << 11 + SERVER_PS_OUT_PARAMS: int = 1 << 12 + SERVER_STATUS_IN_TRANS_READONLY: int = 1 << 13 + SERVER_SESSION_STATE_CHANGED: int = 1 << 14 + + desc: Dict[str, Tuple[int, str]] = { + "SERVER_STATUS_IN_TRANS": (1 << 0, "Transaction has started"), + "SERVER_STATUS_AUTOCOMMIT": (1 << 1, "Server in auto_commit mode"), + "SERVER_MORE_RESULTS_EXISTS": ( + 1 << 3, + "Multi query - next query exists", + ), + "SERVER_QUERY_NO_GOOD_INDEX_USED": (1 << 4, ""), + "SERVER_QUERY_NO_INDEX_USED": (1 << 5, ""), + "SERVER_STATUS_CURSOR_EXISTS": ( + 1 << 6, + "Set when server opened a read-only non-scrollable cursor for a query.", + ), + "SERVER_STATUS_LAST_ROW_SENT": ( + 1 << 7, + "Set when a read-only cursor is exhausted", + ), + "SERVER_STATUS_DB_DROPPED": (1 << 8, "A database was dropped"), + "SERVER_STATUS_NO_BACKSLASH_ESCAPES": (1 << 9, ""), + "SERVER_STATUS_METADATA_CHANGED": ( + 1024, + "Set if after a prepared statement " + "reprepare we discovered that the " + "new statement returns a different " + "number of result set columns.", + ), + "SERVER_QUERY_WAS_SLOW": (2048, ""), + "SERVER_PS_OUT_PARAMS": ( + 4096, + "To mark ResultSet containing output parameter values.", + ), + "SERVER_STATUS_IN_TRANS_READONLY": ( + 8192, + "Set if multi-statement transaction is a read-only transaction.", + ), + "SERVER_SESSION_STATE_CHANGED": ( + 1 << 14, + "Session state has changed on the " + "server because of the execution of " + "the last statement", + ), + } + + +class RefreshOptionMeta(ABCMeta): + """RefreshOption Metaclass.""" + + @property + def SLAVE(self) -> int: # pylint: disable=bad-mcs-method-argument,invalid-name + """Return the deprecated alias of RefreshOption.REPLICA. + + Raises a warning about this attribute deprecation. + """ + return _obsolete_option( + "RefreshOption.SLAVE", + "RefreshOption.REPLICA", + RefreshOption.REPLICA, + ) + + +class RefreshOption(_Constants, metaclass=RefreshOptionMeta): + """MySQL Refresh command options. + + Options used when sending the COM_REFRESH server command. + """ + + _prefix: str = "REFRESH_" + GRANT: int = 1 << 0 + LOG: int = 1 << 1 + TABLES: int = 1 << 2 + HOST: int = 1 << 3 + STATUS: int = 1 << 4 + THREADS: int = 1 << 5 + REPLICA: int = 1 << 6 + + desc: Dict[str, Tuple[int, str]] = { + "GRANT": (1 << 0, "Refresh grant tables"), + "LOG": (1 << 1, "Start on new log file"), + "TABLES": (1 << 2, "close all tables"), + "HOST": (1 << 3, "Flush host cache"), + "STATUS": (1 << 4, "Flush status variables"), + "THREADS": (1 << 5, "Flush thread cache"), + "REPLICA": (1 << 6, "Reset source info and restart replica thread"), + "SLAVE": (1 << 6, "Deprecated option; use REPLICA instead."), + } + + +class ShutdownType(_Constants): + """MySQL Shutdown types + + Shutdown types used by the COM_SHUTDOWN server command. 
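+
+    Illustrative example (assumes an open connection cnx with the
+    SHUTDOWN privilege; hypothetical usage, not executed here):
+
+        cnx.cmd_shutdown(ShutdownType.SHUTDOWN_WAIT_CONNECTIONS)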
+
+    """
+
+    _prefix: str = ""
+    SHUTDOWN_DEFAULT: int = 0
+    SHUTDOWN_WAIT_CONNECTIONS: int = 1
+    SHUTDOWN_WAIT_TRANSACTIONS: int = 2
+    SHUTDOWN_WAIT_UPDATES: int = 8
+    SHUTDOWN_WAIT_ALL_BUFFERS: int = 16
+    SHUTDOWN_WAIT_CRITICAL_BUFFERS: int = 17
+    KILL_QUERY: int = 254
+    KILL_CONNECTION: int = 255
+
+    desc: Dict[str, Tuple[int, str]] = {
+        "SHUTDOWN_DEFAULT": (
+            SHUTDOWN_DEFAULT,
+            "defaults to SHUTDOWN_WAIT_ALL_BUFFERS",
+        ),
+        "SHUTDOWN_WAIT_CONNECTIONS": (
+            SHUTDOWN_WAIT_CONNECTIONS,
+            "wait for existing connections to finish",
+        ),
+        "SHUTDOWN_WAIT_TRANSACTIONS": (
+            SHUTDOWN_WAIT_TRANSACTIONS,
+            "wait for existing trans to finish",
+        ),
+        "SHUTDOWN_WAIT_UPDATES": (
+            SHUTDOWN_WAIT_UPDATES,
+            "wait for existing updates to finish",
+        ),
+        "SHUTDOWN_WAIT_ALL_BUFFERS": (
+            SHUTDOWN_WAIT_ALL_BUFFERS,
+            "flush InnoDB and other storage engine buffers",
+        ),
+        "SHUTDOWN_WAIT_CRITICAL_BUFFERS": (
+            SHUTDOWN_WAIT_CRITICAL_BUFFERS,
+            "don't flush InnoDB buffers, flush other storage engines' buffers",
+        ),
+        "KILL_QUERY": (KILL_QUERY, "(no description)"),
+        "KILL_CONNECTION": (KILL_CONNECTION, "(no description)"),
+    }
+
+
+class CharacterSet(_Constants):
+    """MySQL supported character sets and collations
+
+    List of character sets with their collations supported by MySQL. This
+    maps to the character set we get from the server within the handshake
+    packet.
+
+    The list is hardcoded so we avoid a database query when getting the
+    name of the used character set or collation.
+    """
+
+    # Use LTS character set as default
+    desc: List[
+        Optional[Tuple[str, str, bool]]
+    ] = MYSQL_CHARACTER_SETS_57  # type: ignore[assignment]
+    mysql_version: Tuple[int, ...] = (5, 7)
+
+    # Multi-byte character sets which use 5c (backslash) in characters
+    slash_charsets: Tuple[int, ...] = (1, 13, 28, 84, 87, 88)
+
+    @classmethod
+    def set_mysql_version(cls, version: Tuple[int, ...]) -> None:
+        """Set the MySQL major version and switch the charset mapping if it is 8.0.
+
+        Args:
+            version (tuple): MySQL version tuple.
+        """
+        cls.mysql_version = version[:2]
+        if cls.mysql_version == (8, 0):
+            cls.desc = MYSQL_CHARACTER_SETS
+
+    @classmethod
+    def get_info(cls, setid: int) -> Tuple[str, str]:
+        """Retrieves character set information as tuple using an ID
+
+        Retrieves character set and collation information based on the
+        given MySQL ID.
+
+        Raises ProgrammingError when character set is not supported.
+
+        Returns a tuple.
+        """
+        try:
+            return cls.desc[setid][0:2]
+        except IndexError:
+            raise ProgrammingError(f"Character set '{setid}' unsupported") from None
+
+    @classmethod
+    def get_desc(cls, name: int) -> str:  # type: ignore[override]
+        """Retrieves character set information as string using an ID
+
+        Retrieves character set and collation information based on the
+        given MySQL ID.
+
+        Returns a string.
+        """
+        charset, collation = cls.get_info(name)
+        return f"{charset}/{collation}"
+
+    @classmethod
+    def get_default_collation(cls, charset: Union[int, str]) -> Tuple[str, str, int]:
+        """Retrieves the default collation for given character set
+
+        Raises ProgrammingError when character set is not supported.
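+
+        Illustrative example (IDs taken from the bundled 5.7 mapping that
+        is used by default; they may differ for other server versions):
+
+            >>> CharacterSet.get_default_collation("utf8mb4")
+            ('utf8mb4_general_ci', 'utf8mb4', 45)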
+
+        Returns a tuple (collation, charset, index)
+        """
+        if isinstance(charset, int):
+            try:
+                info = cls.desc[charset]
+                return info[1], info[0], charset
+            except (IndexError, KeyError):
+                raise ProgrammingError(
+                    f"Character set ID '{charset}' unsupported"
+                ) from None
+
+        for cid, info in enumerate(cls.desc):
+            if info is None:
+                continue
+            if info[0] == charset and info[2] is True:
+                return info[1], info[0], cid
+
+        raise ProgrammingError(f"Character set '{charset}' unsupported")
+
+    @classmethod
+    def get_charset_info(
+        cls, charset: Optional[Union[int, str]] = None, collation: Optional[str] = None
+    ) -> Tuple[int, str, str]:
+        """Get character set information using charset name and/or collation
+
+        Retrieves character set and collation information given character
+        set name and/or a collation name.
+        If charset is an integer, it will look up the character set based
+        on the MySQL ID.
+        For example:
+            get_charset_info('utf8', None)
+            get_charset_info(collation='utf8_general_ci')
+            get_charset_info(47)
+
+        Raises ProgrammingError when character set is not supported.
+
+        Returns a tuple with (id, characterset name, collation)
+        """
+        info: Optional[Union[Tuple[str, str, bool], Tuple[str, str, int]]] = None
+        if isinstance(charset, int):
+            try:
+                info = cls.desc[charset]
+                return (charset, info[0], info[1])
+            except IndexError:
+                raise ProgrammingError(
+                    f"Character set ID {charset} unknown"
+                ) from None
+
+        if charset in ("utf8", "utf-8") and cls.mysql_version == (8, 0):
+            charset = "utf8mb4"
+        if charset is not None and collation is None:
+            info = cls.get_default_collation(charset)
+            return (info[2], info[1], info[0])
+        if charset is None and collation is not None:
+            for cid, info in enumerate(cls.desc):
+                if info is None:
+                    continue
+                if collation == info[1]:
+                    return (cid, info[0], info[1])
+            raise ProgrammingError(f"Collation '{collation}' unknown")
+        for cid, info in enumerate(cls.desc):
+            if info is None:
+                continue
+            if info[0] == charset and info[1] == collation:
+                return (cid, info[0], info[1])
+        _ = cls.get_default_collation(charset)
+        raise ProgrammingError(f"Collation '{collation}' unknown")
+
+    @classmethod
+    def get_supported(cls) -> Tuple[str, ...]:
+        """Retrieves a list with names of all supported character sets
+
+        Returns a tuple.
+        """
+        res = []
+        for info in cls.desc:
+            if info and info[0] not in res:
+                res.append(info[0])
+        return tuple(res)
+
+
+class SQLMode(_Constants):
+    """MySQL SQL Modes
+
+    The numeric values of SQL Modes are not interesting; only the names
+    are used when setting the SQL_MODE system variable with the MySQL
+    SET command.
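+
+    Illustrative example (assumes an open cursor cur; hypothetical usage,
+    not executed here):
+
+        cur.execute(f"SET SESSION sql_mode = '{SQLMode.TRADITIONAL}'")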
+ + See http://dev.mysql.com/doc/refman/5.6/en/server-sql-mode.html + """ + + _prefix: str = "MODE_" + REAL_AS_FLOAT: str = "REAL_AS_FLOAT" + PIPES_AS_CONCAT: str = "PIPES_AS_CONCAT" + ANSI_QUOTES: str = "ANSI_QUOTES" + IGNORE_SPACE: str = "IGNORE_SPACE" + NOT_USED: str = "NOT_USED" + ONLY_FULL_GROUP_BY: str = "ONLY_FULL_GROUP_BY" + NO_UNSIGNED_SUBTRACTION: str = "NO_UNSIGNED_SUBTRACTION" + NO_DIR_IN_CREATE: str = "NO_DIR_IN_CREATE" + POSTGRESQL: str = "POSTGRESQL" + ORACLE: str = "ORACLE" + MSSQL: str = "MSSQL" + DB2: str = "DB2" + MAXDB: str = "MAXDB" + NO_KEY_OPTIONS: str = "NO_KEY_OPTIONS" + NO_TABLE_OPTIONS: str = "NO_TABLE_OPTIONS" + NO_FIELD_OPTIONS: str = "NO_FIELD_OPTIONS" + MYSQL323: str = "MYSQL323" + MYSQL40: str = "MYSQL40" + ANSI: str = "ANSI" + NO_AUTO_VALUE_ON_ZERO: str = "NO_AUTO_VALUE_ON_ZERO" + NO_BACKSLASH_ESCAPES: str = "NO_BACKSLASH_ESCAPES" + STRICT_TRANS_TABLES: str = "STRICT_TRANS_TABLES" + STRICT_ALL_TABLES: str = "STRICT_ALL_TABLES" + NO_ZERO_IN_DATE: str = "NO_ZERO_IN_DATE" + NO_ZERO_DATE: str = "NO_ZERO_DATE" + INVALID_DATES: str = "INVALID_DATES" + ERROR_FOR_DIVISION_BY_ZERO: str = "ERROR_FOR_DIVISION_BY_ZERO" + TRADITIONAL: str = "TRADITIONAL" + NO_AUTO_CREATE_USER: str = "NO_AUTO_CREATE_USER" + HIGH_NOT_PRECEDENCE: str = "HIGH_NOT_PRECEDENCE" + NO_ENGINE_SUBSTITUTION: str = "NO_ENGINE_SUBSTITUTION" + PAD_CHAR_TO_FULL_LENGTH: str = "PAD_CHAR_TO_FULL_LENGTH" + + @classmethod + def get_desc(cls, name: str) -> Optional[str]: + raise NotImplementedError + + @classmethod + def get_info(cls, setid: int) -> Optional[str]: + raise NotImplementedError + + @classmethod + def get_full_info(cls) -> Tuple[str, ...]: + """Returns a sequence of all available SQL Modes + + This class method returns a tuple containing all SQL Mode names. The + names will be alphabetically sorted. + + Returns a tuple. 
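+
+        Illustrative example (first two entries of the sorted result,
+        assuming the mode list above is unchanged):
+
+            >>> SQLMode.get_full_info()[:2]
+            ('ANSI', 'ANSI_QUOTES')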
+ """ + res = [] + for key in vars(cls).keys(): + if not key.startswith("_") and not hasattr(getattr(cls, key), "__call__"): + res.append(key) + return tuple(sorted(res)) + + +CONN_ATTRS_DN: List[str] = [ + "_pid", + "_platform", + "_source_host", + "_client_name", + "_client_license", + "_client_version", + "_os", + "_connector_name", + "_connector_license", + "_connector_version", +] + +# TLS v1.0 cipher suites IANI to OpenSSL name translation +TLSV1_CIPHER_SUITES: Dict[str, str] = { + "TLS_RSA_WITH_NULL_MD5": "NULL-MD5", + "TLS_RSA_WITH_NULL_SHA": "NULL-SHA", + "TLS_RSA_WITH_RC4_128_MD5": "RC4-MD5", + "TLS_RSA_WITH_RC4_128_SHA": "RC4-SHA", + "TLS_RSA_WITH_IDEA_CBC_SHA": "IDEA-CBC-SHA", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": "DES-CBC3-SHA", + "TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA": "Not implemented.", + "TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA": "Not implemented.", + "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA": "DHE-DSS-DES-CBC3-SHA", + "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA": "DHE-RSA-DES-CBC3-SHA", + "TLS_DH_anon_WITH_RC4_128_MD5": "ADH-RC4-MD5", + "TLS_DH_anon_WITH_3DES_EDE_CBC_SHA": "ADH-DES-CBC3-SHA", + # AES cipher suites from RFC3268, extending TLS v1.0 + "TLS_RSA_WITH_AES_128_CBC_SHA": "AES128-SHA", + "TLS_RSA_WITH_AES_256_CBC_SHA": "AES256-SHA", + "TLS_DH_DSS_WITH_AES_128_CBC_SHA": "DH-DSS-AES128-SHA", + "TLS_DH_DSS_WITH_AES_256_CBC_SHA": "DH-DSS-AES256-SHA", + "TLS_DH_RSA_WITH_AES_128_CBC_SHA": "DH-RSA-AES128-SHA", + "TLS_DH_RSA_WITH_AES_256_CBC_SHA": "DH-RSA-AES256-SHA", + "TLS_DHE_DSS_WITH_AES_128_CBC_SHA": "DHE-DSS-AES128-SHA", + "TLS_DHE_DSS_WITH_AES_256_CBC_SHA": "DHE-DSS-AES256-SHA", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA": "DHE-RSA-AES128-SHA", + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA": "DHE-RSA-AES256-SHA", + "TLS_DH_anon_WITH_AES_128_CBC_SHA": "ADH-AES128-SHA", + "TLS_DH_anon_WITH_AES_256_CBC_SHA": "ADH-AES256-SHA", + # Camellia cipher suites from RFC4132, extending TLS v1.0 + "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA": "CAMELLIA128-SHA", + "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA": "CAMELLIA256-SHA", + "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA": "DH-DSS-CAMELLIA128-SHA", + "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA": "DH-DSS-CAMELLIA256-SHA", + "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA": "DH-RSA-CAMELLIA128-SHA", + "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA": "DH-RSA-CAMELLIA256-SHA", + "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA": "DHE-DSS-CAMELLIA128-SHA", + "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA": "DHE-DSS-CAMELLIA256-SHA", + "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA": "DHE-RSA-CAMELLIA128-SHA", + "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA": "DHE-RSA-CAMELLIA256-SHA", + "TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA": "ADH-CAMELLIA128-SHA", + "TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA": "ADH-CAMELLIA256-SHA", + # SEED cipher suites from RFC4162, extending TLS v1.0 + "TLS_RSA_WITH_SEED_CBC_SHA": "SEED-SHA", + "TLS_DH_DSS_WITH_SEED_CBC_SHA": "DH-DSS-SEED-SHA", + "TLS_DH_RSA_WITH_SEED_CBC_SHA": "DH-RSA-SEED-SHA", + "TLS_DHE_DSS_WITH_SEED_CBC_SHA": "DHE-DSS-SEED-SHA", + "TLS_DHE_RSA_WITH_SEED_CBC_SHA": "DHE-RSA-SEED-SHA", + "TLS_DH_anon_WITH_SEED_CBC_SHA": "ADH-SEED-SHA", + # GOST cipher suites from draft-chudov-cryptopro-cptls, extending TLS v1.0 + "TLS_GOSTR341094_WITH_28147_CNT_IMIT": "GOST94-GOST89-GOST89", + "TLS_GOSTR341001_WITH_28147_CNT_IMIT": "GOST2001-GOST89-GOST89", + "TLS_GOSTR341094_WITH_NULL_GOSTR3411": "GOST94-NULL-GOST94", + "TLS_GOSTR341001_WITH_NULL_GOSTR3411": "GOST2001-NULL-GOST94", +} + +# TLS v1.1 cipher suites IANI to OpenSSL name translation +TLSV1_1_CIPHER_SUITES: Dict[str, str] = TLSV1_CIPHER_SUITES + +# TLS v1.2 cipher suites IANI 
to OpenSSL name translation +TLSV1_2_CIPHER_SUITES: Dict[str, str] = { + "TLS_RSA_WITH_NULL_SHA256": "NULL-SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA256": "AES128-SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA256": "AES256-SHA256", + "TLS_RSA_WITH_AES_128_GCM_SHA256": "AES128-GCM-SHA256", + "TLS_RSA_WITH_AES_256_GCM_SHA384": "AES256-GCM-SHA384", + "TLS_DH_RSA_WITH_AES_128_CBC_SHA256": "DH-RSA-AES128-SHA256", + "TLS_DH_RSA_WITH_AES_256_CBC_SHA256": "DH-RSA-AES256-SHA256", + "TLS_DH_RSA_WITH_AES_128_GCM_SHA256": "DH-RSA-AES128-GCM-SHA256", + "TLS_DH_RSA_WITH_AES_256_GCM_SHA384": "DH-RSA-AES256-GCM-SHA384", + "TLS_DH_DSS_WITH_AES_128_CBC_SHA256": "DH-DSS-AES128-SHA256", + "TLS_DH_DSS_WITH_AES_256_CBC_SHA256": "DH-DSS-AES256-SHA256", + "TLS_DH_DSS_WITH_AES_128_GCM_SHA256": "DH-DSS-AES128-GCM-SHA256", + "TLS_DH_DSS_WITH_AES_256_GCM_SHA384": "DH-DSS-AES256-GCM-SHA384", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256": "DHE-RSA-AES128-SHA256", + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256": "DHE-RSA-AES256-SHA256", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256": "DHE-RSA-AES128-GCM-SHA256", + "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384": "DHE-RSA-AES256-GCM-SHA384", + "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256": "DHE-DSS-AES128-SHA256", + "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256": "DHE-DSS-AES256-SHA256", + "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256": "DHE-DSS-AES128-GCM-SHA256", + "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384": "DHE-DSS-AES256-GCM-SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": "ECDHE-RSA-AES128-SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384": "ECDHE-RSA-AES256-SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": "ECDHE-RSA-AES128-GCM-SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": "ECDHE-RSA-AES256-GCM-SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": "ECDHE-ECDSA-AES128-SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384": "ECDHE-ECDSA-AES256-SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": "ECDHE-ECDSA-AES128-GCM-SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": "ECDHE-ECDSA-AES256-GCM-SHA384", + "TLS_DH_anon_WITH_AES_128_CBC_SHA256": "ADH-AES128-SHA256", + "TLS_DH_anon_WITH_AES_256_CBC_SHA256": "ADH-AES256-SHA256", + "TLS_DH_anon_WITH_AES_128_GCM_SHA256": "ADH-AES128-GCM-SHA256", + "TLS_DH_anon_WITH_AES_256_GCM_SHA384": "ADH-AES256-GCM-SHA384", + "RSA_WITH_AES_128_CCM": "AES128-CCM", + "RSA_WITH_AES_256_CCM": "AES256-CCM", + "DHE_RSA_WITH_AES_128_CCM": "DHE-RSA-AES128-CCM", + "DHE_RSA_WITH_AES_256_CCM": "DHE-RSA-AES256-CCM", + "RSA_WITH_AES_128_CCM_8": "AES128-CCM8", + "RSA_WITH_AES_256_CCM_8": "AES256-CCM8", + "DHE_RSA_WITH_AES_128_CCM_8": "DHE-RSA-AES128-CCM8", + "DHE_RSA_WITH_AES_256_CCM_8": "DHE-RSA-AES256-CCM8", + "ECDHE_ECDSA_WITH_AES_128_CCM": "ECDHE-ECDSA-AES128-CCM", + "ECDHE_ECDSA_WITH_AES_256_CCM": "ECDHE-ECDSA-AES256-CCM", + "ECDHE_ECDSA_WITH_AES_128_CCM_8": "ECDHE-ECDSA-AES128-CCM8", + "ECDHE_ECDSA_WITH_AES_256_CCM_8": "ECDHE-ECDSA-AES256-CCM8", + # ARIA cipher suites from RFC6209, extending TLS v1.2 + "TLS_RSA_WITH_ARIA_128_GCM_SHA256": "ARIA128-GCM-SHA256", + "TLS_RSA_WITH_ARIA_256_GCM_SHA384": "ARIA256-GCM-SHA384", + "TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256": "DHE-RSA-ARIA128-GCM-SHA256", + "TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384": "DHE-RSA-ARIA256-GCM-SHA384", + "TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256": "DHE-DSS-ARIA128-GCM-SHA256", + "TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384": "DHE-DSS-ARIA256-GCM-SHA384", + "TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256": "ECDHE-ECDSA-ARIA128-GCM-SHA256", + "TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384": "ECDHE-ECDSA-ARIA256-GCM-SHA384", + "TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256": 
"ECDHE-ARIA128-GCM-SHA256", + "TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384": "ECDHE-ARIA256-GCM-SHA384", + "TLS_PSK_WITH_ARIA_128_GCM_SHA256": "PSK-ARIA128-GCM-SHA256", + "TLS_PSK_WITH_ARIA_256_GCM_SHA384": "PSK-ARIA256-GCM-SHA384", + "TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256": "DHE-PSK-ARIA128-GCM-SHA256", + "TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384": "DHE-PSK-ARIA256-GCM-SHA384", + "TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256": "RSA-PSK-ARIA128-GCM-SHA256", + "TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384": "RSA-PSK-ARIA256-GCM-SHA384", + # Camellia HMAC-Based cipher suites from RFC6367, extending TLS v1.2 + "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256": "ECDHE-ECDSA-CAMELLIA128-SHA256", + "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384": "ECDHE-ECDSA-CAMELLIA256-SHA384", + "TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256": "ECDHE-RSA-CAMELLIA128-SHA256", + "TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384": "ECDHE-RSA-CAMELLIA256-SHA384", + # Pre-shared keying (PSK) cipher suites", + "PSK_WITH_NULL_SHA": "PSK-NULL-SHA", + "DHE_PSK_WITH_NULL_SHA": "DHE-PSK-NULL-SHA", + "RSA_PSK_WITH_NULL_SHA": "RSA-PSK-NULL-SHA", + "PSK_WITH_RC4_128_SHA": "PSK-RC4-SHA", + "PSK_WITH_3DES_EDE_CBC_SHA": "PSK-3DES-EDE-CBC-SHA", + "PSK_WITH_AES_128_CBC_SHA": "PSK-AES128-CBC-SHA", + "PSK_WITH_AES_256_CBC_SHA": "PSK-AES256-CBC-SHA", + "DHE_PSK_WITH_RC4_128_SHA": "DHE-PSK-RC4-SHA", + "DHE_PSK_WITH_3DES_EDE_CBC_SHA": "DHE-PSK-3DES-EDE-CBC-SHA", + "DHE_PSK_WITH_AES_128_CBC_SHA": "DHE-PSK-AES128-CBC-SHA", + "DHE_PSK_WITH_AES_256_CBC_SHA": "DHE-PSK-AES256-CBC-SHA", + "RSA_PSK_WITH_RC4_128_SHA": "RSA-PSK-RC4-SHA", + "RSA_PSK_WITH_3DES_EDE_CBC_SHA": "RSA-PSK-3DES-EDE-CBC-SHA", + "RSA_PSK_WITH_AES_128_CBC_SHA": "RSA-PSK-AES128-CBC-SHA", + "RSA_PSK_WITH_AES_256_CBC_SHA": "RSA-PSK-AES256-CBC-SHA", + "PSK_WITH_AES_128_GCM_SHA256": "PSK-AES128-GCM-SHA256", + "PSK_WITH_AES_256_GCM_SHA384": "PSK-AES256-GCM-SHA384", + "DHE_PSK_WITH_AES_128_GCM_SHA256": "DHE-PSK-AES128-GCM-SHA256", + "DHE_PSK_WITH_AES_256_GCM_SHA384": "DHE-PSK-AES256-GCM-SHA384", + "RSA_PSK_WITH_AES_128_GCM_SHA256": "RSA-PSK-AES128-GCM-SHA256", + "RSA_PSK_WITH_AES_256_GCM_SHA384": "RSA-PSK-AES256-GCM-SHA384", + "PSK_WITH_AES_128_CBC_SHA256": "PSK-AES128-CBC-SHA256", + "PSK_WITH_AES_256_CBC_SHA384": "PSK-AES256-CBC-SHA384", + "PSK_WITH_NULL_SHA256": "PSK-NULL-SHA256", + "PSK_WITH_NULL_SHA384": "PSK-NULL-SHA384", + "DHE_PSK_WITH_AES_128_CBC_SHA256": "DHE-PSK-AES128-CBC-SHA256", + "DHE_PSK_WITH_AES_256_CBC_SHA384": "DHE-PSK-AES256-CBC-SHA384", + "DHE_PSK_WITH_NULL_SHA256": "DHE-PSK-NULL-SHA256", + "DHE_PSK_WITH_NULL_SHA384": "DHE-PSK-NULL-SHA384", + "RSA_PSK_WITH_AES_128_CBC_SHA256": "RSA-PSK-AES128-CBC-SHA256", + "RSA_PSK_WITH_AES_256_CBC_SHA384": "RSA-PSK-AES256-CBC-SHA384", + "RSA_PSK_WITH_NULL_SHA256": "RSA-PSK-NULL-SHA256", + "RSA_PSK_WITH_NULL_SHA384": "RSA-PSK-NULL-SHA384", + "ECDHE_PSK_WITH_RC4_128_SHA": "ECDHE-PSK-RC4-SHA", + "ECDHE_PSK_WITH_3DES_EDE_CBC_SHA": "ECDHE-PSK-3DES-EDE-CBC-SHA", + "ECDHE_PSK_WITH_AES_128_CBC_SHA": "ECDHE-PSK-AES128-CBC-SHA", + "ECDHE_PSK_WITH_AES_256_CBC_SHA": "ECDHE-PSK-AES256-CBC-SHA", + "ECDHE_PSK_WITH_AES_128_CBC_SHA256": "ECDHE-PSK-AES128-CBC-SHA256", + "ECDHE_PSK_WITH_AES_256_CBC_SHA384": "ECDHE-PSK-AES256-CBC-SHA384", + "ECDHE_PSK_WITH_NULL_SHA": "ECDHE-PSK-NULL-SHA", + "ECDHE_PSK_WITH_NULL_SHA256": "ECDHE-PSK-NULL-SHA256", + "ECDHE_PSK_WITH_NULL_SHA384": "ECDHE-PSK-NULL-SHA384", + "PSK_WITH_CAMELLIA_128_CBC_SHA256": "PSK-CAMELLIA128-SHA256", + "PSK_WITH_CAMELLIA_256_CBC_SHA384": "PSK-CAMELLIA256-SHA384", + "DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256": 
"DHE-PSK-CAMELLIA128-SHA256", + "DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384": "DHE-PSK-CAMELLIA256-SHA384", + "RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256": "RSA-PSK-CAMELLIA128-SHA256", + "RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384": "RSA-PSK-CAMELLIA256-SHA384", + "ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256": "ECDHE-PSK-CAMELLIA128-SHA256", + "ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384": "ECDHE-PSK-CAMELLIA256-SHA384", + "PSK_WITH_AES_128_CCM": "PSK-AES128-CCM", + "PSK_WITH_AES_256_CCM": "PSK-AES256-CCM", + "DHE_PSK_WITH_AES_128_CCM": "DHE-PSK-AES128-CCM", + "DHE_PSK_WITH_AES_256_CCM": "DHE-PSK-AES256-CCM", + "PSK_WITH_AES_128_CCM_8": "PSK-AES128-CCM8", + "PSK_WITH_AES_256_CCM_8": "PSK-AES256-CCM8", + "DHE_PSK_WITH_AES_128_CCM_8": "DHE-PSK-AES128-CCM8", + "DHE_PSK_WITH_AES_256_CCM_8": "DHE-PSK-AES256-CCM8", + # ChaCha20-Poly1305 cipher suites, extending TLS v1.2 + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": "ECDHE-RSA-CHACHA20-POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": "ECDHE-ECDSA-CHACHA20-POLY1305", + "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256": "DHE-RSA-CHACHA20-POLY1305", + "TLS_PSK_WITH_CHACHA20_POLY1305_SHA256": "PSK-CHACHA20-POLY1305", + "TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256": "ECDHE-PSK-CHACHA20-POLY1305", + "TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256": "DHE-PSK-CHACHA20-POLY1305", + "TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256": "RSA-PSK-CHACHA20-POLY1305", +} + +# TLS v1.3 cipher suites IANI to OpenSSL name translation +TLSV1_3_CIPHER_SUITES: Dict[str, str] = { + "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", + "TLS_AES_128_CCM_SHA256": "TLS_AES_128_CCM_SHA256", + "TLS_AES_128_CCM_8_SHA256": "TLS_AES_128_CCM_8_SHA256", +} + +TLS_CIPHER_SUITES: Dict[str, Dict[str, str]] = { + "TLSv1": TLSV1_CIPHER_SUITES, + "TLSv1.1": TLSV1_1_CIPHER_SUITES, + "TLSv1.2": TLSV1_2_CIPHER_SUITES, + "TLSv1.3": TLSV1_3_CIPHER_SUITES, +} + +OPENSSL_CS_NAMES: Dict[str, ValuesView[str]] = { + "TLSv1": TLSV1_CIPHER_SUITES.values(), + "TLSv1.1": TLSV1_1_CIPHER_SUITES.values(), + "TLSv1.2": TLSV1_2_CIPHER_SUITES.values(), + "TLSv1.3": TLSV1_3_CIPHER_SUITES.values(), +} diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/conversion.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/conversion.py new file mode 100644 index 00000000..c52698d3 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/conversion.py @@ -0,0 +1,733 @@ +# Copyright (c) 2009, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. 
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License, version 2.0, for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Converting MySQL and Python types"""
+
+import datetime
+import math
+import struct
+import time
+
+from decimal import Decimal
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
+
+from .constants import CharacterSet, FieldFlag, FieldType
+from .custom_types import HexLiteral
+from .types import (
+    DescriptionType,
+    RowType,
+    StrOrBytes,
+    ToMysqlInputTypes,
+    ToMysqlOutputTypes,
+    ToPythonOutputTypes,
+)
+from .utils import NUMERIC_TYPES
+
+CONVERT_ERROR = "Could not convert '{value}' to python {pytype}"
+
+
+class MySQLConverterBase:
+    """Base class for conversion classes
+
+    All classes dealing with converting to and from MySQL data types must
+    be subclasses of this class.
+    """
+
+    def __init__(
+        self,
+        charset: Optional[str] = "utf8",
+        use_unicode: bool = True,
+        str_fallback: bool = False,
+    ) -> None:
+        self.python_types: Optional[Tuple[Any, ...]] = None
+        self.mysql_types: Optional[Tuple[Any, ...]] = None
+        self.charset: Optional[str] = None
+        self.charset_id: int = 0
+        self.set_charset(charset)
+        self.use_unicode: bool = use_unicode
+        self.str_fallback: bool = str_fallback
+        self._cache_field_types: Dict[
+            int,
+            Callable[[bytes, DescriptionType], ToPythonOutputTypes],
+        ] = {}
+
+    def set_charset(self, charset: Optional[str]) -> None:
+        """Set character set"""
+        if charset in ("utf8mb4", "utf8mb3"):
+            charset = "utf8"
+        if charset is not None:
+            self.charset = charset
+        else:
+            # default to utf8
+            self.charset = "utf8"
+        self.charset_id = CharacterSet.get_charset_info(self.charset)[0]
+
+    def set_unicode(self, value: bool = True) -> None:
+        """Set whether to use Unicode"""
+        self.use_unicode = value
+
+    def to_mysql(
+        self, value: ToMysqlInputTypes
+    ) -> Union[ToMysqlInputTypes, HexLiteral]:
+        """Convert Python data type to MySQL"""
+        type_name = value.__class__.__name__.lower()
+        try:
+            converted: ToMysqlOutputTypes = getattr(self, f"_{type_name}_to_mysql")(
+                value
+            )
+            return converted
+        except AttributeError:
+            return value
+
+    def to_python(
+        self, vtype: DescriptionType, value: Optional[bytes]
+    ) -> ToPythonOutputTypes:
+        """Convert MySQL data type to Python"""
+
+        if (value == b"\x00" or value is None) and vtype[1] != FieldType.BIT:
+            # Don't go further when we hit a NULL value
+            return None
+
+        if not self._cache_field_types:
+            self._cache_field_types = {}
+            for name, info in FieldType.desc.items():
+                try:
+                    self._cache_field_types[info[0]] = getattr(
+                        self, f"_{name.lower()}_to_python"
+                    )
+                except AttributeError:
+                    # We ignore field types which have no conversion method
+                    pass
+        if value is None:
+            return None
+        try:
+            return self._cache_field_types[vtype[1]](value, vtype)
+        except KeyError:
+            return value
+
+    @staticmethod
+    def escape(value: Any) -> Any:
+        """Escape buffer for sending to MySQL"""
+        return value
+
+    @staticmethod
+    def quote(buf: Any) -> StrOrBytes:
+        """Quote buffer for sending to MySQL"""
+        return str(buf)
+
+
+class MySQLConverter(MySQLConverterBase):
+    """Default conversion class for MySQL Connector/Python.
+
+    o escape method: for escaping values sent to MySQL
+    o quoting method: for quoting values sent to MySQL in statements
+    o conversion mapping: maps Python and MySQL data types to
+      functions for converting them.
+
+    Whenever one needs to convert values differently, a converter_class
+    argument can be given while instantiating a new connection like
+    cnx.connect(converter_class=CustomMySQLConverterClass).
+
+    """
+
+    def __init__(
+        self,
+        charset: Optional[str] = None,
+        use_unicode: bool = True,
+        str_fallback: bool = False,
+    ) -> None:
+        MySQLConverterBase.__init__(self, charset, use_unicode, str_fallback)
+        self._cache_field_types: Dict[
+            int,
+            Callable[[bytes, DescriptionType], ToPythonOutputTypes],
+        ] = {}
+
+    @staticmethod
+    def escape(value: Any) -> Any:
+        """
+        Escapes special characters as they are expected to be when MySQL
+        receives them.
+        As found in MySQL source mysys/charset.c
+
+        Returns the value if not a string, or the escaped string.
+        """
+        if isinstance(value, (bytes, bytearray)):
+            value = value.replace(b"\\", b"\\\\")
+            value = value.replace(b"\n", b"\\n")
+            value = value.replace(b"\r", b"\\r")
+            value = value.replace(b"\047", b"\134\047")  # single quotes
+            value = value.replace(b"\042", b"\134\042")  # double quotes
+            value = value.replace(b"\032", b"\134\032")  # for Win32
+        elif isinstance(value, str) and not isinstance(value, HexLiteral):
+            value = value.replace("\\", "\\\\")
+            value = value.replace("\n", "\\n")
+            value = value.replace("\r", "\\r")
+            value = value.replace("\047", "\134\047")  # single quotes
+            value = value.replace("\042", "\134\042")  # double quotes
+            value = value.replace("\032", "\134\032")  # for Win32
+        return value
+
+    @staticmethod
+    def quote(buf: Optional[Union[float, int, Decimal, HexLiteral, bytes]]) -> bytes:
+        """
+        Quote the parameters for commands. General rules:
+        o numbers are returned as bytes using the ascii codec
+        o None is returned as bytearray(b'NULL')
+        o Everything else is single quoted ''
+
+        Returns a bytearray object.
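+
+        Illustrative examples (bytes for numbers, bytearray otherwise):
+
+            >>> MySQLConverter.quote(Decimal("3.14"))
+            b'3.14'
+            >>> MySQLConverter.quote(None)
+            bytearray(b'NULL')
+            >>> MySQLConverter.quote(b"abc")
+            bytearray(b"'abc'")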
+        """
+        if isinstance(buf, NUMERIC_TYPES):
+            return str(buf).encode("ascii")
+        if isinstance(buf, type(None)):
+            return bytearray(b"NULL")
+        return bytearray(b"'" + buf + b"'")  # type: ignore[operator]
+
+    def to_mysql(self, value: ToMysqlInputTypes) -> ToMysqlOutputTypes:
+        """Convert Python data type to MySQL"""
+        type_name = value.__class__.__name__.lower()
+        try:
+            converted: ToMysqlOutputTypes = getattr(self, f"_{type_name}_to_mysql")(
+                value
+            )
+            return converted
+        except AttributeError:
+            if self.str_fallback:
+                return str(value).encode()
+            raise TypeError(
+                f"Python '{type_name}' cannot be converted to a MySQL type"
+            ) from None
+
+    def to_python(
+        self,
+        vtype: DescriptionType,
+        value: Optional[bytes],
+    ) -> ToPythonOutputTypes:
+        """Convert MySQL data type to Python"""
+        if value == b"\x00" and vtype[1] != FieldType.BIT:
+            # Don't go further when we hit a NULL value
+            return None
+        if value is None:
+            return None
+
+        if not self._cache_field_types:
+            self._cache_field_types = {}
+            for name, info in FieldType.desc.items():
+                try:
+                    self._cache_field_types[info[0]] = getattr(
+                        self, f"_{name.lower()}_to_python"
+                    )
+                except AttributeError:
+                    # We ignore field types which have no conversion method
+                    pass
+
+        try:
+            return self._cache_field_types[vtype[1]](value, vtype)
+        except KeyError:
+            # If one type is not defined, we just return the value as str
+            try:
+                return value.decode("utf-8")
+            except UnicodeDecodeError:
+                return value
+        except ValueError as err:
+            raise ValueError(f"{err} (field {vtype[0]})") from err
+        except TypeError as err:
+            raise TypeError(f"{err} (field {vtype[0]})") from err
+
+    @staticmethod
+    def _int_to_mysql(value: int) -> int:
+        """Convert value to int"""
+        return int(value)
+
+    @staticmethod
+    def _long_to_mysql(value: int) -> int:
+        """Convert value to int
+
+        Note: there is no "long" type in Python 3, since integers (int) are
+        of unlimited size. With Python 2 no longer supported, this method is
+        kept only for backward compatibility and is a deprecation candidate.
+        """
+        return int(value)
+
+    @staticmethod
+    def _float_to_mysql(value: float) -> Optional[float]:
+        """Convert value to float"""
+        if math.isnan(value):
+            return None
+        return float(value)
+
+    def _str_to_mysql(self, value: str) -> Union[bytes, HexLiteral]:
+        """Convert value to string"""
+        return self._unicode_to_mysql(value)
+
+    def _unicode_to_mysql(self, value: str) -> Union[bytes, HexLiteral]:
+        """Convert unicode"""
+        charset = self.charset
+        charset_id = self.charset_id
+        if charset == "binary":
+            charset = "utf8"
+            charset_id = CharacterSet.get_charset_info(charset)[0]
+        encoded = value.encode(charset)
+        if charset_id in CharacterSet.slash_charsets:
+            if b"\x5c" in encoded:
+                return HexLiteral(value, charset)
+        return encoded
+
+    @staticmethod
+    def _bytes_to_mysql(value: bytes) -> bytes:
+        """Convert value to bytes"""
+        return value
+
+    @staticmethod
+    def _bytearray_to_mysql(value: bytearray) -> bytes:
+        """Convert value to bytes"""
+        return bytes(value)
+
+    @staticmethod
+    def _bool_to_mysql(value: bool) -> int:
+        """Convert bool value to int"""
+        return 1 if value else 0
+
+    @staticmethod
+    def _nonetype_to_mysql(value: None) -> None:  # pylint: disable=unused-argument
+        """
+        This would return what None would be in MySQL, but instead we
+        leave it None and return it right away. The actual conversion
+        from None to NULL happens in the quoting functionality.
+
+        Return None.
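+
+        Illustrative pairing (None is turned into NULL only at the
+        quoting step):
+
+            >>> conv = MySQLConverter()
+            >>> conv.to_mysql(None) is None
+            True
+            >>> conv.quote(None)
+            bytearray(b'NULL')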
+        """
+        return None
+
+    @staticmethod
+    def _datetime_to_mysql(value: datetime.datetime) -> bytes:
+        """
+        Converts a datetime instance to a string suitable for MySQL.
+        The returned string has format: %Y-%m-%d %H:%M:%S[.%f]
+
+        If the instance isn't a datetime.datetime type, it returns None.
+
+        Returns bytes.
+        """
+        if value.microsecond:
+            fmt = "{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:06d}"
+            return fmt.format(
+                value.year,
+                value.month,
+                value.day,
+                value.hour,
+                value.minute,
+                value.second,
+                value.microsecond,
+            ).encode("ascii")
+
+        fmt = "{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}"
+        return fmt.format(
+            value.year,
+            value.month,
+            value.day,
+            value.hour,
+            value.minute,
+            value.second,
+        ).encode("ascii")
+
+    @staticmethod
+    def _date_to_mysql(value: datetime.date) -> bytes:
+        """
+        Converts a date instance to a string suitable for MySQL.
+        The returned string has format: %Y-%m-%d
+
+        If the instance isn't a datetime.date type, it returns None.
+
+        Returns bytes.
+        """
+        return f"{value.year:04d}-{value.month:02d}-{value.day:02d}".encode("ascii")
+
+    @staticmethod
+    def _time_to_mysql(value: datetime.time) -> bytes:
+        """
+        Converts a time instance to a string suitable for MySQL.
+        The returned string has format: %H:%M:%S[.%f]
+
+        If the instance isn't a datetime.time type, it returns None.
+
+        Returns bytes.
+        """
+        if value.microsecond:
+            return value.strftime("%H:%M:%S.%f").encode("ascii")
+        return value.strftime("%H:%M:%S").encode("ascii")
+
+    @staticmethod
+    def _struct_time_to_mysql(value: time.struct_time) -> bytes:
+        """
+        Converts a time.struct_time sequence to a string suitable
+        for MySQL.
+        The returned string has format: %Y-%m-%d %H:%M:%S
+
+        Returns bytes, or None when not valid.
+        """
+        return time.strftime("%Y-%m-%d %H:%M:%S", value).encode("ascii")
+
+    @staticmethod
+    def _timedelta_to_mysql(value: datetime.timedelta) -> bytes:
+        """
+        Converts a timedelta instance to a string suitable for MySQL.
+        The returned string has format: %H:%M:%S
+
+        Returns bytes.
+        """
+        seconds = abs(value.days * 86400 + value.seconds)
+
+        if value.microseconds:
+            fmt = "{0:02d}:{1:02d}:{2:02d}.{3:06d}"
+            if value.days < 0:
+                mcs = 1000000 - value.microseconds
+                seconds -= 1
+            else:
+                mcs = value.microseconds
+        else:
+            fmt = "{0:02d}:{1:02d}:{2:02d}"
+
+        if value.days < 0:
+            fmt = "-" + fmt
+
+        (hours, remainder) = divmod(seconds, 3600)
+        (mins, secs) = divmod(remainder, 60)
+
+        if value.microseconds:
+            result = fmt.format(hours, mins, secs, mcs)
+        else:
+            result = fmt.format(hours, mins, secs)
+
+        return result.encode("ascii")
+
+    @staticmethod
+    def _decimal_to_mysql(value: Decimal) -> Optional[bytes]:
+        """
+        Converts a decimal.Decimal instance to a string suitable for
+        MySQL.
+
+        Returns bytes, or None when not valid.
+        """
+        if isinstance(value, Decimal):
+            return str(value).encode("ascii")
+
+        return None
+
+    def row_to_python(
+        self, row: Tuple[bytes, ...], fields: List[DescriptionType]
+    ) -> RowType:
+        """Convert a MySQL text result row to Python types
+
+        The row argument is a sequence containing text result returned
+        by a MySQL server. Each value of the row is converted using the
+        field type information in the fields argument.
+
+        Returns a tuple.
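+
+        Illustrative example (hypothetical field description tuples; only
+        the type code at index 1 is used here):
+
+            >>> fields = [("id", FieldType.LONGLONG, None, None, None, None, 0, 0, 63)]
+            >>> MySQLConverter().row_to_python((b"42",), fields)
+            (42,)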
+        """
+        i = 0
+        result: List[ToPythonOutputTypes] = [None] * len(fields)
+
+        if not self._cache_field_types:
+            self._cache_field_types = {}
+            for name, info in FieldType.desc.items():
+                try:
+                    self._cache_field_types[info[0]] = getattr(
+                        self, f"_{name.lower()}_to_python"
+                    )
+                except AttributeError:
+                    # We ignore field types which have no conversion method
+                    pass
+
+        for field in fields:
+            field_type = field[1]
+
+            if (row[i] == b"\x00" and field_type != FieldType.BIT) or row[i] is None:
+                # Don't convert NULL value
+                i += 1
+                continue
+
+            try:
+                result[i] = self._cache_field_types[field_type](row[i], field)
+            except KeyError:
+                # If one type is not defined, we just return the value as str
+                try:
+                    result[i] = row[i].decode("utf-8")
+                except UnicodeDecodeError:
+                    result[i] = row[i]
+            except (ValueError, TypeError) as err:
+                # Item "ValueError" of "Union[ValueError, TypeError]" has no attribute "message"
+                err.message = f"{err} (field {field[0]})"  # type: ignore[union-attr]
+                raise
+
+            i += 1
+
+        return tuple(result)
+
+    # pylint: disable=unused-argument
+    @staticmethod
+    def _float_to_python(value: bytes, desc: Optional[DescriptionType] = None) -> float:
+        """
+        Returns value as float type.
+        """
+        return float(value)
+
+    _double_to_python = _float_to_python
+
+    @staticmethod
+    def _int_to_python(value: bytes, desc: Optional[DescriptionType] = None) -> int:
+        """
+        Returns value as int type.
+        """
+        return int(value)
+
+    _tiny_to_python = _int_to_python
+    _short_to_python = _int_to_python
+    _int24_to_python = _int_to_python
+    _long_to_python = _int_to_python
+    _longlong_to_python = _int_to_python
+
+    def _decimal_to_python(
+        self, value: bytes, desc: Optional[DescriptionType] = None
+    ) -> Decimal:
+        """
+        Returns value as a decimal.Decimal.
+        """
+        val = value.decode(self.charset)
+        return Decimal(val)
+
+    _newdecimal_to_python = _decimal_to_python
+
+    @staticmethod
+    def _str(value: bytes, desc: Optional[DescriptionType] = None) -> str:
+        """
+        Returns value as str type.
+        """
+        return str(value)
+
+    @staticmethod
+    def _bit_to_python(value: bytes, dsc: Optional[DescriptionType] = None) -> int:
+        """Returns BIT column type as integer"""
+        int_val = value
+        if len(int_val) < 8:
+            int_val = b"\x00" * (8 - len(int_val)) + int_val
+        return int(struct.unpack(">Q", int_val)[0])
+
+    @staticmethod
+    def _date_to_python(
+        value: bytes, dsc: Optional[DescriptionType] = None
+    ) -> Optional[datetime.date]:
+        """Converts a DATE column value to a python datetime.date type.
+
+        Raises ValueError if the value can not be converted.
+
+        Returns DATE column type as datetime.date type.
+        """
+        if isinstance(value, datetime.date):
+            return value
+        try:
+            parts = value.split(b"-")
+            if len(parts) != 3:
+                raise ValueError(f"invalid date format: {parts} len: {len(parts)}")
+            try:
+                return datetime.date(int(parts[0]), int(parts[1]), int(parts[2]))
+            except ValueError:
+                return None
+        except (IndexError, ValueError):
+            raise ValueError(
+                f"Could not convert {repr(value)} to python datetime.date"
+            ) from None
+
+    _NEWDATE_to_python = _date_to_python
+
+    @staticmethod
+    def _time_to_python(
+        value: bytes, dsc: Optional[DescriptionType] = None
+    ) -> datetime.timedelta:
+        """Converts TIME column value to a python datetime.timedelta type.
+
+        Converts the TIME column MySQL type passed as bytes to a python
+        datetime.timedelta type.
+
+        Raises ValueError if the value can not be converted.
+
+        Returns datetime.timedelta type.
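+
+        Illustrative example:
+
+            >>> MySQLConverter._time_to_python(b"12:34:56.000154")
+            datetime.timedelta(seconds=45296, microseconds=154)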
+ """ + mcs: Optional[Union[int, bytes]] = None + try: + (hms, mcs) = value.split(b".") + mcs = int(mcs.ljust(6, b"0")) + except (TypeError, ValueError): + hms = value + mcs = 0 + try: + (hours, mins, secs) = [int(d) for d in hms.split(b":")] + if value[0] == 45 or value[0] == "-": + mins, secs, mcs = ( + -mins, + -secs, + -mcs, # pylint: disable=invalid-unary-operand-type + ) + return datetime.timedelta( + hours=hours, minutes=mins, seconds=secs, microseconds=mcs + ) + except (IndexError, TypeError, ValueError): + raise ValueError( + CONVERT_ERROR.format(value=value, pytype="datetime.timedelta") + ) from None + + @staticmethod + def _datetime_to_python( + value: bytes, dsc: Optional[DescriptionType] = None + ) -> Optional[datetime.datetime]: + """Converts DATETIME column value to python datetime.time value type. + + Converts the DATETIME column MySQL type passed as bytes to a python + datetime.datetime type. + + Returns: datetime.datetime type. + """ + if isinstance(value, datetime.datetime): + return value + datetime_val = None + mcs: Optional[Union[int, bytes]] = None + try: + (date_, time_) = value.split(b" ") + if len(time_) > 8: + (hms, mcs) = time_.split(b".") + mcs = int(mcs.ljust(6, b"0")) + else: + hms = time_ + mcs = 0 + dtval = ( + [int(i) for i in date_.split(b"-")] + + [int(i) for i in hms.split(b":")] + + [ + mcs, + ] + ) + if len(dtval) < 6: + raise ValueError(f"invalid datetime format: {dtval} len: {len(dtval)}") + # Note that by default MySQL accepts invalid timestamps + # (this is also backward compatibility). + # Traditionaly C/py returns None for this well formed but + # invalid datetime for python like '0000-00-00 HH:MM:SS'. + try: + datetime_val = datetime.datetime(*dtval) # type: ignore[arg-type] + except ValueError: + return None + except (IndexError, TypeError): + raise ValueError( + CONVERT_ERROR.format(value=value, pytype="datetime.timedelta") + ) from None + + return datetime_val + + _timestamp_to_python = _datetime_to_python + + @staticmethod + def _year_to_python(value: bytes, dsc: Optional[DescriptionType] = None) -> int: + """Returns YEAR column type as integer""" + try: + year = int(value) + except ValueError as err: + raise ValueError(f"Failed converting YEAR to int ({repr(value)})") from err + + return year + + def _set_to_python( + self, value: bytes, dsc: Optional[DescriptionType] = None + ) -> Set[str]: + """Returns SET column type as set + + Actually, MySQL protocol sees a SET as a string type field. So this + code isn't called directly, but used by STRING_to_python() method. + + Returns SET column type as a set. + """ + set_type = None + val = value.decode(self.charset) + if not val: + return set() + try: + set_type = set(val.split(",")) + except ValueError as err: + raise ValueError( + f"Could not convert set {repr(value)} to a sequence" + ) from err + return set_type + + def _string_to_python( + self, value: bytes, dsc: Optional[DescriptionType] = None + ) -> Union[StrOrBytes, Set[str]]: + """ + Note that a SET is a string too, but using the FieldFlag we can see + whether we have to split it. + + Returns string typed columns as string type. 
+ """ + if self.charset == "binary": + return value + if dsc is not None: + if dsc[1] == FieldType.JSON and self.use_unicode: + return value.decode(self.charset) + if dsc[7] & FieldFlag.SET: + return self._set_to_python(value, dsc) + # 'binary' charset + if dsc[8] == 63: + return value + if isinstance(value, (bytes, bytearray)) and self.use_unicode: + try: + return value.decode(self.charset) + except UnicodeDecodeError: + return value + + return value + + _var_string_to_python = _string_to_python + _json_to_python = _string_to_python + + def _blob_to_python( + self, value: bytes, dsc: Optional[DescriptionType] = None + ) -> Union[StrOrBytes, Set[str]]: + """Convert BLOB data type to Python.""" + if dsc is not None: + if ( + dsc[7] & FieldFlag.BLOB + and dsc[7] & FieldFlag.BINARY + # 'binary' charset + and dsc[8] == 63 + ): + return bytes(value) + return self._string_to_python(value, dsc) + + _long_blob_to_python = _blob_to_python + _medium_blob_to_python = _blob_to_python + _tiny_blob_to_python = _blob_to_python + # pylint: enable=unused-argument diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/cursor.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/cursor.py new file mode 100644 index 00000000..3a5b320d --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/cursor.py @@ -0,0 +1,1682 @@ +# Copyright (c) 2009, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+# mypy: disable-error-code="assignment,arg-type,attr-defined,index,override,call-overload"
+
+"""Cursor classes."""
+from __future__ import annotations
+
+import re
+import warnings
+import weakref
+
+from collections import namedtuple
+from decimal import Decimal
+from typing import (
+    Any,
+    Dict,
+    Generator,
+    Iterator,
+    List,
+    NoReturn,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+)
+from weakref import CallableProxyType
+
+from .abstracts import NAMED_TUPLE_CACHE, MySQLConnectionAbstract, MySQLCursorAbstract
+from .constants import ServerFlag
+from .errors import (
+    Error,
+    InterfaceError,
+    NotSupportedError,
+    ProgrammingError,
+    get_mysql_exception,
+)
+from .types import (
+    DescriptionType,
+    EofPacketType,
+    ParamsDictType,
+    ParamsSequenceOrDictType,
+    ParamsSequenceType,
+    ResultType,
+    RowType,
+    StrOrBytes,
+    ToPythonOutputTypes,
+    WarningType,
+)
+
+SQL_COMMENT = r"\/\*.*?\*\/"
+RE_SQL_COMMENT = re.compile(
+    rf"""({SQL_COMMENT})|(["'`][^"'`]*?({SQL_COMMENT})[^"'`]*?["'`])""",
+    re.I | re.M | re.S,
+)
+RE_SQL_ON_DUPLICATE = re.compile(
+    r"""\s*ON\s+DUPLICATE\s+KEY(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$""",
+    re.I | re.M | re.S,
+)
+RE_SQL_INSERT_STMT = re.compile(
+    rf"({SQL_COMMENT}|\s)*INSERT({SQL_COMMENT}|\s)"
+    r"*(?:IGNORE\s+)?INTO\s+[`'\"]?.+[`'\"]?(?:\.[`'\"]?.+[`'\"]?)"
+    r"{0,2}\s+VALUES\s*\(.+(?:\s*,.+)*\)",
+    re.I | re.M | re.S,
+)
+RE_SQL_INSERT_VALUES = re.compile(r".*VALUES\s*(\(.*\)).*", re.I | re.M | re.S)
+RE_PY_PARAM = re.compile(b"(%s)")
+RE_PY_MAPPING_PARAM = re.compile(
+    rb"""
+    %
+    \((?P<mapping_key>[^)]+)\)
+    (?P<conversion_type>[diouxXeEfFgGcrs%])
+    """,
+    re.X,
+)
+RE_SQL_SPLIT_STMTS = re.compile(b""";(?=(?:[^"'`]*["'`].*["'`])*[^"'`]*$)""")
+RE_SQL_FIND_PARAM = re.compile(b"""%s(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)""")
+RE_SQL_PYTHON_REPLACE_PARAM = re.compile(r"%\(.*?\)s")
+RE_SQL_PYTHON_CAPTURE_PARAM_NAME = re.compile(r"%\((.*?)\)s")
+
+ERR_NO_RESULT_TO_FETCH = "No result set to fetch from"
+
+MAX_RESULTS = 4294967295
+
+
+class _ParamSubstitutor:
+    """
+    Substitutes parameters into SQL statement.
+    """
+
+    def __init__(self, params: Sequence[bytes]) -> None:
+        self.params: Sequence[bytes] = params
+        self.index: int = 0
+
+    def __call__(self, matchobj: re.Match) -> bytes:
+        index = self.index
+        self.index += 1
+        try:
+            return bytes(self.params[index])
+        except IndexError:
+            raise ProgrammingError(
+                "Not enough parameters for the SQL statement"
+            ) from None
+
+    @property
+    def remaining(self) -> int:
+        """Returns number of parameters remaining to be substituted"""
+        return len(self.params) - self.index
+
+
+def _bytestr_format_dict(bytestr: bytes, value_dict: Dict[bytes, bytes]) -> bytes:
+    """
+    >>> _bytestr_format_dict(b'%(a)s', {b'a': b'foobar'})
+    b'foobar'
+    >>> _bytestr_format_dict(b'%%(a)s', {b'a': b'foobar'})
+    b'%%(a)s'
+    >>> _bytestr_format_dict(b'%%%(a)s', {b'a': b'foobar'})
+    b'%%foobar'
+    >>> _bytestr_format_dict(b'%(x)s %(y)s',
+    ...                      {b'x': b'x=%(y)s', b'y': b'y=%(x)s'})
+    b'x=%(y)s y=%(x)s'
+    """
+
+    def replace(matchobj: re.Match) -> bytes:
+        """Replace pattern."""
+        value: Optional[bytes] = None
+        groups = matchobj.groupdict()
+        if groups["conversion_type"] == b"%":
+            value = b"%"
+        if groups["conversion_type"] == b"s":
+            key = groups["mapping_key"]
+            value = value_dict[key]
+        if value is None:
+            raise ValueError(
+                f"Unsupported conversion_type: {groups['conversion_type']}"
+            )
+        return value
+
+    stmt = RE_PY_MAPPING_PARAM.sub(replace, bytestr)
+    return stmt
+
+
+class CursorBase(MySQLCursorAbstract):
+    """
+    Base for defining MySQLCursor. This class is a skeleton and defines
+    methods and members as required for the Python Database API
+    Specification v2.0.
+
+    It's better to inherit from MySQLCursor.
+    """
+
+    _raw: bool = False
+
+    def __init__(self) -> None:
+        self._description: Optional[List[DescriptionType]] = None
+        self._rowcount: int = -1
+        self.arraysize: int = 1
+        super().__init__()
+
+    def callproc(self, procname: str, args: Sequence[Any] = ()) -> Any:
+        """Calls a stored procedure with the given arguments
+
+        The arguments will be set during this session, meaning
+        they will be called like _<procname>__arg<nr> where <nr>
+        is an enumeration (+1) of the arguments.
+
+        Coding Example:
+          1) Defining the Stored Routine in MySQL:
+             CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
+             BEGIN
+                 SET pProd := pFac1 * pFac2;
+             END
+
+          2) Executing in Python:
+             args = (5, 5, 0)  # 0 is to hold pProd
+             cursor.callproc('multiply', args)
+             print(cursor.fetchone())
+
+        Does not return a value, but a result set will be
+        available when the CALL statement executes successfully.
+        Raises exceptions when something is wrong.
+        """
+
+    def close(self) -> Any:
+        """Close the cursor."""
+
+    def execute(
+        self,
+        operation: Any,
+        params: Union[Sequence[Any], Dict[str, Any]] = (),
+        multi: bool = False,
+    ) -> Any:
+        """Executes the given operation
+
+        Executes the given operation substituting any markers with
+        the given parameters.
+
+        For example, getting all rows where id is 5:
+          cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
+
+        The multi argument should be set to True when executing multiple
+        statements in one operation. If not set and multiple results are
+        found, an InterfaceError will be raised.
+
+        If warnings were generated, and connection.get_warnings is True, then
+        self._warnings will be a list containing these warnings.
+
+        Returns an iterator when multi is True, otherwise None.
+        """
+
+    def executemany(
+        self, operation: Any, seq_params: Sequence[Union[Sequence[Any], Dict[str, Any]]]
+    ) -> Any:
+        """Execute the given operation multiple times
+
+        The executemany() method will execute the operation iterating
+        over the list of parameters in seq_params.
+
+        Example: Inserting 3 new employees and their phone number
+
+          data = [
+              ('Jane','555-001'),
+              ('Joe', '555-001'),
+              ('John', '555-003')
+          ]
+          stmt = "INSERT INTO employees (name, phone) VALUES (%s,%s)"
+          cursor.executemany(stmt, data)
+
+        INSERT statements are optimized by batching the data, that is
+        using the MySQL multiple rows syntax.
+
+        Results are discarded. If they are needed, consider looping over
+        data using the execute() method.
+        """
+
+    def fetchone(self) -> Optional[Sequence[Any]]:
+        """Return next row of a query result set.
+
+        Returns:
+            tuple or None: A row from query result set.
+        """
+
+    def fetchmany(self, size: int = 1) -> List[Sequence[Any]]:
+        """Return the next set of rows of a query result set.
+
+        When no more rows are available, it returns an empty list.
+        The number of rows returned can be specified using the size argument,
+        which defaults to one.
+
+        Returns:
+            list: The next set of rows of a query result set.
+        """
+
+    def fetchall(self) -> List[Sequence[Any]]:
+        """Return all rows of a query result set.
+
+        Returns:
+            list: A list of tuples with all rows of a query result set.
+        """
+
+    def nextset(self) -> Any:
+        """Not Implemented."""
+
+    def setinputsizes(self, sizes: Any) -> Any:
+        """Not Implemented."""
+
+    def setoutputsize(self, size: Any, column: Any = None) -> Any:
+        """Not Implemented."""
+
+    def reset(self, free: bool = True) -> Any:
+        """Reset the cursor to default"""
+
+    @property
+    def description(self) -> Optional[List[DescriptionType]]:
+        """Returns description of columns in a result
+
+        This property returns a list of tuples describing the columns in
+        a result set. A tuple is described as follows::
+
+                (column_name,
+                 type,
+                 None,
+                 None,
+                 None,
+                 None,
+                 null_ok,
+                 column_flags)  # Addition to PEP-249 specs
+
+        Returns a list of tuples.
+        """
+        return self._description
+
+    @property
+    def rowcount(self) -> int:
+        """Returns the number of rows produced or affected
+
+        This property returns the number of rows produced by queries
+        such as a SELECT, or affected rows when executing DML statements
+        like INSERT or UPDATE.
+
+        Note that for non-buffered cursors it is impossible to know the
+        number of rows produced before having fetched them all. For those,
+        the number of rows will be -1 right after execution, and
+        incremented when fetching rows.
+
+        Returns an integer.
+        """
+        return self._rowcount
+
+
+class MySQLCursor(CursorBase):
+    """Default cursor for interacting with MySQL
+
+    This cursor will execute statements and handle the result. It will
+    not automatically fetch all rows.
+
+    MySQLCursor should be inherited whenever other functionality is
+    required. An example would be to change the fetch* member functions
+    to return dictionaries instead of lists of values.
+
+    Implements the Python Database API Specification v2.0 (PEP-249)
+    """
+
+    def __init__(
+        self, connection: Optional[Type[MySQLConnectionAbstract]] = None
+    ) -> None:
+        CursorBase.__init__(self)
+        self._connection: CallableProxyType[Type[MySQLConnectionAbstract]] = None
+        self._nextrow: Tuple[Optional[RowType], Optional[EofPacketType]] = (
+            None,
+            None,
+        )
+        self._binary: bool = False
+
+        if connection is not None:
+            self._set_connection(connection)
+
+    def __iter__(self) -> Iterator[RowType]:
+        """
+        Iteration over the result set, which calls self.fetchone()
+        and returns the next row.
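+
+        Illustrative usage (added for clarity; assumes a SELECT was
+        executed on this cursor):
+
+          cursor.execute("SELECT id, name FROM t1")
+          for row in cursor:
+              print(row)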
+ """ + return iter(self.fetchone, None) + + def _set_connection(self, connection: Type[MySQLConnectionAbstract]) -> None: + """Set the connection""" + try: + self._connection = weakref.proxy(connection) + self._connection.is_connected() + except (AttributeError, TypeError): + raise InterfaceError(errno=2048) from None + + def _reset_result(self) -> None: + """Reset the cursor to default""" + self._rowcount: int = -1 + self._nextrow = (None, None) + self._stored_results: List[MySQLCursor] = [] + self._warnings: Optional[List[WarningType]] = None + self._warning_count: int = 0 + self._description: Optional[List[DescriptionType]] = None + self._executed: Optional[StrOrBytes] = None + self._executed_list: List[StrOrBytes] = [] + self.reset() + + def _have_unread_result(self) -> bool: + """Check whether there is an unread result""" + try: + return self._connection.unread_result + except AttributeError: + return False + + def _check_executed(self) -> None: + """Check if the statement has been executed. + + Raises an error if the statement has not been executed. + """ + if self._executed is None: + raise InterfaceError(ERR_NO_RESULT_TO_FETCH) + + def __next__(self) -> RowType: + """ + Used for iterating over the result set. Calles self.fetchone() + to get the next row. + """ + try: + row = self.fetchone() + except InterfaceError: + raise StopIteration from None + if not row: + raise StopIteration + return row + + def close(self) -> bool: + """Close the cursor + + Returns True when successful, otherwise False. + """ + if self._connection is None: + return False + + self._connection.handle_unread_result() + self._reset_result() + self._connection = None + + return True + + def _process_params_dict( + self, params: ParamsDictType + ) -> Dict[bytes, Union[bytes, Decimal]]: + """Process query parameters given as dictionary""" + try: + to_mysql = self._connection.converter.to_mysql + escape = self._connection.converter.escape + quote = self._connection.converter.quote + res: Dict[bytes, Any] = {} + for key, value in params.items(): + conv = value + conv = to_mysql(conv) + conv = escape(conv) + if not isinstance(value, Decimal): + conv = quote(conv) + res[key.encode()] = conv + except Exception as err: + raise ProgrammingError( + f"Failed processing pyformat-parameters; {err}" + ) from err + else: + return res + + def _process_params( + self, params: ParamsSequenceType + ) -> Tuple[Union[bytes, Decimal], ...]: + """Process query parameters.""" + try: + res = params[:] + + to_mysql = self._connection.converter.to_mysql + escape = self._connection.converter.escape + quote = self._connection.converter.quote + + res = [to_mysql(value) for value in res] + res = [escape(value) for value in res] + res = [ + quote(value) if not isinstance(params[i], Decimal) else value + for i, value in enumerate(res) + ] + except Exception as err: + raise ProgrammingError( + f"Failed processing format-parameters; {err}" + ) from err + else: + return tuple(res) + + def _handle_noresultset(self, res: ResultType) -> None: + """Handles result of execute() when there is no result set""" + try: + self._rowcount = res["affected_rows"] + self._last_insert_id = res["insert_id"] + self._warning_count = res["warning_count"] + except (KeyError, TypeError) as err: + raise ProgrammingError(f"Failed handling non-resultset; {err}") from None + + self._handle_warnings() + + def _handle_resultset(self) -> None: + """Handles result set + + This method handles the result set and is called after reading + and storing column information in 
+        _handle_result(). For non-buffering cursors, this method usually
+        does nothing.
+        """
+
+    def _handle_result(self, result: ResultType) -> None:
+        """
+        Handle the result after a command was sent. The result can be either
+        an OK-packet or a dictionary containing column/eof information.
+
+        Raises InterfaceError when result is not a dict() or result is
+        invalid.
+        """
+        if not isinstance(result, dict):
+            raise InterfaceError("Result was not a dict()")
+
+        if "columns" in result:
+            # Weak test, must be column/eof information
+            self._description = result["columns"]
+            self._connection.unread_result = True
+            self._handle_resultset()
+        elif "affected_rows" in result:
+            # Weak test, must be an OK-packet
+            self._connection.unread_result = False
+            self._handle_noresultset(result)
+        else:
+            raise InterfaceError("Invalid result")
+
+    def _execute_iter(
+        self, query_iter: Generator[ResultType, None, None]
+    ) -> Generator[MySQLCursor, None, None]:
+        """Generator returns MySQLCursor objects for multiple statements
+
+        This method is only used when multiple statements are executed
+        by the execute() method. It steps through the results of the given
+        query_iter (the result of MySQLConnection.cmd_query_iter()) alongside
+        the list of statements that were executed.
+        """
+        executed_list = RE_SQL_SPLIT_STMTS.split(self._executed)
+
+        i = 0
+        while True:
+            try:
+                result = next(query_iter)
+                self._reset_result()
+                self._handle_result(result)
+                try:
+                    self._executed = executed_list[i].strip()
+                    i += 1
+                except IndexError:
+                    self._executed = executed_list[0]
+
+                yield self
+            except StopIteration:
+                return
+
+    def execute(
+        self,
+        operation: StrOrBytes,
+        params: Optional[ParamsSequenceOrDictType] = None,
+        multi: bool = False,
+    ) -> Optional[Generator[MySQLCursor, None, None]]:
+        """Executes the given operation
+
+        Executes the given operation substituting any markers with
+        the given parameters.
+
+        For example, getting all rows where id is 5:
+          cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
+
+        The multi argument should be set to True when executing multiple
+        statements in one operation. If not set and multiple results are
+        found, an InterfaceError will be raised.
+
+        If warnings were generated, and connection.get_warnings is True, then
+        self._warnings will be a list containing these warnings.
+
+        Returns an iterator when multi is True, otherwise None.
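+
+        Illustrative example with named (pyformat) parameters, added for
+        clarity; the table and column are hypothetical:
+
+          stmt = "SELECT * FROM employees WHERE name = %(name)s"
+          cursor.execute(stmt, {"name": "Jane"})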
+ """ + if not operation: + return None + + try: + if not self._connection: + raise ProgrammingError + except (ProgrammingError, ReferenceError) as err: + raise ProgrammingError("Cursor is not connected") from err + + self._connection.handle_unread_result() + + self._reset_result() + stmt: StrOrBytes = "" + + try: + if not isinstance(operation, (bytes, bytearray)): + stmt = operation.encode(self._connection.python_charset) + else: + stmt = operation + except (UnicodeDecodeError, UnicodeEncodeError) as err: + raise ProgrammingError(str(err)) from err + + if params: + if isinstance(params, dict): + stmt = _bytestr_format_dict(stmt, self._process_params_dict(params)) + elif isinstance(params, (list, tuple)): + psub = _ParamSubstitutor(self._process_params(params)) + stmt = RE_PY_PARAM.sub(psub, stmt) + if psub.remaining != 0: + raise ProgrammingError( + "Not all parameters were used in the SQL statement" + ) + else: + raise ProgrammingError( + f"Could not process parameters: {type(params).__name__}({params})," + " it must be of type list, tuple or dict" + ) + + self._executed = stmt + if multi: + self._executed_list = [] + return self._execute_iter(self._connection.cmd_query_iter(stmt)) + + try: + self._handle_result(self._connection.cmd_query(stmt)) + except InterfaceError as err: + if self._connection.have_next_result: + raise InterfaceError( + "Use multi=True when executing multiple statements" + ) from err + raise + return None + + def _batch_insert( + self, operation: str, seq_params: Sequence[ParamsSequenceOrDictType] + ) -> Optional[bytes]: + """Implements multi row insert""" + + def remove_comments(match: re.Match) -> str: + """Remove comments from INSERT statements. + + This function is used while removing comments from INSERT + statements. If the matched string is a comment not enclosed + by quotes, it returns an empty string, else the string itself. + """ + if match.group(1): + return "" + return match.group(2) + + tmp = re.sub( + RE_SQL_ON_DUPLICATE, + "", + re.sub(RE_SQL_COMMENT, remove_comments, operation), + ) + + matches = re.search(RE_SQL_INSERT_VALUES, tmp) + if not matches: + raise InterfaceError( + "Failed rewriting statement for multi-row INSERT. Check SQL syntax" + ) + fmt = matches.group(1).encode(self._connection.python_charset) + values = [] + + try: + stmt = operation.encode(self._connection.python_charset) + for params in seq_params: + tmp = fmt + if isinstance(params, dict): + tmp = _bytestr_format_dict(tmp, self._process_params_dict(params)) + else: + psub = _ParamSubstitutor(self._process_params(params)) + tmp = RE_PY_PARAM.sub(psub, tmp) + if psub.remaining != 0: + raise ProgrammingError( + "Not all parameters were used in the SQL statement" + ) + values.append(tmp) + if fmt in stmt: + stmt = stmt.replace(fmt, b",".join(values), 1) + self._executed = stmt + return stmt + return None + except (UnicodeDecodeError, UnicodeEncodeError) as err: + raise ProgrammingError(str(err)) from err + except Error: + raise + except Exception as err: + raise InterfaceError(f"Failed executing the operation; {err}") from None + + def executemany( + self, operation: str, seq_params: Sequence[ParamsSequenceOrDictType] + ) -> Optional[Generator[MySQLCursor, None, None]]: + """Execute the given operation multiple times + + The executemany() method will execute the operation iterating + over the list of parameters in seq_params. 
+
+        Example: Inserting 3 new employees and their phone number
+
+          data = [
+              ('Jane','555-001'),
+              ('Joe', '555-001'),
+              ('John', '555-003')
+          ]
+          stmt = "INSERT INTO employees (name, phone) VALUES (%s,%s)"
+          cursor.executemany(stmt, data)
+
+        INSERT statements are optimized by batching the data, that is
+        using the MySQL multiple rows syntax.
+
+        Results are discarded. If they are needed, consider looping over
+        data using the execute() method.
+        """
+        if not operation or not seq_params:
+            return None
+        self._connection.handle_unread_result()
+
+        try:
+            _ = iter(seq_params)
+        except TypeError as err:
+            raise ProgrammingError("Parameters for query must be an Iterable") from err
+
+        # Optimize INSERTs by batching them
+        if re.match(RE_SQL_INSERT_STMT, operation):
+            if not seq_params:
+                self._rowcount = 0
+                return None
+            stmt = self._batch_insert(operation, seq_params)
+            if stmt is not None:
+                self._executed = stmt
+                return self.execute(stmt)
+
+        rowcnt = 0
+        try:
+            for params in seq_params:
+                self.execute(operation, params)
+                if self.with_rows and self._have_unread_result():
+                    self.fetchall()
+                rowcnt += self._rowcount
+        except (ValueError, TypeError) as err:
+            raise InterfaceError(f"Failed executing the operation; {err}") from None
+        self._rowcount = rowcnt
+        return None
+
+    def stored_results(self) -> Iterator[MySQLCursor]:
+        """Returns an iterator for stored results
+
+        This method returns an iterator over results which are stored when
+        callproc() is called. The iterator will provide MySQLCursorBuffered
+        instances.
+
+        Returns an iterator.
+        """
+        return iter(self._stored_results)
+
+    def callproc(
+        self,
+        procname: str,
+        args: Sequence[Any] = (),
+    ) -> Optional[Union[Dict[str, ToPythonOutputTypes], RowType]]:
+        """Calls a stored procedure with the given arguments
+
+        The arguments will be set during this session, meaning
+        they will be called like _<procname>__arg<nr> where <nr>
+        is an enumeration (+1) of the arguments.
+
+        Coding Example:
+          1) Defining the Stored Routine in MySQL:
+             CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
+             BEGIN
+                 SET pProd := pFac1 * pFac2;
+             END
+
+          2) Executing in Python:
+             args = (5, 5, 0)  # 0 is to hold pProd
+             cursor.callproc('multiply', args)
+             print(cursor.fetchone())
+
+        For OUT and INOUT parameters the user should provide the
+        type of the parameter as well. The argument should be a
+        tuple with the value of the parameter as its first item
+        and the type of the parameter as the second.
+
+        In the above example, one can call the callproc() method like:
+            args = (5, 5, (0, 'INT'))
+            cursor.callproc('multiply', args)
+
+        The type of the argument given in the tuple will be used by
+        the MySQL CAST function to convert the values to the corresponding
+        MySQL type (see CAST in the MySQL Reference Manual for more
+        information).
+
+        Does not return a value, but a result set will be
+        available when the CALL statement executes successfully.
+        Raises exceptions when something is wrong.
+        """
+        if not procname or not isinstance(procname, str):
+            raise ValueError("procname must be a string")
+
+        if not isinstance(args, (tuple, list)):
+            raise ValueError("args must be a sequence")
+
+        argfmt = "@_{name}_arg{index}"
+        self._stored_results = []
+
+        results = []
+        try:
+            argnames = []
+            argtypes = []
+
+            # MySQL itself does support calling procedures with their full
+            # name <database>.<procedure_name>. It's necessary to split
+            # by '.' and grab the procedure name from procname.
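+            # For example (hypothetical name): "mydb.multiply" -> "multiply".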
+            procname_abs = procname.split(".")[-1]
+            if args:
+                argvalues = []
+                for idx, arg in enumerate(args):
+                    argname = argfmt.format(name=procname_abs, index=idx + 1)
+                    argnames.append(argname)
+                    if isinstance(arg, tuple):
+                        argtypes.append(f" CAST({argname} AS {arg[1]})")
+                        argvalues.append(arg[0])
+                    else:
+                        argtypes.append(argname)
+                        argvalues.append(arg)
+
+                placeholders = ",".join(f"{arg}=%s" for arg in argnames)
+                self.execute(f"SET {placeholders}", argvalues)
+
+            call = f"CALL {procname}({','.join(argnames)})"
+
+            # We disable consuming results temporarily to make sure we
+            # are getting all results
+            can_consume_results = self._connection.can_consume_results
+            for result in self._connection.cmd_query_iter(call):
+                self._connection.can_consume_results = False
+                if isinstance(self, (MySQLCursorDict, MySQLCursorBufferedDict)):
+                    cursor_class = MySQLCursorBufferedDict
+                elif isinstance(
+                    self,
+                    (MySQLCursorNamedTuple, MySQLCursorBufferedNamedTuple),
+                ):
+                    cursor_class = MySQLCursorBufferedNamedTuple
+                elif self._raw:
+                    cursor_class = MySQLCursorBufferedRaw
+                else:
+                    cursor_class = MySQLCursorBuffered
+                # pylint: disable=protected-access
+                cur = cursor_class(self._connection.get_self())
+                cur._executed = f"(a result of {call})"
+                cur._handle_result(result)
+                # pylint: enable=protected-access
+                if cur.warnings is not None:
+                    self._warnings = cur.warnings
+                if "columns" in result:
+                    results.append(cur)
+            self._connection.can_consume_results = can_consume_results
+
+            if argnames:
+                # Create name aliases to be compatible with namedtuples
+                args = [
+                    f"{name} AS {alias}"
+                    for name, alias in zip(
+                        argtypes, [arg.lstrip("@_") for arg in argnames]
+                    )
+                ]
+                select = f"SELECT {','.join(args)}"
+                self.execute(select)
+                self._stored_results = results
+                return self.fetchone()
+
+            self._stored_results = results
+            return tuple()
+
+        except Error:
+            raise
+        except Exception as err:
+            raise InterfaceError(f"Failed calling stored routine; {err}") from None
+
+    def getlastrowid(self) -> Optional[int]:
+        """Returns the value generated for an AUTO_INCREMENT column
+
+        Returns the value generated for an AUTO_INCREMENT column by
+        the previous INSERT or UPDATE statement.
+
+        Returns an integer or None.
+        """
+        return self._last_insert_id
+
+    def _fetch_warnings(self) -> Optional[List[WarningType]]:
+        """
+        Fetch warnings doing a SHOW WARNINGS. Can be called after getting
+        the result.
+
+        Returns a result set or None when there were no warnings.
+        """
+        res = []
+        try:
+            cur = self._connection.cursor(raw=False)
+            cur.execute("SHOW WARNINGS")
+            res = cur.fetchall()
+            cur.close()
+        except Exception as err:
+            raise InterfaceError(f"Failed getting warnings; {err}") from None
+
+        if res:
+            return res
+
+        return None
+
+    def _handle_warnings(self) -> None:
+        """Handle possible warnings after all results are consumed.
+
+        Raises:
+            Error: Also raises exceptions if raise_on_warnings is set.
+ """ + if self._connection.get_warnings and self._warning_count: + self._warnings = self._fetch_warnings() + + if not self._warnings: + return + + err = get_mysql_exception( + self._warnings[0][1], + self._warnings[0][2], + warning=not self._connection.raise_on_warnings, + ) + + if self._connection.raise_on_warnings: + raise err + + warnings.warn(err, stacklevel=4) + + def _handle_eof(self, eof: EofPacketType) -> None: + """Handle EOF packet""" + self._connection.unread_result = False + self._nextrow = (None, None) + self._warning_count = eof["warning_count"] + self._handle_warnings() + + def _fetch_row(self, raw: bool = False) -> Optional[RowType]: + """Returns the next row in the result set + + Returns a tuple or None. + """ + if not self._have_unread_result(): + return None + row = None + + if self._nextrow == (None, None): + (row, eof) = self._connection.get_row( + binary=self._binary, columns=self.description, raw=raw + ) + else: + (row, eof) = self._nextrow + + if row: + self._nextrow = self._connection.get_row( + binary=self._binary, columns=self.description, raw=raw + ) + eof = self._nextrow[1] + if eof is not None: + self._handle_eof(eof) + if self._rowcount == -1: + self._rowcount = 1 + else: + self._rowcount += 1 + if eof: + self._handle_eof(eof) + + return row + + def fetchone(self) -> Optional[RowType]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + return self._fetch_row() + + def fetchmany(self, size: Optional[int] = None) -> List[RowType]: + """Return the next set of rows of a query result set. + + When no more rows are available, it returns an empty list. + The number of rows returned can be specified using the size argument, + which defaults to one. + + Returns: + list: The next set of rows of a query result set. + """ + self._check_executed() + res = [] + cnt = size or self.arraysize + while cnt > 0 and self._have_unread_result(): + cnt -= 1 + row = self.fetchone() + if row: + res.append(row) + return res + + def fetchall(self) -> List[RowType]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + self._check_executed() + if not self._have_unread_result(): + return [] + + (rows, eof) = self._connection.get_rows() + if self._nextrow[0]: + rows.insert(0, self._nextrow[0]) + + self._handle_eof(eof) + rowcount = len(rows) + if rowcount >= 0 and self._rowcount == -1: + self._rowcount = 0 + self._rowcount += rowcount + return rows + + @property + def column_names(self) -> Tuple[str, ...]: + """Returns column names + + This property returns the columns names as a tuple. + + Returns a tuple. + """ + if not self.description: + return tuple() + return tuple(d[0] for d in self.description) + + @property + def statement(self) -> Optional[str]: + """Returns the executed statement + + This property returns the executed statement. When multiple + statements were executed, the current statement in the iterator + will be returned. + """ + if self._executed is None: + return None + try: + return self._executed.strip().decode("utf-8") # type: ignore[union-attr] + except (AttributeError, UnicodeDecodeError): + return self._executed.strip() # type: ignore[return-value] + + @property + def with_rows(self) -> bool: + """Returns whether the cursor could have rows returned + + This property returns True when column descriptions are available + and possibly also rows, which will need to be fetched. + + Returns True or False. 
+ """ + if not self.description: + return False + return True + + def __str__(self) -> str: + fmt = "{class_name}: {stmt}" + if self._executed: + try: + executed = self._executed.decode("utf-8") # type: ignore[union-attr] + except AttributeError: + executed = self._executed + if len(executed) > 40: + executed = executed[:40] + ".." + else: + executed = "(Nothing executed yet)" + return fmt.format(class_name=self.__class__.__name__, stmt=executed) + + +class MySQLCursorBuffered(MySQLCursor): + """Cursor which fetches rows within execute()""" + + def __init__( + self, connection: Optional[Type[MySQLConnectionAbstract]] = None + ) -> None: + super().__init__(connection) + self._rows: Optional[List[RowType]] = None + self._next_row: int = 0 + + def _handle_resultset(self) -> None: + (self._rows, eof) = self._connection.get_rows() + self._rowcount = len(self._rows) + self._handle_eof(eof) + self._next_row = 0 + try: + self._connection.unread_result = False + except AttributeError: + pass + + def reset(self, free: bool = True) -> None: + self._rows = None + + def _fetch_row(self, raw: bool = False) -> Optional[RowType]: + row = None + try: + row = self._rows[self._next_row] + except (IndexError, TypeError): + return None + else: + self._next_row += 1 + return row + return None + + def fetchone(self) -> Optional[RowType]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + return self._fetch_row() + + def fetchall(self) -> List[RowType]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + if self._executed is None or self._rows is None: + raise InterfaceError(ERR_NO_RESULT_TO_FETCH) + res = [] + res = self._rows[self._next_row :] + self._next_row = len(self._rows) + return res + + def fetchmany(self, size: Optional[int] = None) -> List[RowType]: + """Return the next set of rows of a query result set. + + When no more rows are available, it returns an empty list. + The number of rows returned can be specified using the size argument, + which defaults to one. + + Returns: + list: The next set of rows of a query result set. + """ + self._check_executed() + res = [] + cnt = size or self.arraysize + while cnt > 0: + cnt -= 1 + row = self.fetchone() + if row: + res.append(row) + + return res + + @property + def with_rows(self) -> bool: + return self._rows is not None + + +class MySQLCursorRaw(MySQLCursor): + """ + Skips conversion from MySQL datatypes to Python types when fetching rows. + """ + + _raw: bool = True + + def fetchone(self) -> Optional[RowType]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + return self._fetch_row(raw=True) + + def fetchall(self) -> List[RowType]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + self._check_executed() + if not self._have_unread_result(): + return [] + (rows, eof) = self._connection.get_rows(raw=True) + if self._nextrow[0]: + rows.insert(0, self._nextrow[0]) + self._handle_eof(eof) + rowcount = len(rows) + if rowcount >= 0 and self._rowcount == -1: + self._rowcount = 0 + self._rowcount += rowcount + return rows + + +class MySQLCursorBufferedRaw(MySQLCursorBuffered): + """ + Cursor which skips conversion from MySQL datatypes to Python types when + fetching rows and fetches rows within execute(). 
+ """ + + _raw: bool = True + + def _handle_resultset(self) -> None: + (self._rows, eof) = self._connection.get_rows(raw=self._raw) + self._rowcount = len(self._rows) + self._handle_eof(eof) + self._next_row = 0 + try: + self._connection.unread_result = False + except AttributeError: + pass + + def fetchone(self) -> Optional[RowType]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + return self._fetch_row() + + def fetchall(self) -> List[RowType]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + self._check_executed() + return list(self._rows[self._next_row :]) + + @property + def with_rows(self) -> bool: + return self._rows is not None + + +class MySQLCursorPrepared(MySQLCursor): + """Cursor using MySQL Prepared Statements""" + + def __init__(self, connection: Optional[Type[MySQLConnectionAbstract]] = None): + super().__init__(connection) + self._rows: Optional[List[RowType]] = None + self._next_row: int = 0 + self._prepared: Optional[Dict[str, Union[int, List[DescriptionType]]]] = None + self._binary: bool = True + self._have_result: Optional[bool] = None + self._last_row_sent: bool = False + self._cursor_exists: bool = False + + def reset(self, free: bool = True) -> None: + if self._prepared: + try: + self._connection.cmd_stmt_close(self._prepared["statement_id"]) + except Error: + # We tried to deallocate, but it's OK when we fail. + pass + self._prepared = None + self._last_row_sent = False + self._cursor_exists = False + + def _handle_noresultset(self, res: ResultType) -> None: + self._handle_server_status(res.get("status_flag", res.get("server_status", 0))) + super()._handle_noresultset(res) + + def _handle_server_status(self, flags: int) -> None: + """Check for SERVER_STATUS_CURSOR_EXISTS and + SERVER_STATUS_LAST_ROW_SENT flags set by the server. + """ + self._cursor_exists = flags & ServerFlag.STATUS_CURSOR_EXISTS != 0 + self._last_row_sent = flags & ServerFlag.STATUS_LAST_ROW_SENT != 0 + + def _handle_eof(self, eof: EofPacketType) -> None: + self._handle_server_status(eof.get("status_flag", eof.get("server_status", 0))) + super()._handle_eof(eof) + + def callproc(self, procname: Any, args: Any = ()) -> NoReturn: + """Calls a stored procedue + + Not supported with MySQLCursorPrepared. + """ + raise NotSupportedError() + + def close(self) -> None: + """Close the cursor + + This method will try to deallocate the prepared statement and close + the cursor. + """ + self.reset() + super().close() + + def _row_to_python(self, rowdata: Any, desc: Any = None) -> Any: + """Convert row data from MySQL to Python types + + The conversion is done while reading binary data in the + protocol module. 
+ """ + + def _handle_result(self, result: ResultType) -> None: + """Handle result after execution""" + if isinstance(result, dict): + self._connection.unread_result = False + self._have_result = False + self._handle_noresultset(result) + else: + self._description = result[1] + self._connection.unread_result = True + self._have_result = True + + if "status_flag" in result[2]: # type: ignore[operator] + self._handle_server_status(result[2]["status_flag"]) + elif "server_status" in result[2]: # type: ignore[operator] + self._handle_server_status(result[2]["server_status"]) + + def execute( + self, + operation: StrOrBytes, + params: Optional[ParamsSequenceOrDictType] = None, + multi: bool = False, + ) -> None: # multi is unused + """Prepare and execute a MySQL Prepared Statement + + This method will prepare the given operation and execute it using + the optionally given parameters. + + If the cursor instance already had a prepared statement, it is + first closed. + + Note: argument "multi" is unused. + """ + charset = self._connection.charset + if charset == "utf8mb4": + charset = "utf8" + + if not isinstance(operation, str): + try: + operation = operation.decode(charset) + except UnicodeDecodeError as err: + raise ProgrammingError(str(err)) from err + + if isinstance(params, dict): + replacement_keys = re.findall(RE_SQL_PYTHON_CAPTURE_PARAM_NAME, operation) + try: + # Replace params dict with params tuple in correct order. + params = tuple(params[key] for key in replacement_keys) + except KeyError as err: + raise ProgrammingError( + "Not all placeholders were found in the parameters dict" + ) from err + # Convert %(name)s to ? before sending it to MySQL + operation = re.sub(RE_SQL_PYTHON_REPLACE_PARAM, "?", operation) + + if operation is not self._executed: + if self._prepared: + self._connection.cmd_stmt_close(self._prepared["statement_id"]) + self._executed = operation + + try: + operation = operation.encode(charset) + except UnicodeEncodeError as err: + raise ProgrammingError(str(err)) from err + + if b"%s" in operation: + # Convert %s to ? before sending it to MySQL + operation = re.sub(RE_SQL_FIND_PARAM, b"?", operation) + + try: + self._prepared = self._connection.cmd_stmt_prepare(operation) + except Error: + self._executed = None + raise + + self._connection.cmd_stmt_reset(self._prepared["statement_id"]) + + if self._prepared["parameters"] and not params: + return + if params: + if not isinstance(params, (tuple, list)): + raise ProgrammingError( + errno=1210, + msg=f"Incorrect type of argument: {type(params).__name__}({params})" + ", it must be of type tuple or list the argument given to " + "the prepared statement", + ) + if len(self._prepared["parameters"]) != len(params): + raise ProgrammingError( + errno=1210, + msg="Incorrect number of arguments executing prepared statement", + ) + + if params is None: + params = () + res = self._connection.cmd_stmt_execute( + self._prepared["statement_id"], + data=params, + parameters=self._prepared["parameters"], + ) + self._handle_result(res) + + def executemany( + self, + operation: str, + seq_params: Sequence[ParamsSequenceType], + ) -> None: + """Prepare and execute a MySQL Prepared Statement many times + + This method will prepare the given operation and execute with each + tuple found the list seq_params. + + If the cursor instance already had a prepared statement, it is + first closed. + + executemany() simply calls execute(). 
+ """ + rowcnt = 0 + try: + for params in seq_params: + self.execute(operation, params) + if self.with_rows and self._have_unread_result(): + self.fetchall() + rowcnt += self._rowcount + except (ValueError, TypeError) as err: + raise InterfaceError(f"Failed executing the operation; {err}") from None + self._rowcount = rowcnt + + def fetchone(self) -> Optional[RowType]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + if self._cursor_exists: + self._connection.cmd_stmt_fetch(self._prepared["statement_id"]) + return self._fetch_row() or None + + def fetchmany(self, size: Optional[int] = None) -> List[RowType]: + """Return the next set of rows of a query result set. + + When no more rows are available, it returns an empty list. + The number of rows returned can be specified using the size argument, + which defaults to one. + + Returns: + list: The next set of rows of a query result set. + """ + self._check_executed() + res = [] + cnt = size or self.arraysize + while cnt > 0 and self._have_unread_result(): + cnt -= 1 + row = self._fetch_row() + if row: + res.append(row) + return res + + def fetchall(self) -> List[RowType]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + self._check_executed() + rows = [] + if self._nextrow[0]: + rows.append(self._nextrow[0]) + while self._have_unread_result(): + if self._cursor_exists: + self._connection.cmd_stmt_fetch( + self._prepared["statement_id"], MAX_RESULTS + ) + (tmp, eof) = self._connection.get_rows( + binary=self._binary, columns=self.description + ) + rows.extend(tmp) + self._handle_eof(eof) + self._rowcount = len(rows) + return rows + + +class MySQLCursorDict(MySQLCursor): + """ + Cursor fetching rows as dictionaries. + + The fetch methods of this class will return dictionaries instead of tuples. + Each row is a dictionary that looks like: + row = { + "col1": value1, + "col2": value2 + } + """ + + def _row_to_python( + self, + rowdata: RowType, + desc: Optional[List[DescriptionType]] = None, # pylint: disable=unused-argument + ) -> Optional[Dict[str, ToPythonOutputTypes]]: + """Convert a MySQL text result row to Python types + + Returns a dictionary. + """ + return dict(zip(self.column_names, rowdata)) if rowdata else None + + def fetchone(self) -> Optional[Dict[str, ToPythonOutputTypes]]: + """Return next row of a query result set. + + Returns: + dict or None: A dict from query result set. + """ + return self._row_to_python(super().fetchone(), self.description) + + def fetchall(self) -> List[Optional[Dict[str, ToPythonOutputTypes]]]: + """Return all rows of a query result set. + + Returns: + list: A list of dictionaries with all rows of a query + result set where column names are used as keys. + """ + return [ + self._row_to_python(row, self.description) + for row in super().fetchall() + if row + ] + + +class MySQLCursorNamedTuple(MySQLCursor): + """ + Cursor fetching rows as named tuple. + + The fetch methods of this class will return namedtuples instead of tuples. + Each row is returned as a namedtuple and the values can be accessed as: + row.col1, row.col2 + """ + + def _row_to_python( + self, + rowdata: RowType, + desc: Optional[List[DescriptionType]] = None, # pylint: disable=unused-argument + ) -> Optional[RowType]: + """Convert a MySQL text result row to Python types + + Returns a named tuple. 
+ """ + row = rowdata + + if row: + columns = tuple(self.column_names) + try: + named_tuple = NAMED_TUPLE_CACHE[columns] + except KeyError: + named_tuple = namedtuple("Row", columns) # type:ignore[no-redef, misc] + NAMED_TUPLE_CACHE[columns] = named_tuple + return named_tuple(*row) + return None + + def fetchone(self) -> Optional[RowType]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + row = self._fetch_row() + if row: + if hasattr(self._connection, "converter"): + return self._row_to_python(row, self.description) + return row + return None + + def fetchall(self) -> List[Optional[RowType]]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + self._check_executed() + if not self._have_unread_result(): + return [] + + (rows, eof) = self._connection.get_rows() + if self._nextrow[0]: + rows.insert(0, self._nextrow[0]) + res = [self._row_to_python(row, self.description) for row in rows] + + self._handle_eof(eof) + rowcount = len(rows) + if rowcount >= 0 and self._rowcount == -1: + self._rowcount = 0 + self._rowcount += rowcount + return res + + +class MySQLCursorBufferedDict(MySQLCursorDict, MySQLCursorBuffered): + """ + Buffered Cursor fetching rows as dictionaries. + """ + + def fetchone(self) -> Optional[Dict[str, ToPythonOutputTypes]]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + row = self._fetch_row() + if row: + return self._row_to_python(row, self.description) + return None + + def fetchall(self) -> List[Optional[Dict[str, ToPythonOutputTypes]]]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + if self._executed is None or self._rows is None: + raise InterfaceError(ERR_NO_RESULT_TO_FETCH) + res = [] + for row in self._rows[self._next_row :]: + res.append(self._row_to_python(row, self.description)) + self._next_row = len(self._rows) + return res + + +class MySQLCursorBufferedNamedTuple(MySQLCursorNamedTuple, MySQLCursorBuffered): + """ + Buffered Cursor fetching rows as named tuple. + """ + + def fetchone(self) -> Optional[RowType]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + row = self._fetch_row() + if row: + return self._row_to_python(row, self.description) + return None + + def fetchall(self) -> List[Optional[RowType]]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + if self._executed is None or self._rows is None: + raise InterfaceError(ERR_NO_RESULT_TO_FETCH) + res = [] + for row in self._rows[self._next_row :]: + res.append(self._row_to_python(row, self.description)) + self._next_row = len(self._rows) + return res + + +class MySQLCursorPreparedDict(MySQLCursorDict, MySQLCursorPrepared): # type: ignore[misc] + """ + This class is a blend of features from MySQLCursorDict and MySQLCursorPrepared + + Multiple inheritance in python is allowed but care must be taken + when assuming methods resolution. In the case of multiple + inheritance, a given attribute is first searched in the current + class if it's not found then it's searched in the parent classes. + The parent classes are searched in a left-right fashion and each + class is searched once. 
+ Based on python's attribute resolution, in this case, attributes + are searched as follows: + 1. MySQLCursorPreparedDict (current class) + 2. MySQLCursorDict (left parent class) + 3. MySQLCursorPrepared (right parent class) + 4. MySQLCursor (base class) + """ + + def fetchmany( + self, size: Optional[int] = None + ) -> List[Dict[str, ToPythonOutputTypes]]: + """Return the next set of rows of a query result set. + + When no more rows are available, it returns an empty list. + The number of rows returned can be specified using the size argument, + which defaults to one. + + Returns: + list: The next set of rows of a query result set represented + as a list of dictionaries where column names are used as keys. + """ + return [ + self._row_to_python(row, self.description) + for row in super().fetchmany(size=size) + if row + ] diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/cursor_cext.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/cursor_cext.py new file mode 100644 index 00000000..246ab660 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/cursor_cext.py @@ -0,0 +1,1280 @@ +# Copyright (c) 2014, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +# mypy: disable-error-code="assignment,arg-type,override,union-attr" + +"""Cursor classes using the C Extension.""" +from __future__ import annotations + +import re +import warnings +import weakref + +from collections import namedtuple +from typing import ( + Any, + Dict, + Generator, + Iterator, + List, + NoReturn, + Optional, + Sequence, + Tuple, + Type, + Union, +) +from weakref import CallableProxyType + +# pylint: disable=import-error,no-name-in-module +from _mysql_connector import MySQLInterfaceError, MySQLPrepStmt + +from .types import ( + CextEofPacketType, + CextResultType, + DescriptionType, + ParamsSequenceOrDictType, + ParamsSequenceType, + RowType, + StrOrBytes, + ToPythonOutputTypes, + WarningType, +) + +# pylint: enable=import-error,no-name-in-module +# isort: split + +from .abstracts import NAMED_TUPLE_CACHE, MySQLConnectionAbstract, MySQLCursorAbstract +from .cursor import ( + RE_PY_PARAM, + RE_SQL_COMMENT, + RE_SQL_FIND_PARAM, + RE_SQL_INSERT_STMT, + RE_SQL_INSERT_VALUES, + RE_SQL_ON_DUPLICATE, + RE_SQL_PYTHON_CAPTURE_PARAM_NAME, + RE_SQL_PYTHON_REPLACE_PARAM, + RE_SQL_SPLIT_STMTS, +) +from .errorcode import CR_NO_RESULT_SET +from .errors import ( + Error, + InterfaceError, + NotSupportedError, + ProgrammingError, + get_mysql_exception, +) + +ERR_NO_RESULT_TO_FETCH = "No result set to fetch from" + + +class _ParamSubstitutor: + + """ + Substitutes parameters into SQL statement. + """ + + def __init__(self, params: Sequence[bytes]) -> None: + self.params: Sequence[bytes] = params + self.index: int = 0 + + def __call__(self, matchobj: object) -> bytes: + index = self.index + self.index += 1 + try: + return self.params[index] + except IndexError: + raise ProgrammingError( + "Not enough parameters for the SQL statement" + ) from None + + @property + def remaining(self) -> int: + """Returns number of parameters remaining to be substituted""" + return len(self.params) - self.index + + +class CMySQLCursor(MySQLCursorAbstract): + + """Default cursor for interacting with MySQL using C Extension""" + + _raw: bool = False + _buffered: bool = False + _raw_as_string: bool = False + + def __init__(self, connection: Type[MySQLConnectionAbstract]) -> None: + """Initialize""" + MySQLCursorAbstract.__init__(self) + + self._affected_rows: int = -1 + self._rowcount: int = -1 + self._nextrow: Tuple[Optional[RowType], Optional[CextEofPacketType]] = ( + None, + None, + ) + + if not isinstance(connection, MySQLConnectionAbstract): + raise InterfaceError(errno=2048) + self._cnx: CallableProxyType[Type[MySQLConnectionAbstract]] = weakref.proxy( + connection + ) + + def reset(self, free: bool = True) -> None: + """Reset the cursor + + When free is True (default) the result will be freed. + """ + self._rowcount = -1 + self._nextrow = None + self._affected_rows = -1 + self._last_insert_id: int = 0 + self._warning_count: int = 0 + self._warnings: Optional[List[WarningType]] = None + self._warnings = None + self._warning_count = 0 + self._description: Optional[List[DescriptionType]] = None + self._executed_list: List[StrOrBytes] = [] + if free and self._cnx: + self._cnx.free_result() + super().reset() + + def _check_executed(self) -> None: + """Check if the statement has been executed. + + Raises an error if the statement has not been executed. 
+ """ + if self._executed is None: + raise InterfaceError(ERR_NO_RESULT_TO_FETCH) + + def _fetch_warnings(self) -> Optional[List[WarningType]]: + """Fetch warnings + + Fetch warnings doing a SHOW WARNINGS. Can be called after getting + the result. + + Returns a result set or None when there were no warnings. + + Raises Error (or subclass) on errors. + + Returns list of tuples or None. + """ + warns = [] + try: + # force freeing result + self._cnx.consume_results() + _ = self._cnx.cmd_query("SHOW WARNINGS") + warns = self._cnx.get_rows()[0] + self._cnx.consume_results() + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + except Exception as err: + raise InterfaceError(f"Failed getting warnings; {err}") from None + + if warns: + return warns + + return None + + def _handle_warnings(self) -> None: + """Handle possible warnings after all results are consumed. + + Raises: + Error: Also raises exceptions if raise_on_warnings is set. + """ + if self._cnx.get_warnings and self._warning_count: + self._warnings = self._fetch_warnings() + + if not self._warnings: + return + + err = get_mysql_exception( + *self._warnings[0][1:3], warning=not self._cnx.raise_on_warnings + ) + if self._cnx.raise_on_warnings: + raise err + + warnings.warn(str(err), stacklevel=4) + + def _handle_result(self, result: Union[CextEofPacketType, CextResultType]) -> None: + """Handles the result after statement execution""" + if "columns" in result: + self._description = result["columns"] + self._rowcount = 0 + self._handle_resultset() + else: + self._last_insert_id = result["insert_id"] + self._warning_count = result["warning_count"] + self._affected_rows = result["affected_rows"] + self._rowcount = -1 + self._handle_warnings() + + def _handle_resultset(self) -> None: + """Handle a result set""" + + def _handle_eof(self) -> None: + """Handle end of reading the result + + Raises an Error on errors. + """ + self._warning_count = self._cnx.warning_count + self._handle_warnings() + if not self._cnx.more_results: + self._cnx.free_result() + + def _execute_iter(self) -> Generator[CMySQLCursor, None, None]: + """Generator returns MySQLCursor objects for multiple statements + + Deprecated: use nextset() method directly. + + This method is only used when multiple statements are executed + by the execute() method. It uses zip() to make an iterator from the + given query_iter (result of MySQLConnection.cmd_query_iter()) and + the list of statements that were executed. + """ + executed_list = RE_SQL_SPLIT_STMTS.split(self._executed) + i = 0 + self._executed = executed_list[i] + yield self + + while True: + try: + if not self.nextset(): + raise StopIteration + except InterfaceError as err: + # Result without result set + if err.errno != CR_NO_RESULT_SET: + raise + except StopIteration: + return + i += 1 + try: + self._executed = executed_list[i].strip() + except IndexError: + self._executed = executed_list[0] + yield self + return + + def execute( + self, + operation: StrOrBytes, + params: ParamsSequenceOrDictType = (), + multi: bool = False, + ) -> Optional[Generator[CMySQLCursor, None, None]]: + """Execute given statement using given parameters + + Deprecated: The multi argument is not needed and nextset() should + be used to handle multiple result sets. 
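+
+        Illustrative usage (the table and column names here are
+        hypothetical, not part of this module):
+
+            cursor.execute("SELECT id, name FROM employees WHERE id = %s", (7,))
+            row = cursor.fetchone()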
+ """ + if not operation: + return None + + try: + if not self._cnx or self._cnx.is_closed(): + raise ProgrammingError + except (ProgrammingError, ReferenceError) as err: + raise ProgrammingError("Cursor is not connected", 2055) from err + self._cnx.handle_unread_result() + + stmt = "" + self.reset() + + try: + if isinstance(operation, str): + stmt = operation.encode(self._cnx.python_charset) + else: + stmt = operation + except (UnicodeDecodeError, UnicodeEncodeError) as err: + raise ProgrammingError(str(err)) from err + + if params: + prepared = self._cnx.prepare_for_mysql(params) + if isinstance(prepared, dict): + for key, value in prepared.items(): + stmt = stmt.replace(f"%({key})s".encode(), value) + elif isinstance(prepared, (list, tuple)): + psub = _ParamSubstitutor(prepared) + stmt = RE_PY_PARAM.sub(psub, stmt) + if psub.remaining != 0: + raise ProgrammingError( + "Not all parameters were used in the SQL statement" + ) + + try: + result = self._cnx.cmd_query( + stmt, + raw=self._raw, + buffered=self._buffered, + raw_as_string=self._raw_as_string, + ) + except MySQLInterfaceError as err: + raise get_mysql_exception( + msg=err.msg, errno=err.errno, sqlstate=err.sqlstate + ) from err + + self._executed = stmt + self._handle_result(result) + + if multi: + return self._execute_iter() + + return None + + def _batch_insert( + self, + operation: str, + seq_params: Sequence[ParamsSequenceOrDictType], + ) -> Optional[bytes]: + """Implements multi row insert""" + + def remove_comments(match: re.Match) -> str: + """Remove comments from INSERT statements. + + This function is used while removing comments from INSERT + statements. If the matched string is a comment not enclosed + by quotes, it returns an empty string, else the string itself. + """ + if match.group(1): + return "" + return match.group(2) + + tmp = re.sub( + RE_SQL_ON_DUPLICATE, + "", + re.sub(RE_SQL_COMMENT, remove_comments, operation), + ) + + matches = re.search(RE_SQL_INSERT_VALUES, tmp) + if not matches: + raise InterfaceError( + "Failed rewriting statement for multi-row INSERT. Check SQL syntax" + ) + fmt = matches.group(1).encode(self._cnx.python_charset) + values = [] + + try: + stmt = operation.encode(self._cnx.python_charset) + for params in seq_params: + tmp = fmt + prepared = self._cnx.prepare_for_mysql(params) + if isinstance(prepared, dict): + for key, value in prepared.items(): + tmp = tmp.replace(f"%({key})s".encode(), value) + elif isinstance(prepared, (list, tuple)): + psub = _ParamSubstitutor(prepared) + tmp = RE_PY_PARAM.sub(psub, tmp) + if psub.remaining != 0: + raise ProgrammingError( + "Not all parameters were used in the SQL statement" + ) + values.append(tmp) + + if fmt in stmt: + stmt = stmt.replace(fmt, b",".join(values), 1) + self._executed = stmt + return stmt + return None + except (UnicodeDecodeError, UnicodeEncodeError) as err: + raise ProgrammingError(str(err)) from err + except Exception as err: + raise InterfaceError(f"Failed executing the operation; {err}") from None + + def executemany( + self, + operation: str, + seq_params: Sequence[ParamsSequenceOrDictType], + ) -> Optional[Generator[CMySQLCursor, None, None]]: + """Execute the given operation multiple times + + The executemany() method will execute the operation iterating + over the list of parameters in seq_params. 
+
+        Example: Inserting 3 new employees and their phone number
+
+        data = [
+            ('Jane', '555-001'),
+            ('Joe', '555-002'),
+            ('John', '555-003')
+        ]
+        stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s')"
+        cursor.executemany(stmt, data)
+
+        INSERT statements are optimized by batching the data, that is
+        using the MySQL multiple rows syntax.
+
+        Results are discarded! If they are needed, consider looping over
+        data using the execute() method.
+        """
+        if not operation or not seq_params:
+            return None
+
+        try:
+            if not self._cnx:
+                raise ProgrammingError
+        except (ProgrammingError, ReferenceError) as err:
+            raise ProgrammingError("Cursor is not connected") from err
+        self._cnx.handle_unread_result()
+
+        if not isinstance(seq_params, (list, tuple)):
+            raise ProgrammingError("Parameters for query must be list or tuple.")
+
+        # Optimize INSERTs by batching them
+        if re.match(RE_SQL_INSERT_STMT, operation):
+            if not seq_params:
+                self._rowcount = 0
+                return None
+            stmt = self._batch_insert(operation, seq_params)
+            if stmt is not None:
+                self._executed = stmt
+                return self.execute(stmt)
+
+        rowcnt = 0
+        try:
+            # When processing read ops (e.g., SELECT), rowcnt is updated
+            # based on self._rowcount. For write ops (e.g., INSERT) it is
+            # updated based on self._affected_rows.
+            # The variable self._description is None for write ops, that's
+            # why we use it as an indicator for updating rowcnt.
+            for params in seq_params:
+                self.execute(operation, params)
+                if self.with_rows and self._cnx.unread_result:
+                    self.fetchall()
+                rowcnt += self._rowcount if self.description else self._affected_rows
+        except (ValueError, TypeError) as err:
+            raise InterfaceError(f"Failed executing the operation; {err}") from None
+
+        self._rowcount = rowcnt
+        return None
+
+    @property
+    def description(self) -> Optional[List[DescriptionType]]:
+        """Returns description of columns in a result"""
+        return self._description
+
+    @property
+    def rowcount(self) -> int:
+        """Returns the number of rows produced or affected"""
+        if self._rowcount == -1:
+            return self._affected_rows
+        return self._rowcount
+
+    def close(self) -> bool:
+        """Close the cursor
+
+        The result will be freed.
+        """
+        if not self._cnx:
+            return False
+
+        self._cnx.handle_unread_result()
+        self._warnings = None
+        self._cnx = None
+        return True
+
+    def callproc(
+        self,
+        procname: str,
+        args: Sequence[Any] = (),
+    ) -> Optional[Union[Dict[str, ToPythonOutputTypes], RowType]]:
+        """Calls a stored procedure with the given arguments"""
+        if not procname or not isinstance(procname, str):
+            raise ValueError("procname must be a string")
+
+        if not isinstance(args, (tuple, list)):
+            raise ValueError("args must be a sequence")
+
+        argfmt = "@_{name}_arg{index}"
+        self._stored_results = []
+
+        try:
+            argnames = []
+            argtypes = []
+
+            # MySQL itself does support calling procedures with their full
+            # name (<database>.<procedure_name>). It's necessary to split
+            # by '.' and grab the procedure name from procname.
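+            # For example, a fully qualified name such as the hypothetical
+            # "mydb.sp_get_employee" is reduced to "sp_get_employee" before
+            # the session-variable argument names are built from it.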
+ procname_abs = procname.split(".")[-1] + if args: + argvalues = [] + for idx, arg in enumerate(args): + argname = argfmt.format(name=procname_abs, index=idx + 1) + argnames.append(argname) + if isinstance(arg, tuple): + argtypes.append(f" CAST({argname} AS {arg[1]})") + argvalues.append(arg[0]) + else: + argtypes.append(argname) + argvalues.append(arg) + + placeholders = ",".join(f"{arg}=%s" for arg in argnames) + self.execute(f"SET {placeholders}", argvalues) + + call = f"CALL {procname}({','.join(argnames)})" + + result = self._cnx.cmd_query( + call, raw=self._raw, raw_as_string=self._raw_as_string + ) + + results = [] + while self._cnx.result_set_available: + result = self._cnx.fetch_eof_columns() + if isinstance(self, (CMySQLCursorDict, CMySQLCursorBufferedDict)): + cursor_class = CMySQLCursorBufferedDict + elif isinstance( + self, + (CMySQLCursorNamedTuple, CMySQLCursorBufferedNamedTuple), + ): + cursor_class = CMySQLCursorBufferedNamedTuple + elif self._raw: + cursor_class = CMySQLCursorBufferedRaw + else: + cursor_class = CMySQLCursorBuffered + # pylint: disable=protected-access + cur = cursor_class(self._cnx.get_self()) + cur._executed = f"(a result of {call})" + cur._handle_result(result) + # pylint: enable=protected-access + results.append(cur) + self._cnx.next_result() + self._stored_results = results + self._handle_eof() + + if argnames: + self.reset() + # Create names aliases to be compatible with namedtuples + args = [ + f"{name} AS {alias}" + for name, alias in zip( + argtypes, [arg.lstrip("@_") for arg in argnames] + ) + ] + select = f"SELECT {','.join(args)}" + self.execute(select) + + return self.fetchone() + return tuple() + + except Error: + raise + except Exception as err: + raise InterfaceError(f"Failed calling stored routine; {err}") from None + + def nextset(self) -> Optional[bool]: + """Skip to the next available result set""" + if not self._cnx.next_result(): + self.reset(free=True) + return None + self.reset(free=False) + + if not self._cnx.result_set_available: + eof = self._cnx.fetch_eof_status() + self._handle_result(eof) + raise InterfaceError(errno=CR_NO_RESULT_SET) + + self._handle_result(self._cnx.fetch_eof_columns()) + return True + + def fetchall(self) -> List[RowType]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + self._check_executed() + if not self._cnx.unread_result: + return [] + + rows: Tuple[List[RowType], Optional[CextEofPacketType]] = self._cnx.get_rows() + if self._nextrow and self._nextrow[0]: + rows[0].insert(0, self._nextrow[0]) + + if not rows[0]: + self._handle_eof() + return [] + + self._rowcount += len(rows[0]) + self._handle_eof() + # self._cnx.handle_unread_result() + return rows[0] + + def fetchmany(self, size: int = 1) -> List[RowType]: + """Return the next set of rows of a query result set. + + When no more rows are available, it returns an empty list. + The number of rows returned can be specified using the size argument, + which defaults to one. + + Returns: + list: The next set of rows of a query result set. 
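+
+        Illustrative usage (statement, batch size and the process()
+        helper are hypothetical):
+
+            cursor.execute("SELECT id, name FROM employees")
+            batch = cursor.fetchmany(size=100)
+            while batch:
+                process(batch)
+                batch = cursor.fetchmany(size=100)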
+        """
+        self._check_executed()
+        if self._nextrow and self._nextrow[0]:
+            rows = [self._nextrow[0]]
+            size -= 1
+        else:
+            rows = []
+
+        if size and self._cnx.unread_result:
+            rows.extend(self._cnx.get_rows(size)[0])
+
+        if size:
+            if self._cnx.unread_result:
+                self._nextrow = self._cnx.get_row()
+                if (
+                    self._nextrow
+                    and not self._nextrow[0]
+                    and not self._cnx.more_results
+                ):
+                    self._cnx.free_result()
+            else:
+                self._nextrow = (None, None)
+
+        if not rows:
+            self._handle_eof()
+            return []
+
+        self._rowcount += len(rows)
+        return rows
+
+    def fetchone(self) -> Optional[RowType]:
+        """Return next row of a query result set.
+
+        Returns:
+            tuple or None: A row from query result set.
+        """
+        self._check_executed()
+        row = self._nextrow
+        if not row and self._cnx.unread_result:
+            row = self._cnx.get_row()
+
+        if row and row[0]:
+            self._nextrow = self._cnx.get_row()
+            if not self._nextrow[0] and not self._cnx.more_results:
+                self._cnx.free_result()
+        else:
+            self._handle_eof()
+            return None
+        self._rowcount += 1
+        return row[0]
+
+    def __iter__(self) -> Iterator[RowType]:
+        """Iteration over the result set
+
+        Iteration over the result set which calls self.fetchone()
+        and returns the next row.
+        """
+        return iter(self.fetchone, None)
+
+    def stored_results(self) -> Generator[CMySQLCursor, None, None]:
+        """Returns an iterator for stored results
+
+        This method returns an iterator over results which are stored when
+        callproc() is called. The iterator will provide MySQLCursorBuffered
+        instances.
+
+        Returns an iterator.
+        """
+        for result in self._stored_results:
+            yield result
+        self._stored_results = []
+
+    def __next__(self) -> RowType:
+        """Iteration over the result set
+
+        Used for iterating over the result set. Calls self.fetchone()
+        to get the next row.
+
+        Raises StopIteration when no more rows are available.
+        """
+        try:
+            row = self.fetchone()
+        except InterfaceError:
+            raise StopIteration from None
+        if not row:
+            raise StopIteration from None
+        return row
+
+    @property
+    def column_names(self) -> Tuple[str, ...]:
+        """Returns column names
+
+        This property returns the column names as a tuple.
+
+        Returns a tuple.
+        """
+        if not self.description:
+            return ()
+        return tuple(d[0] for d in self.description)
+
+    @property
+    def statement(self) -> str:
+        """Returns the executed statement
+
+        This property returns the executed statement. When multiple
+        statements were executed, the current statement in the iterator
+        will be returned.
+        """
+        try:
+            return self._executed.strip().decode("utf8")
+        except AttributeError:
+            return self._executed.strip()  # type: ignore[return-value]
+
+    @property
+    def with_rows(self) -> bool:
+        """Returns whether the cursor could have rows returned
+
+        This property returns True when column descriptions are available
+        and possibly also rows, which will need to be fetched.
+
+        Returns True or False.
+        """
+        if self.description:
+            return True
+        return False
+
+    def __str__(self) -> str:
+        fmt = "{class_name}: {stmt}"
+        if self._executed:
+            try:
+                executed = self._executed.decode("utf-8")
+            except AttributeError:
+                executed = self._executed
+            if len(executed) > 40:
+                executed = executed[:40] + ".."
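+                # Keep the representation short: statements longer than
+                # 40 characters are truncated and end with ".." instead
+                # of their full text.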
+ else: + executed = "(Nothing executed yet)" + + return fmt.format(class_name=self.__class__.__name__, stmt=executed) + + +class CMySQLCursorBuffered(CMySQLCursor): + + """Cursor using C Extension buffering results""" + + def __init__(self, connection: Type[MySQLConnectionAbstract]): + """Initialize""" + super().__init__(connection) + + self._rows: Optional[List[RowType]] = None + self._next_row: int = 0 + + def _handle_resultset(self) -> None: + """Handle a result set""" + self._rows = self._cnx.get_rows()[0] + self._next_row = 0 + self._rowcount: int = len(self._rows) + self._handle_eof() + + def reset(self, free: bool = True) -> None: + """Reset the cursor to default""" + self._rows = None + self._next_row = 0 + super().reset(free=free) + + def _fetch_row(self) -> Optional[RowType]: + """Returns the next row in the result set + + Returns a tuple or None. + """ + row = None + try: + row = self._rows[self._next_row] + except IndexError: + return None + else: + self._next_row += 1 + + return row + + def fetchall(self) -> List[RowType]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + self._check_executed() + res = self._rows[self._next_row :] + self._next_row = len(self._rows) + return res + + def fetchmany(self, size: int = 1) -> List[RowType]: + """Return the next set of rows of a query result set. + + When no more rows are available, it returns an empty list. + The number of rows returned can be specified using the size argument, + which defaults to one. + + Returns: + list: The next set of rows of a query result set. + """ + self._check_executed() + res = [] + cnt = size or self.arraysize + while cnt > 0: + cnt -= 1 + row = self._fetch_row() + if row: + res.append(row) + else: + break + return res + + def fetchone(self) -> Optional[RowType]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + return self._fetch_row() + + @property + def with_rows(self) -> bool: + """Returns whether the cursor could have rows returned + + This property returns True when rows are available, + which will need to be fetched. + + Returns True or False. + """ + return self._rows is not None + + +class CMySQLCursorRaw(CMySQLCursor): + """Cursor using C Extension return raw results""" + + _raw: bool = True + + +class CMySQLCursorBufferedRaw(CMySQLCursorBuffered): + """Cursor using C Extension buffering raw results""" + + _raw: bool = True + + +class CMySQLCursorDict(CMySQLCursor): + """Cursor using C Extension returning rows as dictionaries""" + + _raw: bool = False + + def fetchone(self) -> Optional[Dict[str, ToPythonOutputTypes]]: + """Return next row of a query result set. + + Returns: + dict or None: A dict from query result set. + """ + row = super().fetchone() + return dict(zip(self.column_names, row)) if row else None + + def fetchmany(self, size: int = 1) -> List[Dict[str, ToPythonOutputTypes]]: + """Return the next set of rows of a query result set. + + When no more rows are available, it returns an empty list. + The number of rows returned can be specified using the size argument, + which defaults to one. + + Returns: + list: The next set of rows of a query result set represented + as a list of dictionaries where column names are used as keys. 
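+
+        Illustrative shape of one returned row (column names are
+        hypothetical): {"id": 7, "name": "Jane"}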
+        """
+        res = super().fetchmany(size=size)
+        return [dict(zip(self.column_names, row)) for row in res]
+
+    def fetchall(self) -> List[Dict[str, ToPythonOutputTypes]]:
+        """Return all rows of a query result set.
+
+        Returns:
+            list: A list of dictionaries with all rows of a query
+                result set where column names are used as keys.
+        """
+        res = super().fetchall()
+        return [dict(zip(self.column_names, row)) for row in res]
+
+
+class CMySQLCursorBufferedDict(CMySQLCursorBuffered):
+    """Cursor using C Extension buffering and returning rows as dictionaries"""
+
+    _raw = False
+
+    def _fetch_row(self) -> Optional[Dict[str, ToPythonOutputTypes]]:
+        row = super()._fetch_row()
+        if row:
+            return dict(zip(self.column_names, row))
+        return None
+
+    def fetchall(self) -> List[Dict[str, ToPythonOutputTypes]]:
+        """Return all rows of a query result set.
+
+        Returns:
+            list: A list of dictionaries with all rows of a query
+                result set where column names are used as keys.
+        """
+        res = super().fetchall()
+        return [dict(zip(self.column_names, row)) for row in res]
+
+
+class CMySQLCursorNamedTuple(CMySQLCursor):
+    """Cursor using C Extension returning rows as named tuples"""
+
+    named_tuple: Any = None
+
+    def _handle_resultset(self) -> None:
+        """Handle a result set"""
+        super()._handle_resultset()
+        columns = tuple(self.column_names)
+        try:
+            self.named_tuple = NAMED_TUPLE_CACHE[columns]
+        except KeyError:
+            self.named_tuple = namedtuple("Row", columns)  # type: ignore[misc]
+            NAMED_TUPLE_CACHE[columns] = self.named_tuple
+
+    def fetchone(self) -> Optional[RowType]:
+        """Return next row of a query result set.
+
+        Returns:
+            tuple or None: A row from query result set.
+        """
+        row = super().fetchone()
+        if row:
+            return self.named_tuple(*row)
+        return None
+
+    def fetchmany(self, size: int = 1) -> List[RowType]:
+        """Return the next set of rows of a query result set.
+
+        When no more rows are available, it returns an empty list.
+        The number of rows returned can be specified using the size argument,
+        which defaults to one.
+
+        Returns:
+            list: The next set of rows of a query result set, as named tuples.
+        """
+        res = super().fetchmany(size=size)
+        # Convert every fetched row, not only the first one, into the
+        # cached named tuple type.
+        return [self.named_tuple(*row) for row in res]
+
+    def fetchall(self) -> List[RowType]:
+        """Return all rows of a query result set.
+
+        Returns:
+            list: A list of named tuples with all rows of a query result set.
+        """
+        res = super().fetchall()
+        return [self.named_tuple(*row) for row in res]
+
+
+class CMySQLCursorBufferedNamedTuple(CMySQLCursorBuffered):
+    """Cursor using C Extension buffering and returning rows as named tuples"""
+
+    named_tuple: Any = None
+
+    def _handle_resultset(self) -> None:
+        super()._handle_resultset()
+        self.named_tuple = namedtuple("Row", self.column_names)  # type: ignore[misc]
+
+    def _fetch_row(self) -> Optional[RowType]:
+        row = super()._fetch_row()
+        if row:
+            return self.named_tuple(*row)
+        return None
+
+    def fetchall(self) -> List[RowType]:
+        """Return all rows of a query result set.
+
+        Returns:
+            list: A list of named tuples with all rows of a query result set.
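+
+        Illustrative usage (column names are hypothetical); fields of
+        each row are accessible by name:
+
+            for row in cursor.fetchall():
+                print(row.id, row.name)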
+        """
+        res = super().fetchall()
+        return [self.named_tuple(*row) for row in res]
+
+
+class CMySQLCursorPrepared(CMySQLCursor):
+    """Cursor using MySQL Prepared Statements"""
+
+    def __init__(self, connection: Type[MySQLConnectionAbstract]):
+        super().__init__(connection)
+        self._rows: Optional[List[RowType]] = None
+        self._rowcount: int = 0
+        self._next_row: int = 0
+        self._binary: bool = True
+        self._stmt: Optional[MySQLPrepStmt] = None
+
+    def _handle_eof(self) -> None:
+        """Handle EOF packet"""
+        self._nextrow = (None, None)
+        self._handle_warnings()
+
+    def _fetch_row(self, raw: bool = False) -> Optional[RowType]:
+        """Returns the next row in the result set
+
+        Returns a tuple or None.
+        """
+        if not self._stmt or not self._stmt.have_result_set:
+            return None
+        row = None
+
+        if self._nextrow == (None, None):
+            (row, eof) = self._cnx.get_row(
+                binary=self._binary,
+                columns=self.description,
+                raw=raw,
+                prep_stmt=self._stmt,
+            )
+        else:
+            (row, eof) = self._nextrow
+
+        if row:
+            self._nextrow = self._cnx.get_row(
+                binary=self._binary,
+                columns=self.description,
+                raw=raw,
+                prep_stmt=self._stmt,
+            )
+            eof = self._nextrow[1]
+            if eof is not None:
+                self._warning_count = eof["warning_count"]
+                self._handle_eof()
+            if self._rowcount == -1:
+                self._rowcount = 1
+            else:
+                self._rowcount += 1
+        if eof:
+            self._warning_count = eof["warning_count"]
+            self._handle_eof()
+
+        return row
+
+    def callproc(self, procname: Any, args: Any = None) -> NoReturn:
+        """Calls a stored procedure
+
+        Not supported with CMySQLCursorPrepared.
+        """
+        raise NotSupportedError()
+
+    def close(self) -> None:
+        """Close the cursor
+
+        This method will try to deallocate the prepared statement and close
+        the cursor.
+        """
+        if self._stmt:
+            self.reset()
+            self._cnx.cmd_stmt_close(self._stmt)
+            self._stmt = None
+        super().close()
+
+    def reset(self, free: bool = True) -> None:
+        """Resets the prepared statement."""
+        if self._stmt:
+            self._cnx.cmd_stmt_reset(self._stmt)
+        super().reset(free=free)
+
+    def execute(
+        self,
+        operation: StrOrBytes,
+        params: Optional[ParamsSequenceOrDictType] = None,
+        multi: bool = False,
+    ) -> None:  # multi is unused
+        """Prepare and execute a MySQL Prepared Statement
+
+        This method will prepare the given operation and execute it using
+        the given parameters.
+
+        If the cursor instance already had a prepared statement, it is
+        first closed.
+
+        Note: argument "multi" is unused.
+        """
+        if not operation:
+            return
+
+        try:
+            if not self._cnx or self._cnx.is_closed():
+                raise ProgrammingError
+        except (ProgrammingError, ReferenceError) as err:
+            raise ProgrammingError("Cursor is not connected", 2055) from err
+
+        self._cnx.handle_unread_result(prepared=True)
+
+        charset = self._cnx.charset
+        if charset == "utf8mb4":
+            charset = "utf8"
+
+        if not isinstance(operation, str):
+            try:
+                operation = operation.decode(charset)
+            except UnicodeDecodeError as err:
+                raise ProgrammingError(str(err)) from err
+
+        if isinstance(params, dict):
+            replacement_keys = re.findall(RE_SQL_PYTHON_CAPTURE_PARAM_NAME, operation)
+            try:
+                # Replace params dict with params tuple in correct order.
+                params = tuple(params[key] for key in replacement_keys)
+            except KeyError as err:
+                raise ProgrammingError(
+                    "Not all placeholders were found in the parameters dict"
+                ) from err
+            # Convert %(name)s to ?
before sending it to MySQL + operation = re.sub(RE_SQL_PYTHON_REPLACE_PARAM, "?", operation) + + if operation is not self._executed: + if self._stmt: + self._cnx.cmd_stmt_close(self._stmt) + self._executed = operation + + try: + operation = operation.encode(charset) + except UnicodeEncodeError as err: + raise ProgrammingError(str(err)) from err + + if b"%s" in operation: + # Convert %s to ? before sending it to MySQL + operation = re.sub(RE_SQL_FIND_PARAM, b"?", operation) + + try: + self._stmt = self._cnx.cmd_stmt_prepare(operation) + except Error: + self._executed = None + self._stmt = None + raise + + self._cnx.cmd_stmt_reset(self._stmt) + + if self._stmt.param_count > 0 and not params: + return + if params: + if not isinstance(params, (tuple, list)): + raise ProgrammingError( + errno=1210, + msg=f"Incorrect type of argument: {type(params).__name__}({params})" + ", it must be of type tuple or list the argument given to " + "the prepared statement", + ) + if self._stmt.param_count != len(params): + raise ProgrammingError( + errno=1210, + msg="Incorrect number of arguments executing prepared statement", + ) + + if params is None: + params = () + res = self._cnx.cmd_stmt_execute(self._stmt, *params) + if res: + self._handle_result(res) + + def executemany( + self, operation: str, seq_params: Sequence[ParamsSequenceType] + ) -> None: + """Prepare and execute a MySQL Prepared Statement many times + + This method will prepare the given operation and execute with each + tuple found the list seq_params. + + If the cursor instance already had a prepared statement, it is + first closed. + """ + rowcnt = 0 + try: + for params in seq_params: + self.execute(operation, params) + if self.with_rows: + self.fetchall() + rowcnt += self._rowcount + except (ValueError, TypeError) as err: + raise InterfaceError(f"Failed executing the operation; {err}") from err + self._rowcount = rowcnt + + def fetchone(self) -> Optional[RowType]: + """Return next row of a query result set. + + Returns: + tuple or None: A row from query result set. + """ + self._check_executed() + return self._fetch_row() or None + + def fetchmany(self, size: Optional[int] = None) -> List[RowType]: + """Return the next set of rows of a query result set. + + When no more rows are available, it returns an empty list. + The number of rows returned can be specified using the size argument, + which defaults to one. + + Returns: + list: The next set of rows of a query result set. + """ + self._check_executed() + res = [] + cnt = size or self.arraysize + while cnt > 0 and self._stmt.have_result_set: + cnt -= 1 + row = self._fetch_row() + if row: + res.append(row) + return res + + def fetchall(self) -> List[RowType]: + """Return all rows of a query result set. + + Returns: + list: A list of tuples with all rows of a query result set. + """ + self._check_executed() + if not self._stmt.have_result_set: + return [] + + rows = self._cnx.get_rows(prep_stmt=self._stmt) + if self._nextrow and self._nextrow[0]: + rows[0].insert(0, self._nextrow[0]) + + if not rows[0]: + self._handle_eof() + return [] + + self._rowcount += len(rows[0]) + self._handle_eof() + return rows[0] + + +class CMySQLCursorPreparedDict(CMySQLCursorDict, CMySQLCursorPrepared): # type: ignore[misc] + """This class is a blend of features from CMySQLCursorDict and CMySQLCursorPrepared + + Multiple inheritance in python is allowed but care must be taken + when assuming methods resolution. 
In the case of multiple + inheritance, a given attribute is first searched in the current + class if it's not found then it's searched in the parent classes. + The parent classes are searched in a left-right fashion and each + class is searched once. + Based on python's attribute resolution, in this case, attributes + are searched as follows: + 1. CMySQLCursorPreparedDict (current class) + 2. CMySQLCursorDict (left parent class) + 3. CMySQLCursorPrepared (right parent class) + 4. CMySQLCursor (base class) + """ diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/custom_types.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/custom_types.py new file mode 100644 index 00000000..7fcacbd6 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/custom_types.py @@ -0,0 +1,50 @@ +# Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""Custom Python types used by MySQL Connector/Python""" +from __future__ import annotations + +from typing import Type + + +class HexLiteral(str): + + """Class holding MySQL hex literals""" + + charset: str = "" + original: str = "" + + def __new__(cls: Type[HexLiteral], str_: str, charset: str = "utf8") -> HexLiteral: + hexed = [f"{i:02x}" for i in str_.encode(charset)] + obj = str.__new__(cls, "".join(hexed)) + obj.charset = charset + obj.original = str_ + return obj + + def __str__(self) -> str: + return "0x" + self diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/dbapi.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/dbapi.py new file mode 100644 index 00000000..f827aa18 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/dbapi.py @@ -0,0 +1,85 @@ +# Copyright (c) 2009, 2022, Oracle and/or its affiliates. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. 
+# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +""" +This module implements some constructors and singletons as required by the +DB API v2.0 (PEP-249). +""" + +# Python Db API v2 +# pylint: disable=invalid-name +apilevel: str = "2.0" +threadsafety: int = 1 +paramstyle: str = "pyformat" + +import datetime +import time + +from typing import Tuple + +from . import constants + + +class _DBAPITypeObject: + def __init__(self, *values: int) -> None: + self.values: Tuple[int, ...] = values + + def __eq__(self, other: object) -> bool: + return other in self.values + + def __ne__(self, other: object) -> bool: + return other not in self.values + + +Date = datetime.date +Time = datetime.time +Timestamp = datetime.datetime + + +def DateFromTicks(ticks: int) -> datetime.date: + """Construct an object holding a date value from the given ticks value.""" + return Date(*time.localtime(ticks)[:3]) + + +def TimeFromTicks(ticks: int) -> datetime.time: + """Construct an object holding a time value from the given ticks value.""" + return Time(*time.localtime(ticks)[3:6]) + + +def TimestampFromTicks(ticks: int) -> datetime.datetime: + """Construct an object holding a time stamp from the given ticks value.""" + return Timestamp(*time.localtime(ticks)[:6]) + + +Binary = bytes + +STRING = _DBAPITypeObject(*constants.FieldType.get_string_types()) +BINARY = _DBAPITypeObject(*constants.FieldType.get_binary_types()) +NUMBER = _DBAPITypeObject(*constants.FieldType.get_number_types()) +DATETIME = _DBAPITypeObject(*constants.FieldType.get_timestamp_types()) +ROWID = _DBAPITypeObject() diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/base.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/base.py new file mode 100644 index 00000000..d61bbdcc --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/base.py @@ -0,0 +1,637 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. 
+# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +# mypy: disable-error-code="override" + +"""Django database Backend using MySQL Connector/Python. + +This Django database backend is heavily based on the MySQL backend from Django. + +Changes include: +* Support for microseconds (MySQL 5.6.3 and later) +* Using INFORMATION_SCHEMA where possible +* Using new defaults for, for example SQL_AUTO_IS_NULL + +Requires and comes with MySQL Connector/Python v8.0.22 and later: + http://dev.mysql.com/downloads/connector/python/ +""" + +import warnings + +from datetime import datetime, time +from typing import Any, Dict, Generator, Iterator, List, Optional, Set, Tuple, Union + +from django.conf import settings +from django.core.exceptions import ImproperlyConfigured +from django.db import IntegrityError +from django.db.backends.base.base import BaseDatabaseWrapper +from django.utils import dateparse, timezone +from django.utils.functional import cached_property + +try: + import mysql.connector + + from mysql.connector.connection import MySQLConnection + from mysql.connector.connection_cext import CMySQLConnection + from mysql.connector.conversion import MySQLConverter + from mysql.connector.cursor import MySQLCursor + from mysql.connector.cursor_cext import CMySQLCursor + from mysql.connector.custom_types import HexLiteral + from mysql.connector.pooling import PooledMySQLConnection + from mysql.connector.types import ( + ParamsDictType, + ParamsSequenceOrDictType, + ParamsSequenceType, + RowType, + StrOrBytes, + ) +except ImportError as err: + raise ImproperlyConfigured(f"Error loading mysql.connector module: {err}") from err + +try: + from _mysql_connector import datetime_to_mysql +except ImportError: + HAVE_CEXT = False +else: + HAVE_CEXT = True + +from .client import DatabaseClient +from .creation import DatabaseCreation +from .features import DatabaseFeatures +from .introspection import DatabaseIntrospection +from .operations import DatabaseOperations +from .schema import DatabaseSchemaEditor +from .validation import DatabaseValidation + +Error = mysql.connector.Error +DatabaseError = mysql.connector.DatabaseError +NotSupportedError = mysql.connector.NotSupportedError +OperationalError = mysql.connector.OperationalError +ProgrammingError = mysql.connector.ProgrammingError + + +def adapt_datetime_with_timezone_support(value: datetime) -> StrOrBytes: + """Equivalent to 
DateTimeField.get_db_prep_value. Used only by raw SQL.""" + if settings.USE_TZ: + if timezone.is_naive(value): + warnings.warn( + f"MySQL received a naive datetime ({value})" + " while time zone support is active.", + RuntimeWarning, + ) + default_timezone = timezone.get_default_timezone() + value = timezone.make_aware(value, default_timezone) + value = value.astimezone(timezone.utc).replace(tzinfo=None) + if HAVE_CEXT: + mysql_datetime: bytes = datetime_to_mysql(value) + return mysql_datetime + return value.strftime("%Y-%m-%d %H:%M:%S.%f") + + +class CursorWrapper: + """Wrapper around MySQL Connector/Python's cursor class. + + The cursor class is defined by the options passed to MySQL + Connector/Python. If buffered option is True in those options, + MySQLCursorBuffered will be used. + """ + + codes_for_integrityerror = ( + 1048, # Column cannot be null + 1690, # BIGINT UNSIGNED value is out of range + 3819, # CHECK constraint is violated + 4025, # CHECK constraint failed + ) + + def __init__(self, cursor: Union[MySQLCursor, CMySQLCursor]) -> None: + self.cursor: Union[MySQLCursor, CMySQLCursor] = cursor + + @staticmethod + def _adapt_execute_args_dict(args: ParamsDictType) -> ParamsDictType: + if not args: + return args + new_args = dict(args) + for key, value in args.items(): + if isinstance(value, datetime): + new_args[key] = adapt_datetime_with_timezone_support(value) + + return new_args + + @staticmethod + def _adapt_execute_args( + args: Optional[ParamsSequenceType], + ) -> Optional[ParamsSequenceType]: + if not args: + return args + new_args = list(args) + for i, arg in enumerate(args): + if isinstance(arg, datetime): + new_args[i] = adapt_datetime_with_timezone_support(arg) + + return tuple(new_args) + + def execute( + self, query: str, args: Optional[ParamsSequenceOrDictType] = None + ) -> Optional[Generator[Union[MySQLCursor, CMySQLCursor], None, None]]: + """Executes the given operation + + This wrapper method around the execute()-method of the cursor is + mainly needed to re-raise using different exceptions. + """ + new_args: Optional[ParamsSequenceOrDictType] = None + if isinstance(args, dict): + new_args = self._adapt_execute_args_dict(args) + else: + new_args = self._adapt_execute_args(args) + try: + return self.cursor.execute(query, new_args) + except mysql.connector.OperationalError as exc: + if exc.args[0] in self.codes_for_integrityerror: + raise IntegrityError(*tuple(exc.args)) from None + raise + + def executemany( + self, + query: str, + args: Union[ + Tuple[ParamsSequenceOrDictType, ...], + List[ParamsSequenceOrDictType], + ], + ) -> Optional[Generator[Union[MySQLCursor, CMySQLCursor], None, None]]: + """Executes the given operation + + This wrapper method around the executemany()-method of the cursor is + mainly needed to re-raise using different exceptions. + """ + try: + return self.cursor.executemany(query, args) + except mysql.connector.OperationalError as exc: + if exc.args[0] in self.codes_for_integrityerror: + raise IntegrityError(*tuple(exc.args)) from None + raise + + def __getattr__(self, attr: Any) -> Any: + """Return an attribute of wrapped cursor""" + return getattr(self.cursor, attr) + + def __iter__(self) -> Iterator[RowType]: + """Return an iterator over wrapped cursor""" + return iter(self.cursor) + + +class DatabaseWrapper(BaseDatabaseWrapper): # pylint: disable=abstract-method + """Represent a database connection.""" + + vendor = "mysql" + # This dictionary maps Field objects to their associated MySQL column + # types, as strings. 
Column-type strings can contain format strings; they'll + # be interpolated against the values of Field.__dict__ before being output. + # If a column type is set to None, it won't be included in the output. + data_types = { + "AutoField": "integer AUTO_INCREMENT", + "BigAutoField": "bigint AUTO_INCREMENT", + "BinaryField": "longblob", + "BooleanField": "bool", + "CharField": "varchar(%(max_length)s)", + "DateField": "date", + "DateTimeField": "datetime(6)", + "DecimalField": "numeric(%(max_digits)s, %(decimal_places)s)", + "DurationField": "bigint", + "FileField": "varchar(%(max_length)s)", + "FilePathField": "varchar(%(max_length)s)", + "FloatField": "double precision", + "IntegerField": "integer", + "BigIntegerField": "bigint", + "IPAddressField": "char(15)", + "GenericIPAddressField": "char(39)", + "JSONField": "json", + "NullBooleanField": "bool", + "OneToOneField": "integer", + "PositiveBigIntegerField": "bigint UNSIGNED", + "PositiveIntegerField": "integer UNSIGNED", + "PositiveSmallIntegerField": "smallint UNSIGNED", + "SlugField": "varchar(%(max_length)s)", + "SmallAutoField": "smallint AUTO_INCREMENT", + "SmallIntegerField": "smallint", + "TextField": "longtext", + "TimeField": "time(6)", + "UUIDField": "char(32)", + } + + # For these data types: + # - MySQL < 8.0.13 doesn't accept default values and + # implicitly treat them as nullable + # - all versions of MySQL doesn't support full width database + # indexes + _limited_data_types = ( + "tinyblob", + "blob", + "mediumblob", + "longblob", + "tinytext", + "text", + "mediumtext", + "longtext", + "json", + ) + + operators = { + "exact": "= %s", + "iexact": "LIKE %s", + "contains": "LIKE BINARY %s", + "icontains": "LIKE %s", + "regex": "REGEXP BINARY %s", + "iregex": "REGEXP %s", + "gt": "> %s", + "gte": ">= %s", + "lt": "< %s", + "lte": "<= %s", + "startswith": "LIKE BINARY %s", + "endswith": "LIKE BINARY %s", + "istartswith": "LIKE %s", + "iendswith": "LIKE %s", + } + + # The patterns below are used to generate SQL pattern lookup clauses when + # the right-hand side of the lookup isn't a raw string (it might be an expression + # or the result of a bilateral transformation). + # In those cases, special characters for LIKE operators (e.g. \, *, _) should be + # escaped on database side. + # + # Note: we use str.format() here for readability as '%' is used as a wildcard for + # the LIKE operator. + pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')" + pattern_ops = { + "contains": "LIKE BINARY CONCAT('%%', {}, '%%')", + "icontains": "LIKE CONCAT('%%', {}, '%%')", + "startswith": "LIKE BINARY CONCAT({}, '%%')", + "istartswith": "LIKE CONCAT({}, '%%')", + "endswith": "LIKE BINARY CONCAT('%%', {})", + "iendswith": "LIKE CONCAT('%%', {})", + } + + isolation_level: Optional[str] = None + isolation_levels = { + "read uncommitted", + "read committed", + "repeatable read", + "serializable", + } + + Database = mysql.connector + SchemaEditorClass = DatabaseSchemaEditor + # Classes instantiated in __init__(). 
+ client_class = DatabaseClient + creation_class = DatabaseCreation + features_class = DatabaseFeatures + introspection_class = DatabaseIntrospection + ops_class = DatabaseOperations + validation_class = DatabaseValidation + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + options = self.settings_dict.get("OPTIONS") + if options: + self._use_pure = options.get("use_pure", not HAVE_CEXT) + converter_class = options.get( + "converter_class", + DjangoMySQLConverter, + ) + if not issubclass(converter_class, DjangoMySQLConverter): + raise ProgrammingError( + "Converter class should be a subclass of " + "mysql.connector.django.base.DjangoMySQLConverter" + ) + self.converter = converter_class() + else: + self.converter = DjangoMySQLConverter() + self._use_pure = not HAVE_CEXT + + def __getattr__(self, attr: str) -> bool: + if attr.startswith("mysql_is"): + return False + raise AttributeError + + def get_connection_params(self) -> Dict[str, Any]: + kwargs = { + "charset": "utf8", + "use_unicode": True, + "buffered": False, + "consume_results": True, + } + + settings_dict = self.settings_dict + + if settings_dict["USER"]: + kwargs["user"] = settings_dict["USER"] + if settings_dict["NAME"]: + kwargs["database"] = settings_dict["NAME"] + if settings_dict["PASSWORD"]: + kwargs["passwd"] = settings_dict["PASSWORD"] + if settings_dict["HOST"].startswith("/"): + kwargs["unix_socket"] = settings_dict["HOST"] + elif settings_dict["HOST"]: + kwargs["host"] = settings_dict["HOST"] + if settings_dict["PORT"]: + kwargs["port"] = int(settings_dict["PORT"]) + if settings_dict.get("OPTIONS", {}).get("init_command"): + kwargs["init_command"] = settings_dict["OPTIONS"]["init_command"] + + # Raise exceptions for database warnings if DEBUG is on + kwargs["raise_on_warnings"] = settings.DEBUG + + kwargs["client_flags"] = [ + # Need potentially affected rows on UPDATE + mysql.connector.constants.ClientFlag.FOUND_ROWS, + ] + + try: + options = settings_dict["OPTIONS"].copy() + isolation_level = options.pop("isolation_level") + if isolation_level: + isolation_level = isolation_level.lower() + if isolation_level not in self.isolation_levels: + valid_levels = ", ".join( + f"'{level}'" for level in sorted(self.isolation_levels) + ) + raise ImproperlyConfigured( + f"Invalid transaction isolation level '{isolation_level}' " + f"specified.\nUse one of {valid_levels}, or None." + ) + self.isolation_level = isolation_level + kwargs.update(options) + except KeyError: + # OPTIONS missing is OK + pass + return kwargs + + def get_new_connection( + self, conn_params: Dict[str, Any] + ) -> Union[PooledMySQLConnection, MySQLConnection, CMySQLConnection]: + if "converter_class" not in conn_params: + conn_params["converter_class"] = DjangoMySQLConverter + cnx = mysql.connector.connect(**conn_params) + + return cnx + + def init_connection_state(self) -> None: + assignments = [] + if self.features.is_sql_auto_is_null_enabled: # type: ignore[attr-defined] + # SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on + # a recently inserted row will return when the field is tested + # for NULL. Disabling this brings this aspect of MySQL in line + # with SQL standards. 
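+            # For example, with SQL_AUTO_IS_NULL enabled, the hypothetical
+            # query "SELECT * FROM t WHERE auto_id IS NULL" would return
+            # the row that was just inserted instead of an empty result.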
+ assignments.append("SET SQL_AUTO_IS_NULL = 0") + + if self.isolation_level: + assignments.append( + "SET SESSION TRANSACTION ISOLATION LEVEL " + f"{self.isolation_level.upper()}" + ) + + if assignments: + with self.cursor() as cursor: + cursor.execute("; ".join(assignments)) + + if "AUTOCOMMIT" in self.settings_dict: + try: + self.set_autocommit(self.settings_dict["AUTOCOMMIT"]) + except AttributeError: + self._set_autocommit(self.settings_dict["AUTOCOMMIT"]) + + def create_cursor(self, name: Any = None) -> CursorWrapper: + cursor = self.connection.cursor() + return CursorWrapper(cursor) + + def _rollback(self) -> None: + try: + BaseDatabaseWrapper._rollback(self) # type: ignore[attr-defined] + except NotSupportedError: + pass + + def _set_autocommit(self, autocommit: bool) -> None: + with self.wrap_database_errors: + self.connection.autocommit = autocommit + + def disable_constraint_checking(self) -> bool: + """ + Disable foreign key checks, primarily for use in adding rows with + forward references. Always return True to indicate constraint checks + need to be re-enabled. + """ + with self.cursor() as cursor: + cursor.execute("SET foreign_key_checks=0") + return True + + def enable_constraint_checking(self) -> None: + """ + Re-enable foreign key checks after they have been disabled. + """ + # Override needs_rollback in case constraint_checks_disabled is + # nested inside transaction.atomic. + self.needs_rollback, needs_rollback = False, self.needs_rollback + try: + with self.cursor() as cursor: + cursor.execute("SET foreign_key_checks=1") + finally: + self.needs_rollback = needs_rollback + + def check_constraints(self, table_names: Optional[List[str]] = None) -> None: + """ + Check each table name in `table_names` for rows with invalid foreign + key references. This method is intended to be used in conjunction with + `disable_constraint_checking()` and `enable_constraint_checking()`, to + determine if rows with invalid references were entered while constraint + checks were off. + """ + with self.cursor() as cursor: + if table_names is None: + table_names = self.introspection.table_names(cursor) + for table_name in table_names: + primary_key_column_name = self.introspection.get_primary_key_column( + cursor, table_name + ) + if not primary_key_column_name: + continue + key_columns = self.introspection.get_key_columns(cursor, table_name) + for ( + column_name, + referenced_table_name, + referenced_column_name, + ) in key_columns: + cursor.execute( + f""" + SELECT REFERRING.`{primary_key_column_name}`, + REFERRING.`{column_name}` + FROM `{table_name}` as REFERRING + LEFT JOIN `{referenced_table_name}` as REFERRED + ON ( + REFERRING.`{column_name}` = + REFERRED.`{referenced_column_name}` + ) + WHERE REFERRING.`{column_name}` IS NOT NULL + AND REFERRED.`{referenced_column_name}` IS NULL + """ + ) + for bad_row in cursor.fetchall(): + raise IntegrityError( + f"The row in table '{table_name}' with primary " + f"key '{bad_row[0]}' has an invalid foreign key: " + f"{table_name}.{column_name} contains a value " + f"'{bad_row[1]}' that does not have a " + f"corresponding value in " + f"{referenced_table_name}." + f"{referenced_column_name}." 
+                    )
+
+    def is_usable(self) -> bool:
+        try:
+            self.connection.ping()
+        except Error:
+            return False
+        else:
+            return True
+
+    @cached_property
+    def display_name(self) -> str:
+        """Display name."""
+        return "MySQL"
+
+    @cached_property
+    def data_type_check_constraints(self) -> Dict[str, str]:
+        """Mapping of Field objects to their SQL for CHECK constraints."""
+        if self.features.supports_column_check_constraints:
+            check_constraints = {
+                "PositiveBigIntegerField": "`%(column)s` >= 0",
+                "PositiveIntegerField": "`%(column)s` >= 0",
+                "PositiveSmallIntegerField": "`%(column)s` >= 0",
+            }
+            return check_constraints
+        return {}
+
+    @cached_property
+    def mysql_server_data(self) -> Dict[str, Any]:
+        """Return MySQL server data."""
+        with self.temporary_connection() as cursor:
+            # Select some server variables and test if the time zone
+            # definitions are installed. CONVERT_TZ returns NULL if 'UTC'
+            # timezone isn't loaded into the mysql.time_zone table.
+            cursor.execute(
+                """
+                SELECT VERSION(),
+                       @@sql_mode,
+                       @@default_storage_engine,
+                       @@sql_auto_is_null,
+                       @@lower_case_table_names,
+                       CONVERT_TZ('2001-01-01 01:00:00', 'UTC', 'UTC') IS NOT NULL
+                """
+            )
+            row = cursor.fetchone()
+        return {
+            "version": row[0],
+            "sql_mode": row[1],
+            "default_storage_engine": row[2],
+            "sql_auto_is_null": bool(row[3]),
+            "lower_case_table_names": bool(row[4]),
+            "has_zoneinfo_database": bool(row[5]),
+        }
+
+    @cached_property
+    def mysql_server_info(self) -> Any:
+        """Return MySQL version."""
+        with self.temporary_connection() as cursor:
+            cursor.execute("SELECT VERSION()")
+            return cursor.fetchone()[0]
+
+    @cached_property
+    def mysql_version(self) -> Tuple[int, ...]:
+        """Return MySQL version."""
+        config = self.get_connection_params()
+        with mysql.connector.connect(**config) as conn:
+            server_version: Tuple[int, ...] = conn.get_server_version()
+        return server_version
+
+    @cached_property
+    def sql_mode(self) -> Set[str]:
+        """Return SQL mode."""
+        with self.cursor() as cursor:
+            cursor.execute("SELECT @@sql_mode")
+            sql_mode = cursor.fetchone()
+        return set(sql_mode[0].split(",") if sql_mode else ())
+
+    @property
+    def use_pure(self) -> bool:
+        """Return True if pure Python version is being used."""
+        ans: bool = self._use_pure
+        return ans
+
+
+class DjangoMySQLConverter(MySQLConverter):
+    """Custom converter for Django."""
+
+    # pylint: disable=unused-argument
+
+    @staticmethod
+    def _time_to_python(value: bytes, dsc: Any = None) -> Optional[time]:
+        """Return MySQL TIME data type as datetime.time()
+
+        Returns datetime.time()
+        """
+        return dateparse.parse_time(value.decode("utf-8"))
+
+    @staticmethod
+    def _datetime_to_python(value: bytes, dsc: Any = None) -> Optional[datetime]:
+        """Connector/Python always returns naive datetime.datetime
+
+        Connector/Python always returns naive timestamps since MySQL has
+        no time zone support.
+
+        - A naive datetime is a datetime that doesn't know its own timezone.
+
+        Django needs a non-naive datetime, but in this method we don't need
+        to make a datetime value time zone aware since Django itself at some
+        point will make it aware (at least in versions 3.2.16 and 4.1.2) when
+        USE_TZ=True. This may change in a future release, so we need to keep
+        an eye on this behaviour.
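+
+        Illustrative example: the byte string b"2023-05-01 12:30:00" is
+        converted to datetime.datetime(2023, 5, 1, 12, 30), with tzinfo
+        left as None.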
+ + Returns datetime.datetime() + """ + return MySQLConverter._datetime_to_python(value) if value else None + + # pylint: enable=unused-argument + + def _safestring_to_mysql(self, value: str) -> Union[bytes, HexLiteral]: + return self._str_to_mysql(value) + + def _safetext_to_mysql(self, value: str) -> Union[bytes, HexLiteral]: + return self._str_to_mysql(value) + + def _safebytes_to_mysql(self, value: bytes) -> bytes: + return self._bytes_to_mysql(value) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/client.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/client.py new file mode 100644 index 00000000..090ccd5b --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/client.py @@ -0,0 +1,106 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""Database Client.""" + +import os +import subprocess + +from typing import Any, Dict, Iterable, List, Optional, Tuple + +from django.db.backends.base.client import BaseDatabaseClient + + +class DatabaseClient(BaseDatabaseClient): + """Encapsulate backend-specific methods for opening a client shell.""" + + executable_name = "mysql" + + @classmethod + def settings_to_cmd_args_env( + cls, settings_dict: Dict[str, Any], parameters: Optional[Iterable[str]] = None + ) -> Tuple[List[str], Optional[Dict[str, Any]]]: + args = [cls.executable_name] + + db = settings_dict["OPTIONS"].get("database", settings_dict["NAME"]) + user = settings_dict["OPTIONS"].get("user", settings_dict["USER"]) + passwd = settings_dict["OPTIONS"].get("password", settings_dict["PASSWORD"]) + host = settings_dict["OPTIONS"].get("host", settings_dict["HOST"]) + port = settings_dict["OPTIONS"].get("port", settings_dict["PORT"]) + ssl_ca = settings_dict["OPTIONS"].get("ssl_ca") + ssl_cert = settings_dict["OPTIONS"].get("ssl_cert") + ssl_key = settings_dict["OPTIONS"].get("ssl_key") + defaults_file = settings_dict["OPTIONS"].get("read_default_file") + charset = settings_dict["OPTIONS"].get("charset") + + # --defaults-file should always be the first option + if defaults_file: + args.append(f"--defaults-file={defaults_file}") + + # Load any custom init_commands. We always force SQL_MODE to TRADITIONAL + init_command = settings_dict["OPTIONS"].get("init_command", "") + args.append(f"--init-command=SET @@session.SQL_MODE=TRADITIONAL;{init_command}") + + if user: + args.append(f"--user={user}") + if passwd: + args.append(f"--password={passwd}") + + if host: + if "/" in host: + args.append(f"--socket={host}") + else: + args.append(f"--host={host}") + + if port: + args.append(f"--port={port}") + + if db: + args.append(f"--database={db}") + + if ssl_ca: + args.append(f"--ssl-ca={ssl_ca}") + if ssl_cert: + args.append(f"--ssl-cert={ssl_cert}") + if ssl_key: + args.append(f"--ssl-key={ssl_key}") + + if charset: + args.append(f"--default-character-set={charset}") + + if parameters: + args.extend(parameters) + + return args, None + + def runshell(self, parameters: Optional[Iterable[str]] = None) -> None: + args, env = self.settings_to_cmd_args_env( + self.connection.settings_dict, parameters + ) + env = {**os.environ, **env} if env else None + subprocess.run(args, env=env, check=True) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/compiler.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/compiler.py new file mode 100644 index 00000000..1ee78713 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/compiler.py @@ -0,0 +1,45 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. 
The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""SQL Compiler classes.""" + +from django.db.backends.mysql.compiler import ( + SQLAggregateCompiler, + SQLCompiler, + SQLDeleteCompiler, + SQLInsertCompiler, + SQLUpdateCompiler, +) + +__all__ = [ + "SQLAggregateCompiler", + "SQLCompiler", + "SQLDeleteCompiler", + "SQLInsertCompiler", + "SQLUpdateCompiler", +] diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/creation.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/creation.py new file mode 100644 index 00000000..82f0853c --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/creation.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. 
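The `settings_to_cmd_args_env` classmethod in `client.py` above can be exercised on its own; a sketch, assuming Django and Connector/Python are importable, with invented connection values:

```python
from mysql.connector.django.client import DatabaseClient

settings_dict = {
    "NAME": "red30",  # invented example values throughout
    "USER": "appuser",
    "PASSWORD": "secret",
    "HOST": "localhost",
    "PORT": "3306",
    "OPTIONS": {},
}
args, env = DatabaseClient.settings_to_cmd_args_env(settings_dict)
print(args)
# ['mysql', '--init-command=SET @@session.SQL_MODE=TRADITIONAL;',
#  '--user=appuser', '--password=secret', '--host=localhost',
#  '--port=3306', '--database=red30']
print(env)  # None, so runshell() lets subprocess inherit os.environ
```

Note that `--defaults-file`, when present, stays first in the argument list, and `SQL_MODE=TRADITIONAL` is always forced through `--init-command`.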
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""Backend specific database creation.""" + +from django.db.backends.mysql.creation import DatabaseCreation + +__all__ = ["DatabaseCreation"] diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/features.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/features.py new file mode 100644 index 00000000..e8debb8d --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/features.py @@ -0,0 +1,50 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""Database Features.""" + +from typing import Any, List + +from django.db.backends.mysql.features import DatabaseFeatures as MySQLDatabaseFeatures +from django.utils.functional import cached_property + + +class DatabaseFeatures(MySQLDatabaseFeatures): + """Database Features Specification class.""" + + empty_fetchmany_value: List[Any] = [] + + @cached_property + def can_introspect_check_constraints(self) -> bool: # type: ignore[override] + """Check if backend support introspection CHECK of constraints.""" + return self.connection.mysql_version >= (8, 0, 16) + + @cached_property + def supports_microsecond_precision(self) -> bool: + """Check if backend support microsecond precision.""" + return self.connection.mysql_version >= (5, 6, 3) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/introspection.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/introspection.py new file mode 100644 index 00000000..304a0ec2 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/introspection.py @@ -0,0 +1,461 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. 
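The two feature flags in `features.py` above reduce to tuple comparisons against `connection.mysql_version`; a standalone illustration with a hypothetical version tuple:

```python
server = (8, 0, 27)  # hypothetical connection.mysql_version value

can_introspect_check_constraints = server >= (8, 0, 16)
supports_microsecond_precision = server >= (5, 6, 3)

assert can_introspect_check_constraints
assert supports_microsecond_precision
assert not ((8, 0, 15) >= (8, 0, 16))  # older server: the flag is False
```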
+# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +# mypy: disable-error-code="override,attr-defined,call-arg" + +"""Database Introspection.""" + +from collections import namedtuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple + +import sqlparse + +from django import VERSION as DJANGO_VERSION +from django.db.backends.base.introspection import ( + BaseDatabaseIntrospection, + FieldInfo as BaseFieldInfo, + TableInfo, +) +from django.db.models import Index +from django.utils.datastructures import OrderedSet + +from mysql.connector.constants import FieldType + +# from .base import CursorWrapper produces a circular import error, +# avoiding importing CursorWrapper explicitly, using a documented +# trick; write the imports inside if TYPE_CHECKING: so that they +# are not executed at runtime. +# Ref: https://buildmedia.readthedocs.org/media/pdf/mypy/stable/mypy.pdf [page 42] +if TYPE_CHECKING: + # CursorWraper is used exclusively for type hinting + from mysql.connector.django.base import CursorWrapper + +# Based on my investigation, named tuples to +# comply with mypy need to define a static list or tuple +# for field_names (second argument). In this case, the field +# names are created dynamically for FieldInfo which triggers +# a mypy error. The solution is not straightforward since +# FieldInfo attributes are Django version dependent. Code +# refactory is needed to fix this issue. 
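The `FieldInfo`/`InfoLine` definitions just below extend a base namedtuple by concatenating onto `_fields`, which is the dynamic construction that trips mypy. The pattern itself, with throwaway names:

```python
from collections import namedtuple

Base = namedtuple("Base", ["name", "type_code"])
# Append extra fields to an existing namedtuple's field list:
Extended = namedtuple("Extended", Base._fields + ("extra", "is_unsigned"))

row = Extended("id", 3, extra="auto_increment", is_unsigned=True)
assert row.name == "id" and row.is_unsigned is True
```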
+FieldInfo = namedtuple( # type: ignore[misc] + "FieldInfo", + BaseFieldInfo._fields + ("extra", "is_unsigned", "has_json_constraint"), +) +if DJANGO_VERSION < (3, 2, 0): + InfoLine = namedtuple( + "InfoLine", + "col_name data_type max_len num_prec num_scale extra column_default " + "is_unsigned", + ) +else: + InfoLine = namedtuple( # type: ignore[no-redef] + "InfoLine", + "col_name data_type max_len num_prec num_scale extra column_default " + "collation is_unsigned", + ) + + +class DatabaseIntrospection(BaseDatabaseIntrospection): + """Encapsulate backend-specific introspection utilities.""" + + data_types_reverse = { + FieldType.BLOB: "TextField", + FieldType.DECIMAL: "DecimalField", + FieldType.NEWDECIMAL: "DecimalField", + FieldType.DATE: "DateField", + FieldType.DATETIME: "DateTimeField", + FieldType.DOUBLE: "FloatField", + FieldType.FLOAT: "FloatField", + FieldType.INT24: "IntegerField", + FieldType.LONG: "IntegerField", + FieldType.LONGLONG: "BigIntegerField", + FieldType.SHORT: "SmallIntegerField", + FieldType.STRING: "CharField", + FieldType.TIME: "TimeField", + FieldType.TIMESTAMP: "DateTimeField", + FieldType.TINY: "IntegerField", + FieldType.TINY_BLOB: "TextField", + FieldType.MEDIUM_BLOB: "TextField", + FieldType.LONG_BLOB: "TextField", + FieldType.VAR_STRING: "CharField", + } + + def get_field_type(self, data_type: str, description: FieldInfo) -> str: + field_type = super().get_field_type(data_type, description) # type: ignore[arg-type] + if "auto_increment" in description.extra: + if field_type == "IntegerField": + return "AutoField" + if field_type == "BigIntegerField": + return "BigAutoField" + if field_type == "SmallIntegerField": + return "SmallAutoField" + if description.is_unsigned: + if field_type == "BigIntegerField": + return "PositiveBigIntegerField" + if field_type == "IntegerField": + return "PositiveIntegerField" + if field_type == "SmallIntegerField": + return "PositiveSmallIntegerField" + # JSON data type is an alias for LONGTEXT in MariaDB, use check + # constraints clauses to introspect JSONField. + if description.has_json_constraint: + return "JSONField" + return field_type + + def get_table_list(self, cursor: "CursorWrapper") -> List[TableInfo]: + """Return a list of table and view names in the current database.""" + cursor.execute("SHOW FULL TABLES") + return [ + TableInfo(row[0], {"BASE TABLE": "t", "VIEW": "v"}.get(row[1])) + for row in cursor.fetchall() + ] + + def get_table_description( + self, cursor: "CursorWrapper", table_name: str + ) -> List[FieldInfo]: + """ + Return a description of the table with the DB-API cursor.description + interface." + """ + json_constraints: Dict[Any, Any] = {} + # A default collation for the given table. 
+ cursor.execute( + """ + SELECT table_collation + FROM information_schema.tables + WHERE table_schema = DATABASE() + AND table_name = %s + """, + [table_name], + ) + row = cursor.fetchone() + default_column_collation = row[0] if row else "" + # information_schema database gives more accurate results for some figures: + # - varchar length returned by cursor.description is an internal length, + # not visible length (#5725) + # - precision and scale (for decimal fields) (#5014) + # - auto_increment is not available in cursor.description + if DJANGO_VERSION < (3, 2, 0): + cursor.execute( + """ + SELECT + column_name, data_type, character_maximum_length, + numeric_precision, numeric_scale, extra, column_default, + CASE + WHEN column_type LIKE '%% unsigned' THEN 1 + ELSE 0 + END AS is_unsigned + FROM information_schema.columns + WHERE table_name = %s AND table_schema = DATABASE() + """, + [table_name], + ) + else: + cursor.execute( + """ + SELECT + column_name, data_type, character_maximum_length, + numeric_precision, numeric_scale, extra, column_default, + CASE + WHEN collation_name = %s THEN NULL + ELSE collation_name + END AS collation_name, + CASE + WHEN column_type LIKE '%% unsigned' THEN 1 + ELSE 0 + END AS is_unsigned + FROM information_schema.columns + WHERE table_name = %s AND table_schema = DATABASE() + """, + [default_column_collation, table_name], + ) + field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()} + + cursor.execute( + f"SELECT * FROM {self.connection.ops.quote_name(table_name)} LIMIT 1" + ) + + def to_int(i: Any) -> Optional[int]: + return int(i) if i is not None else i + + fields = [] + for line in cursor.description: + info = field_info[line[0]] + if DJANGO_VERSION < (3, 2, 0): + fields.append( + FieldInfo( + *line[:3], + to_int(info.max_len) or line[3], + to_int(info.num_prec) or line[4], + to_int(info.num_scale) or line[5], + line[6], + info.column_default, + info.extra, + info.is_unsigned, + line[0] in json_constraints, + ) + ) + else: + fields.append( + FieldInfo( + *line[:3], + to_int(info.max_len) or line[3], + to_int(info.num_prec) or line[4], + to_int(info.num_scale) or line[5], + line[6], + info.column_default, + info.collation, + info.extra, + info.is_unsigned, + line[0] in json_constraints, + ) + ) + return fields + + def get_indexes( + self, cursor: "CursorWrapper", table_name: str + ) -> Dict[int, Dict[str, bool]]: + """Return indexes from table.""" + cursor.execute(f"SHOW INDEX FROM {self.connection.ops.quote_name(table_name)}") + # Do a two-pass search for indexes: on first pass check which indexes + # are multicolumn, on second pass check which single-column indexes + # are present. + rows = list(cursor.fetchall()) + multicol_indexes = set() + for row in rows: + if row[3] > 1: + multicol_indexes.add(row[2]) + indexes: Dict[int, Dict[str, bool]] = {} + for row in rows: + if row[2] in multicol_indexes: + continue + if row[4] not in indexes: + indexes[row[4]] = {"primary_key": False, "unique": False} + # It's possible to have the unique and PK constraints in + # separate indexes. 
+ if row[2] == "PRIMARY": + indexes[row[4]]["primary_key"] = True + if not row[1]: + indexes[row[4]]["unique"] = True + return indexes + + def get_primary_key_column( + self, cursor: "CursorWrapper", table_name: str + ) -> Optional[int]: + """ + Returns the name of the primary key column for the given table + """ + for column in self.get_indexes(cursor, table_name).items(): + if column[1]["primary_key"]: + return column[0] + return None + + def get_sequences( + self, cursor: "CursorWrapper", table_name: str, table_fields: Any = () + ) -> List[Dict[str, str]]: + for field_info in self.get_table_description(cursor, table_name): + if "auto_increment" in field_info.extra: + # MySQL allows only one auto-increment column per table. + return [{"table": table_name, "column": field_info.name}] + return [] + + def get_relations( + self, cursor: "CursorWrapper", table_name: str + ) -> Dict[str, Tuple[str, str]]: + """ + Return a dictionary of {field_name: (field_name_other_table, other_table)} + representing all relationships to the given table. + """ + constraints = self.get_key_columns(cursor, table_name) + relations = {} + for my_fieldname, other_table, other_field in constraints: + relations[my_fieldname] = (other_field, other_table) + return relations + + def get_key_columns( + self, cursor: "CursorWrapper", table_name: str + ) -> List[Tuple[str, str, str]]: + """ + Return a list of (column_name, referenced_table_name, referenced_column_name) + for all key columns in the given table. + """ + key_columns: List[Any] = [] + cursor.execute( + """ + SELECT column_name, referenced_table_name, referenced_column_name + FROM information_schema.key_column_usage + WHERE table_name = %s + AND table_schema = DATABASE() + AND referenced_table_name IS NOT NULL + AND referenced_column_name IS NOT NULL""", + [table_name], + ) + key_columns.extend(cursor.fetchall()) + return key_columns + + def get_storage_engine(self, cursor: "CursorWrapper", table_name: str) -> str: + """ + Retrieve the storage engine for a given table. Return the default + storage engine if the table doesn't exist. + """ + cursor.execute( + "SELECT engine FROM information_schema.tables WHERE table_name = %s", + [table_name], + ) + result = cursor.fetchone() + # pylint: disable=protected-access + if not result: + return self.connection.features._mysql_storage_engine + # pylint: enable=protected-access + return result[0] + + def _parse_constraint_columns( + self, check_clause: Any, columns: Set[str] + ) -> OrderedSet: + check_columns: OrderedSet = OrderedSet() + statement = sqlparse.parse(check_clause)[0] + tokens = (token for token in statement.flatten() if not token.is_whitespace) + for token in tokens: + if ( + token.ttype == sqlparse.tokens.Name + and self.connection.ops.quote_name(token.value) == token.value + and token.value[1:-1] in columns + ): + check_columns.add(token.value[1:-1]) + return check_columns + + def get_constraints( + self, cursor: "CursorWrapper", table_name: str + ) -> Dict[str, Any]: + """ + Retrieve any constraints or keys (unique, pk, fk, check, index) across + one or more columns. 
+ """ + constraints: Dict[str, Any] = {} + # Get the actual constraint names and columns + name_query = """ + SELECT kc.`constraint_name`, kc.`column_name`, + kc.`referenced_table_name`, kc.`referenced_column_name` + FROM information_schema.key_column_usage AS kc + WHERE + kc.table_schema = DATABASE() AND + kc.table_name = %s + ORDER BY kc.`ordinal_position` + """ + cursor.execute(name_query, [table_name]) + for constraint, column, ref_table, ref_column in cursor.fetchall(): + if constraint not in constraints: + constraints[constraint] = { + "columns": OrderedSet(), + "primary_key": False, + "unique": False, + "index": False, + "check": False, + "foreign_key": (ref_table, ref_column) if ref_column else None, + } + if self.connection.features.supports_index_column_ordering: + constraints[constraint]["orders"] = [] + constraints[constraint]["columns"].add(column) + # Now get the constraint types + type_query = """ + SELECT c.constraint_name, c.constraint_type + FROM information_schema.table_constraints AS c + WHERE + c.table_schema = DATABASE() AND + c.table_name = %s + """ + cursor.execute(type_query, [table_name]) + for constraint, kind in cursor.fetchall(): + if kind.lower() == "primary key": + constraints[constraint]["primary_key"] = True + constraints[constraint]["unique"] = True + elif kind.lower() == "unique": + constraints[constraint]["unique"] = True + # Add check constraints. + if self.connection.features.can_introspect_check_constraints: + unnamed_constraints_index = 0 + columns = { + info.name for info in self.get_table_description(cursor, table_name) + } + type_query = """ + SELECT cc.constraint_name, cc.check_clause + FROM + information_schema.check_constraints AS cc, + information_schema.table_constraints AS tc + WHERE + cc.constraint_schema = DATABASE() AND + tc.table_schema = cc.constraint_schema AND + cc.constraint_name = tc.constraint_name AND + tc.constraint_type = 'CHECK' AND + tc.table_name = %s + """ + cursor.execute(type_query, [table_name]) + for constraint, check_clause in cursor.fetchall(): + constraint_columns = self._parse_constraint_columns( + check_clause, columns + ) + # Ensure uniqueness of unnamed constraints. Unnamed unique + # and check columns constraints have the same name as + # a column. 
+ if set(constraint_columns) == {constraint}: + unnamed_constraints_index += 1 + constraint = f"__unnamed_constraint_{unnamed_constraints_index}__" + constraints[constraint] = { + "columns": constraint_columns, + "primary_key": False, + "unique": False, + "index": False, + "check": True, + "foreign_key": None, + } + # Now add in the indexes + cursor.execute(f"SHOW INDEX FROM {self.connection.ops.quote_name(table_name)}") + for _, _, index, _, column, order, type_ in [ + x[:6] + (x[10],) for x in cursor.fetchall() + ]: + if index not in constraints: + constraints[index] = { + "columns": OrderedSet(), + "primary_key": False, + "unique": False, + "check": False, + "foreign_key": None, + } + if self.connection.features.supports_index_column_ordering: + constraints[index]["orders"] = [] + constraints[index]["index"] = True + constraints[index]["type"] = ( + Index.suffix if type_ == "BTREE" else type_.lower() + ) + constraints[index]["columns"].add(column) + if self.connection.features.supports_index_column_ordering: + constraints[index]["orders"].append("DESC" if order == "D" else "ASC") + # Convert the sorted sets to lists + for constraint in constraints.values(): + constraint["columns"] = list(constraint["columns"]) + return constraints diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/operations.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/operations.py new file mode 100644 index 00000000..d010b0bb --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/operations.py @@ -0,0 +1,104 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. 
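For orientation, the mapping that `get_constraints()` above builds has roughly this shape (constraint names and values invented; `"type"` holds `Index.suffix`, i.e. `"idx"`, for BTREE indexes):

```python
example_constraints = {
    "PRIMARY": {
        "columns": ["id"],
        "primary_key": True,
        "unique": True,
        "index": True,
        "check": False,
        "foreign_key": None,
        "type": "idx",
        "orders": ["ASC"],
    },
    "order_customer_id_fk": {  # invented FK constraint name
        "columns": ["customer_id"],
        "primary_key": False,
        "unique": False,
        "index": True,
        "check": False,
        "foreign_key": ("customers", "id"),
        "type": "idx",
        "orders": ["ASC"],
    },
}
```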
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +# mypy: disable-error-code="override,attr-defined" + +"""Database Operations.""" + +from datetime import datetime, time +from typing import Optional + +from django.conf import settings +from django.db.backends.mysql.operations import ( + DatabaseOperations as MySQLDatabaseOperations, +) +from django.utils import timezone + +try: + from _mysql_connector import datetime_to_mysql, time_to_mysql +except ImportError: + HAVE_CEXT = False +else: + HAVE_CEXT = True + + +class DatabaseOperations(MySQLDatabaseOperations): + """Database Operations class.""" + + compiler_module = "mysql.connector.django.compiler" + + def regex_lookup(self, lookup_type: str) -> str: + """Return the string to use in a query when performing regular + expression lookup.""" + if self.connection.mysql_version < (8, 0, 0): + if lookup_type == "regex": + return "%s REGEXP BINARY %s" + return "%s REGEXP %s" + + match_option = "c" if lookup_type == "regex" else "i" + return f"REGEXP_LIKE(%s, %s, '{match_option}')" + + def adapt_datetimefield_value(self, value: Optional[datetime]) -> Optional[bytes]: + """Transform a datetime value to an object compatible with what is + expected by the backend driver for datetime columns.""" + return self.value_to_db_datetime(value) + + def value_to_db_datetime(self, value: Optional[datetime]) -> Optional[bytes]: + """Convert value to MySQL DATETIME.""" + ans: Optional[bytes] = None + if value is None: + return ans + # MySQL doesn't support tz-aware times + if timezone.is_aware(value): + if settings.USE_TZ: + value = value.astimezone(timezone.utc).replace(tzinfo=None) + else: + raise ValueError("MySQL backend does not support timezone-aware times") + if not self.connection.features.supports_microsecond_precision: + value = value.replace(microsecond=0) + if not self.connection.use_pure: + return datetime_to_mysql(value) + return self.connection.converter.to_mysql(value) + + def adapt_timefield_value(self, value: Optional[time]) -> Optional[bytes]: + """Transform a time value to an object compatible with what is expected + by the backend driver for time columns.""" + return self.value_to_db_time(value) + + def value_to_db_time(self, value: Optional[time]) -> Optional[bytes]: + """Convert value to MySQL TIME.""" + if value is None: + return None + + # MySQL doesn't support tz-aware times + if timezone.is_aware(value): + raise ValueError("MySQL backend does not support timezone-aware times") + + if not self.connection.use_pure: + return time_to_mysql(value) + return self.connection.converter.to_mysql(value) diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/schema.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/schema.py new file mode 100644 index 00000000..4d4f4545 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/schema.py @@ -0,0 +1,59 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. 
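`value_to_db_datetime` in `operations.py` above converts aware values to UTC and strips `tzinfo`, since MySQL's DATETIME stores no zone. The same conversion, mirrored with only the standard library:

```python
from datetime import datetime, timedelta, timezone

est = timezone(timedelta(hours=-5))
aware = datetime(2023, 1, 1, 9, 30, tzinfo=est)

# Convert to UTC, then drop tzinfo, as the backend does for USE_TZ=True:
naive_utc = aware.astimezone(timezone.utc).replace(tzinfo=None)
assert naive_utc == datetime(2023, 1, 1, 14, 30)
```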
+# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +# mypy: disable-error-code="override" + +"""Database schema editor.""" +from typing import Any + +from django.db.backends.mysql.schema import ( + DatabaseSchemaEditor as MySQLDatabaseSchemaEditor, +) + + +class DatabaseSchemaEditor(MySQLDatabaseSchemaEditor): + """This class is responsible for emitting schema-changing statements to the + databases. + """ + + def quote_value(self, value: Any) -> Any: + """Quote value.""" + self.connection.ensure_connection() + if isinstance(value, str): + value = value.replace("%", "%%") + quoted = self.connection.connection.converter.escape(value) + if isinstance(value, str) and isinstance(quoted, bytes): + quoted = quoted.decode() + return quoted + + def prepare_default(self, value: Any) -> Any: + """Implement the required abstract method. + + MySQL has requires_literal_defaults=False, therefore return the value. + """ + return value diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/validation.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/validation.py new file mode 100644 index 00000000..9096e1c8 --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/django/validation.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. 
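`quote_value` in `schema.py` above doubles `%` because the generated DDL may later pass through a cursor that treats a lone `%` as a `%s`-style placeholder marker. In isolation:

```python
literal = "100% organic"

# Escape for statement formatting; the driver collapses %% back to %:
escaped = literal.replace("%", "%%")
assert escaped == "100%% organic"
```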
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""Backend specific database validation.""" + +from django.db.backends.mysql.validation import DatabaseValidation + +__all__ = ["DatabaseValidation"] diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/errorcode.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/errorcode.py new file mode 100644 index 00000000..39fdb1bc --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/errorcode.py @@ -0,0 +1,1877 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""This module contains the MySQL Server and Client error codes.""" + +# This file was auto-generated. 
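The constants below exist so application code can match `err.errno` against named error codes instead of magic numbers; a sketch following the usual Connector/Python pattern, with invented connection values:

```python
import mysql.connector
from mysql.connector import errorcode

try:
    cnx = mysql.connector.connect(
        user="appuser", password="wrong", database="red30"  # invented
    )
except mysql.connector.Error as err:
    if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
        print("Something is wrong with your user name or password")
    elif err.errno == errorcode.ER_BAD_DB_ERROR:
        print("Database does not exist")
    else:
        print(err)
else:
    cnx.close()
```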
+_GENERATED_ON = "2021-08-11" +_MYSQL_VERSION = (8, 0, 27) + +# Start MySQL Errors +OBSOLETE_ER_HASHCHK = 1000 +OBSOLETE_ER_NISAMCHK = 1001 +ER_NO = 1002 +ER_YES = 1003 +ER_CANT_CREATE_FILE = 1004 +ER_CANT_CREATE_TABLE = 1005 +ER_CANT_CREATE_DB = 1006 +ER_DB_CREATE_EXISTS = 1007 +ER_DB_DROP_EXISTS = 1008 +OBSOLETE_ER_DB_DROP_DELETE = 1009 +ER_DB_DROP_RMDIR = 1010 +OBSOLETE_ER_CANT_DELETE_FILE = 1011 +ER_CANT_FIND_SYSTEM_REC = 1012 +ER_CANT_GET_STAT = 1013 +OBSOLETE_ER_CANT_GET_WD = 1014 +ER_CANT_LOCK = 1015 +ER_CANT_OPEN_FILE = 1016 +ER_FILE_NOT_FOUND = 1017 +ER_CANT_READ_DIR = 1018 +OBSOLETE_ER_CANT_SET_WD = 1019 +ER_CHECKREAD = 1020 +OBSOLETE_ER_DISK_FULL = 1021 +ER_DUP_KEY = 1022 +OBSOLETE_ER_ERROR_ON_CLOSE = 1023 +ER_ERROR_ON_READ = 1024 +ER_ERROR_ON_RENAME = 1025 +ER_ERROR_ON_WRITE = 1026 +ER_FILE_USED = 1027 +OBSOLETE_ER_FILSORT_ABORT = 1028 +OBSOLETE_ER_FORM_NOT_FOUND = 1029 +ER_GET_ERRNO = 1030 +ER_ILLEGAL_HA = 1031 +ER_KEY_NOT_FOUND = 1032 +ER_NOT_FORM_FILE = 1033 +ER_NOT_KEYFILE = 1034 +ER_OLD_KEYFILE = 1035 +ER_OPEN_AS_READONLY = 1036 +ER_OUTOFMEMORY = 1037 +ER_OUT_OF_SORTMEMORY = 1038 +OBSOLETE_ER_UNEXPECTED_EOF = 1039 +ER_CON_COUNT_ERROR = 1040 +ER_OUT_OF_RESOURCES = 1041 +ER_BAD_HOST_ERROR = 1042 +ER_HANDSHAKE_ERROR = 1043 +ER_DBACCESS_DENIED_ERROR = 1044 +ER_ACCESS_DENIED_ERROR = 1045 +ER_NO_DB_ERROR = 1046 +ER_UNKNOWN_COM_ERROR = 1047 +ER_BAD_NULL_ERROR = 1048 +ER_BAD_DB_ERROR = 1049 +ER_TABLE_EXISTS_ERROR = 1050 +ER_BAD_TABLE_ERROR = 1051 +ER_NON_UNIQ_ERROR = 1052 +ER_SERVER_SHUTDOWN = 1053 +ER_BAD_FIELD_ERROR = 1054 +ER_WRONG_FIELD_WITH_GROUP = 1055 +ER_WRONG_GROUP_FIELD = 1056 +ER_WRONG_SUM_SELECT = 1057 +ER_WRONG_VALUE_COUNT = 1058 +ER_TOO_LONG_IDENT = 1059 +ER_DUP_FIELDNAME = 1060 +ER_DUP_KEYNAME = 1061 +ER_DUP_ENTRY = 1062 +ER_WRONG_FIELD_SPEC = 1063 +ER_PARSE_ERROR = 1064 +ER_EMPTY_QUERY = 1065 +ER_NONUNIQ_TABLE = 1066 +ER_INVALID_DEFAULT = 1067 +ER_MULTIPLE_PRI_KEY = 1068 +ER_TOO_MANY_KEYS = 1069 +ER_TOO_MANY_KEY_PARTS = 1070 +ER_TOO_LONG_KEY = 1071 +ER_KEY_COLUMN_DOES_NOT_EXITS = 1072 +ER_BLOB_USED_AS_KEY = 1073 +ER_TOO_BIG_FIELDLENGTH = 1074 +ER_WRONG_AUTO_KEY = 1075 +ER_READY = 1076 +OBSOLETE_ER_NORMAL_SHUTDOWN = 1077 +OBSOLETE_ER_GOT_SIGNAL = 1078 +ER_SHUTDOWN_COMPLETE = 1079 +ER_FORCING_CLOSE = 1080 +ER_IPSOCK_ERROR = 1081 +ER_NO_SUCH_INDEX = 1082 +ER_WRONG_FIELD_TERMINATORS = 1083 +ER_BLOBS_AND_NO_TERMINATED = 1084 +ER_TEXTFILE_NOT_READABLE = 1085 +ER_FILE_EXISTS_ERROR = 1086 +ER_LOAD_INFO = 1087 +ER_ALTER_INFO = 1088 +ER_WRONG_SUB_KEY = 1089 +ER_CANT_REMOVE_ALL_FIELDS = 1090 +ER_CANT_DROP_FIELD_OR_KEY = 1091 +ER_INSERT_INFO = 1092 +ER_UPDATE_TABLE_USED = 1093 +ER_NO_SUCH_THREAD = 1094 +ER_KILL_DENIED_ERROR = 1095 +ER_NO_TABLES_USED = 1096 +ER_TOO_BIG_SET = 1097 +ER_NO_UNIQUE_LOGFILE = 1098 +ER_TABLE_NOT_LOCKED_FOR_WRITE = 1099 +ER_TABLE_NOT_LOCKED = 1100 +ER_BLOB_CANT_HAVE_DEFAULT = 1101 +ER_WRONG_DB_NAME = 1102 +ER_WRONG_TABLE_NAME = 1103 +ER_TOO_BIG_SELECT = 1104 +ER_UNKNOWN_ERROR = 1105 +ER_UNKNOWN_PROCEDURE = 1106 +ER_WRONG_PARAMCOUNT_TO_PROCEDURE = 1107 +ER_WRONG_PARAMETERS_TO_PROCEDURE = 1108 +ER_UNKNOWN_TABLE = 1109 +ER_FIELD_SPECIFIED_TWICE = 1110 +ER_INVALID_GROUP_FUNC_USE = 1111 +ER_UNSUPPORTED_EXTENSION = 1112 +ER_TABLE_MUST_HAVE_COLUMNS = 1113 +ER_RECORD_FILE_FULL = 1114 +ER_UNKNOWN_CHARACTER_SET = 1115 +ER_TOO_MANY_TABLES = 1116 +ER_TOO_MANY_FIELDS = 1117 +ER_TOO_BIG_ROWSIZE = 1118 +ER_STACK_OVERRUN = 1119 +ER_WRONG_OUTER_JOIN_UNUSED = 1120 +ER_NULL_COLUMN_IN_INDEX = 1121 +ER_CANT_FIND_UDF = 1122 +ER_CANT_INITIALIZE_UDF = 1123 +ER_UDF_NO_PATHS = 
1124 +ER_UDF_EXISTS = 1125 +ER_CANT_OPEN_LIBRARY = 1126 +ER_CANT_FIND_DL_ENTRY = 1127 +ER_FUNCTION_NOT_DEFINED = 1128 +ER_HOST_IS_BLOCKED = 1129 +ER_HOST_NOT_PRIVILEGED = 1130 +ER_PASSWORD_ANONYMOUS_USER = 1131 +ER_PASSWORD_NOT_ALLOWED = 1132 +ER_PASSWORD_NO_MATCH = 1133 +ER_UPDATE_INFO = 1134 +ER_CANT_CREATE_THREAD = 1135 +ER_WRONG_VALUE_COUNT_ON_ROW = 1136 +ER_CANT_REOPEN_TABLE = 1137 +ER_INVALID_USE_OF_NULL = 1138 +ER_REGEXP_ERROR = 1139 +ER_MIX_OF_GROUP_FUNC_AND_FIELDS = 1140 +ER_NONEXISTING_GRANT = 1141 +ER_TABLEACCESS_DENIED_ERROR = 1142 +ER_COLUMNACCESS_DENIED_ERROR = 1143 +ER_ILLEGAL_GRANT_FOR_TABLE = 1144 +ER_GRANT_WRONG_HOST_OR_USER = 1145 +ER_NO_SUCH_TABLE = 1146 +ER_NONEXISTING_TABLE_GRANT = 1147 +ER_NOT_ALLOWED_COMMAND = 1148 +ER_SYNTAX_ERROR = 1149 +OBSOLETE_ER_UNUSED1 = 1150 +OBSOLETE_ER_UNUSED2 = 1151 +ER_ABORTING_CONNECTION = 1152 +ER_NET_PACKET_TOO_LARGE = 1153 +ER_NET_READ_ERROR_FROM_PIPE = 1154 +ER_NET_FCNTL_ERROR = 1155 +ER_NET_PACKETS_OUT_OF_ORDER = 1156 +ER_NET_UNCOMPRESS_ERROR = 1157 +ER_NET_READ_ERROR = 1158 +ER_NET_READ_INTERRUPTED = 1159 +ER_NET_ERROR_ON_WRITE = 1160 +ER_NET_WRITE_INTERRUPTED = 1161 +ER_TOO_LONG_STRING = 1162 +ER_TABLE_CANT_HANDLE_BLOB = 1163 +ER_TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164 +OBSOLETE_ER_UNUSED3 = 1165 +ER_WRONG_COLUMN_NAME = 1166 +ER_WRONG_KEY_COLUMN = 1167 +ER_WRONG_MRG_TABLE = 1168 +ER_DUP_UNIQUE = 1169 +ER_BLOB_KEY_WITHOUT_LENGTH = 1170 +ER_PRIMARY_CANT_HAVE_NULL = 1171 +ER_TOO_MANY_ROWS = 1172 +ER_REQUIRES_PRIMARY_KEY = 1173 +OBSOLETE_ER_NO_RAID_COMPILED = 1174 +ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175 +ER_KEY_DOES_NOT_EXITS = 1176 +ER_CHECK_NO_SUCH_TABLE = 1177 +ER_CHECK_NOT_IMPLEMENTED = 1178 +ER_CANT_DO_THIS_DURING_AN_TRANSACTION = 1179 +ER_ERROR_DURING_COMMIT = 1180 +ER_ERROR_DURING_ROLLBACK = 1181 +ER_ERROR_DURING_FLUSH_LOGS = 1182 +OBSOLETE_ER_ERROR_DURING_CHECKPOINT = 1183 +ER_NEW_ABORTING_CONNECTION = 1184 +OBSOLETE_ER_DUMP_NOT_IMPLEMENTED = 1185 +OBSOLETE_ER_FLUSH_MASTER_BINLOG_CLOSED = 1186 +OBSOLETE_ER_INDEX_REBUILD = 1187 +ER_MASTER = 1188 +ER_MASTER_NET_READ = 1189 +ER_MASTER_NET_WRITE = 1190 +ER_FT_MATCHING_KEY_NOT_FOUND = 1191 +ER_LOCK_OR_ACTIVE_TRANSACTION = 1192 +ER_UNKNOWN_SYSTEM_VARIABLE = 1193 +ER_CRASHED_ON_USAGE = 1194 +ER_CRASHED_ON_REPAIR = 1195 +ER_WARNING_NOT_COMPLETE_ROLLBACK = 1196 +ER_TRANS_CACHE_FULL = 1197 +OBSOLETE_ER_SLAVE_MUST_STOP = 1198 +ER_SLAVE_NOT_RUNNING = 1199 +ER_BAD_SLAVE = 1200 +ER_MASTER_INFO = 1201 +ER_SLAVE_THREAD = 1202 +ER_TOO_MANY_USER_CONNECTIONS = 1203 +ER_SET_CONSTANTS_ONLY = 1204 +ER_LOCK_WAIT_TIMEOUT = 1205 +ER_LOCK_TABLE_FULL = 1206 +ER_READ_ONLY_TRANSACTION = 1207 +OBSOLETE_ER_DROP_DB_WITH_READ_LOCK = 1208 +OBSOLETE_ER_CREATE_DB_WITH_READ_LOCK = 1209 +ER_WRONG_ARGUMENTS = 1210 +ER_NO_PERMISSION_TO_CREATE_USER = 1211 +OBSOLETE_ER_UNION_TABLES_IN_DIFFERENT_DIR = 1212 +ER_LOCK_DEADLOCK = 1213 +ER_TABLE_CANT_HANDLE_FT = 1214 +ER_CANNOT_ADD_FOREIGN = 1215 +ER_NO_REFERENCED_ROW = 1216 +ER_ROW_IS_REFERENCED = 1217 +ER_CONNECT_TO_MASTER = 1218 +OBSOLETE_ER_QUERY_ON_MASTER = 1219 +ER_ERROR_WHEN_EXECUTING_COMMAND = 1220 +ER_WRONG_USAGE = 1221 +ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222 +ER_CANT_UPDATE_WITH_READLOCK = 1223 +ER_MIXING_NOT_ALLOWED = 1224 +ER_DUP_ARGUMENT = 1225 +ER_USER_LIMIT_REACHED = 1226 +ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227 +ER_LOCAL_VARIABLE = 1228 +ER_GLOBAL_VARIABLE = 1229 +ER_NO_DEFAULT = 1230 +ER_WRONG_VALUE_FOR_VAR = 1231 +ER_WRONG_TYPE_FOR_VAR = 1232 +ER_VAR_CANT_BE_READ = 1233 +ER_CANT_USE_OPTION_HERE = 1234 +ER_NOT_SUPPORTED_YET = 1235 
+ER_MASTER_FATAL_ERROR_READING_BINLOG = 1236 +ER_SLAVE_IGNORED_TABLE = 1237 +ER_INCORRECT_GLOBAL_LOCAL_VAR = 1238 +ER_WRONG_FK_DEF = 1239 +ER_KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240 +ER_OPERAND_COLUMNS = 1241 +ER_SUBQUERY_NO_1_ROW = 1242 +ER_UNKNOWN_STMT_HANDLER = 1243 +ER_CORRUPT_HELP_DB = 1244 +OBSOLETE_ER_CYCLIC_REFERENCE = 1245 +ER_AUTO_CONVERT = 1246 +ER_ILLEGAL_REFERENCE = 1247 +ER_DERIVED_MUST_HAVE_ALIAS = 1248 +ER_SELECT_REDUCED = 1249 +ER_TABLENAME_NOT_ALLOWED_HERE = 1250 +ER_NOT_SUPPORTED_AUTH_MODE = 1251 +ER_SPATIAL_CANT_HAVE_NULL = 1252 +ER_COLLATION_CHARSET_MISMATCH = 1253 +OBSOLETE_ER_SLAVE_WAS_RUNNING = 1254 +OBSOLETE_ER_SLAVE_WAS_NOT_RUNNING = 1255 +ER_TOO_BIG_FOR_UNCOMPRESS = 1256 +ER_ZLIB_Z_MEM_ERROR = 1257 +ER_ZLIB_Z_BUF_ERROR = 1258 +ER_ZLIB_Z_DATA_ERROR = 1259 +ER_CUT_VALUE_GROUP_CONCAT = 1260 +ER_WARN_TOO_FEW_RECORDS = 1261 +ER_WARN_TOO_MANY_RECORDS = 1262 +ER_WARN_NULL_TO_NOTNULL = 1263 +ER_WARN_DATA_OUT_OF_RANGE = 1264 +WARN_DATA_TRUNCATED = 1265 +ER_WARN_USING_OTHER_HANDLER = 1266 +ER_CANT_AGGREGATE_2COLLATIONS = 1267 +OBSOLETE_ER_DROP_USER = 1268 +ER_REVOKE_GRANTS = 1269 +ER_CANT_AGGREGATE_3COLLATIONS = 1270 +ER_CANT_AGGREGATE_NCOLLATIONS = 1271 +ER_VARIABLE_IS_NOT_STRUCT = 1272 +ER_UNKNOWN_COLLATION = 1273 +ER_SLAVE_IGNORED_SSL_PARAMS = 1274 +OBSOLETE_ER_SERVER_IS_IN_SECURE_AUTH_MODE = 1275 +ER_WARN_FIELD_RESOLVED = 1276 +ER_BAD_SLAVE_UNTIL_COND = 1277 +ER_MISSING_SKIP_SLAVE = 1278 +ER_UNTIL_COND_IGNORED = 1279 +ER_WRONG_NAME_FOR_INDEX = 1280 +ER_WRONG_NAME_FOR_CATALOG = 1281 +OBSOLETE_ER_WARN_QC_RESIZE = 1282 +ER_BAD_FT_COLUMN = 1283 +ER_UNKNOWN_KEY_CACHE = 1284 +ER_WARN_HOSTNAME_WONT_WORK = 1285 +ER_UNKNOWN_STORAGE_ENGINE = 1286 +ER_WARN_DEPRECATED_SYNTAX = 1287 +ER_NON_UPDATABLE_TABLE = 1288 +ER_FEATURE_DISABLED = 1289 +ER_OPTION_PREVENTS_STATEMENT = 1290 +ER_DUPLICATED_VALUE_IN_TYPE = 1291 +ER_TRUNCATED_WRONG_VALUE = 1292 +OBSOLETE_ER_TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293 +ER_INVALID_ON_UPDATE = 1294 +ER_UNSUPPORTED_PS = 1295 +ER_GET_ERRMSG = 1296 +ER_GET_TEMPORARY_ERRMSG = 1297 +ER_UNKNOWN_TIME_ZONE = 1298 +ER_WARN_INVALID_TIMESTAMP = 1299 +ER_INVALID_CHARACTER_STRING = 1300 +ER_WARN_ALLOWED_PACKET_OVERFLOWED = 1301 +ER_CONFLICTING_DECLARATIONS = 1302 +ER_SP_NO_RECURSIVE_CREATE = 1303 +ER_SP_ALREADY_EXISTS = 1304 +ER_SP_DOES_NOT_EXIST = 1305 +ER_SP_DROP_FAILED = 1306 +ER_SP_STORE_FAILED = 1307 +ER_SP_LILABEL_MISMATCH = 1308 +ER_SP_LABEL_REDEFINE = 1309 +ER_SP_LABEL_MISMATCH = 1310 +ER_SP_UNINIT_VAR = 1311 +ER_SP_BADSELECT = 1312 +ER_SP_BADRETURN = 1313 +ER_SP_BADSTATEMENT = 1314 +ER_UPDATE_LOG_DEPRECATED_IGNORED = 1315 +ER_UPDATE_LOG_DEPRECATED_TRANSLATED = 1316 +ER_QUERY_INTERRUPTED = 1317 +ER_SP_WRONG_NO_OF_ARGS = 1318 +ER_SP_COND_MISMATCH = 1319 +ER_SP_NORETURN = 1320 +ER_SP_NORETURNEND = 1321 +ER_SP_BAD_CURSOR_QUERY = 1322 +ER_SP_BAD_CURSOR_SELECT = 1323 +ER_SP_CURSOR_MISMATCH = 1324 +ER_SP_CURSOR_ALREADY_OPEN = 1325 +ER_SP_CURSOR_NOT_OPEN = 1326 +ER_SP_UNDECLARED_VAR = 1327 +ER_SP_WRONG_NO_OF_FETCH_ARGS = 1328 +ER_SP_FETCH_NO_DATA = 1329 +ER_SP_DUP_PARAM = 1330 +ER_SP_DUP_VAR = 1331 +ER_SP_DUP_COND = 1332 +ER_SP_DUP_CURS = 1333 +ER_SP_CANT_ALTER = 1334 +ER_SP_SUBSELECT_NYI = 1335 +ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336 +ER_SP_VARCOND_AFTER_CURSHNDLR = 1337 +ER_SP_CURSOR_AFTER_HANDLER = 1338 +ER_SP_CASE_NOT_FOUND = 1339 +ER_FPARSER_TOO_BIG_FILE = 1340 +ER_FPARSER_BAD_HEADER = 1341 +ER_FPARSER_EOF_IN_COMMENT = 1342 +ER_FPARSER_ERROR_IN_PARAMETER = 1343 +ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344 +ER_VIEW_NO_EXPLAIN = 1345 +OBSOLETE_ER_FRM_UNKNOWN_TYPE = 1346 
+ER_WRONG_OBJECT = 1347 +ER_NONUPDATEABLE_COLUMN = 1348 +OBSOLETE_ER_VIEW_SELECT_DERIVED_UNUSED = 1349 +ER_VIEW_SELECT_CLAUSE = 1350 +ER_VIEW_SELECT_VARIABLE = 1351 +ER_VIEW_SELECT_TMPTABLE = 1352 +ER_VIEW_WRONG_LIST = 1353 +ER_WARN_VIEW_MERGE = 1354 +ER_WARN_VIEW_WITHOUT_KEY = 1355 +ER_VIEW_INVALID = 1356 +ER_SP_NO_DROP_SP = 1357 +OBSOLETE_ER_SP_GOTO_IN_HNDLR = 1358 +ER_TRG_ALREADY_EXISTS = 1359 +ER_TRG_DOES_NOT_EXIST = 1360 +ER_TRG_ON_VIEW_OR_TEMP_TABLE = 1361 +ER_TRG_CANT_CHANGE_ROW = 1362 +ER_TRG_NO_SUCH_ROW_IN_TRG = 1363 +ER_NO_DEFAULT_FOR_FIELD = 1364 +ER_DIVISION_BY_ZERO = 1365 +ER_TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366 +ER_ILLEGAL_VALUE_FOR_TYPE = 1367 +ER_VIEW_NONUPD_CHECK = 1368 +ER_VIEW_CHECK_FAILED = 1369 +ER_PROCACCESS_DENIED_ERROR = 1370 +ER_RELAY_LOG_FAIL = 1371 +OBSOLETE_ER_PASSWD_LENGTH = 1372 +ER_UNKNOWN_TARGET_BINLOG = 1373 +ER_IO_ERR_LOG_INDEX_READ = 1374 +ER_BINLOG_PURGE_PROHIBITED = 1375 +ER_FSEEK_FAIL = 1376 +ER_BINLOG_PURGE_FATAL_ERR = 1377 +ER_LOG_IN_USE = 1378 +ER_LOG_PURGE_UNKNOWN_ERR = 1379 +ER_RELAY_LOG_INIT = 1380 +ER_NO_BINARY_LOGGING = 1381 +ER_RESERVED_SYNTAX = 1382 +OBSOLETE_ER_WSAS_FAILED = 1383 +OBSOLETE_ER_DIFF_GROUPS_PROC = 1384 +OBSOLETE_ER_NO_GROUP_FOR_PROC = 1385 +OBSOLETE_ER_ORDER_WITH_PROC = 1386 +OBSOLETE_ER_LOGGING_PROHIBIT_CHANGING_OF = 1387 +OBSOLETE_ER_NO_FILE_MAPPING = 1388 +OBSOLETE_ER_WRONG_MAGIC = 1389 +ER_PS_MANY_PARAM = 1390 +ER_KEY_PART_0 = 1391 +ER_VIEW_CHECKSUM = 1392 +ER_VIEW_MULTIUPDATE = 1393 +ER_VIEW_NO_INSERT_FIELD_LIST = 1394 +ER_VIEW_DELETE_MERGE_VIEW = 1395 +ER_CANNOT_USER = 1396 +ER_XAER_NOTA = 1397 +ER_XAER_INVAL = 1398 +ER_XAER_RMFAIL = 1399 +ER_XAER_OUTSIDE = 1400 +ER_XAER_RMERR = 1401 +ER_XA_RBROLLBACK = 1402 +ER_NONEXISTING_PROC_GRANT = 1403 +ER_PROC_AUTO_GRANT_FAIL = 1404 +ER_PROC_AUTO_REVOKE_FAIL = 1405 +ER_DATA_TOO_LONG = 1406 +ER_SP_BAD_SQLSTATE = 1407 +ER_STARTUP = 1408 +ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409 +ER_CANT_CREATE_USER_WITH_GRANT = 1410 +ER_WRONG_VALUE_FOR_TYPE = 1411 +ER_TABLE_DEF_CHANGED = 1412 +ER_SP_DUP_HANDLER = 1413 +ER_SP_NOT_VAR_ARG = 1414 +ER_SP_NO_RETSET = 1415 +ER_CANT_CREATE_GEOMETRY_OBJECT = 1416 +OBSOLETE_ER_FAILED_ROUTINE_BREAK_BINLOG = 1417 +ER_BINLOG_UNSAFE_ROUTINE = 1418 +ER_BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419 +OBSOLETE_ER_EXEC_STMT_WITH_OPEN_CURSOR = 1420 +ER_STMT_HAS_NO_OPEN_CURSOR = 1421 +ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422 +ER_NO_DEFAULT_FOR_VIEW_FIELD = 1423 +ER_SP_NO_RECURSION = 1424 +ER_TOO_BIG_SCALE = 1425 +ER_TOO_BIG_PRECISION = 1426 +ER_M_BIGGER_THAN_D = 1427 +ER_WRONG_LOCK_OF_SYSTEM_TABLE = 1428 +ER_CONNECT_TO_FOREIGN_DATA_SOURCE = 1429 +ER_QUERY_ON_FOREIGN_DATA_SOURCE = 1430 +ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431 +ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432 +ER_FOREIGN_DATA_STRING_INVALID = 1433 +OBSOLETE_ER_CANT_CREATE_FEDERATED_TABLE = 1434 +ER_TRG_IN_WRONG_SCHEMA = 1435 +ER_STACK_OVERRUN_NEED_MORE = 1436 +ER_TOO_LONG_BODY = 1437 +ER_WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438 +ER_TOO_BIG_DISPLAYWIDTH = 1439 +ER_XAER_DUPID = 1440 +ER_DATETIME_FUNCTION_OVERFLOW = 1441 +ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442 +ER_VIEW_PREVENT_UPDATE = 1443 +ER_PS_NO_RECURSION = 1444 +ER_SP_CANT_SET_AUTOCOMMIT = 1445 +OBSOLETE_ER_MALFORMED_DEFINER = 1446 +ER_VIEW_FRM_NO_USER = 1447 +ER_VIEW_OTHER_USER = 1448 +ER_NO_SUCH_USER = 1449 +ER_FORBID_SCHEMA_CHANGE = 1450 +ER_ROW_IS_REFERENCED_2 = 1451 +ER_NO_REFERENCED_ROW_2 = 1452 +ER_SP_BAD_VAR_SHADOW = 1453 +ER_TRG_NO_DEFINER = 1454 +ER_OLD_FILE_FORMAT = 1455 +ER_SP_RECURSION_LIMIT = 1456 
+OBSOLETE_ER_SP_PROC_TABLE_CORRUPT = 1457 +ER_SP_WRONG_NAME = 1458 +ER_TABLE_NEEDS_UPGRADE = 1459 +ER_SP_NO_AGGREGATE = 1460 +ER_MAX_PREPARED_STMT_COUNT_REACHED = 1461 +ER_VIEW_RECURSIVE = 1462 +ER_NON_GROUPING_FIELD_USED = 1463 +ER_TABLE_CANT_HANDLE_SPKEYS = 1464 +ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465 +ER_REMOVED_SPACES = 1466 +ER_AUTOINC_READ_FAILED = 1467 +ER_USERNAME = 1468 +ER_HOSTNAME = 1469 +ER_WRONG_STRING_LENGTH = 1470 +ER_NON_INSERTABLE_TABLE = 1471 +ER_ADMIN_WRONG_MRG_TABLE = 1472 +ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT = 1473 +ER_NAME_BECOMES_EMPTY = 1474 +ER_AMBIGUOUS_FIELD_TERM = 1475 +ER_FOREIGN_SERVER_EXISTS = 1476 +ER_FOREIGN_SERVER_DOESNT_EXIST = 1477 +ER_ILLEGAL_HA_CREATE_OPTION = 1478 +ER_PARTITION_REQUIRES_VALUES_ERROR = 1479 +ER_PARTITION_WRONG_VALUES_ERROR = 1480 +ER_PARTITION_MAXVALUE_ERROR = 1481 +OBSOLETE_ER_PARTITION_SUBPARTITION_ERROR = 1482 +OBSOLETE_ER_PARTITION_SUBPART_MIX_ERROR = 1483 +ER_PARTITION_WRONG_NO_PART_ERROR = 1484 +ER_PARTITION_WRONG_NO_SUBPART_ERROR = 1485 +ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR = 1486 +OBSOLETE_ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR = 1487 +ER_FIELD_NOT_FOUND_PART_ERROR = 1488 +OBSOLETE_ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR = 1489 +ER_INCONSISTENT_PARTITION_INFO_ERROR = 1490 +ER_PARTITION_FUNC_NOT_ALLOWED_ERROR = 1491 +ER_PARTITIONS_MUST_BE_DEFINED_ERROR = 1492 +ER_RANGE_NOT_INCREASING_ERROR = 1493 +ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR = 1494 +ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR = 1495 +ER_PARTITION_ENTRY_ERROR = 1496 +ER_MIX_HANDLER_ERROR = 1497 +ER_PARTITION_NOT_DEFINED_ERROR = 1498 +ER_TOO_MANY_PARTITIONS_ERROR = 1499 +ER_SUBPARTITION_ERROR = 1500 +ER_CANT_CREATE_HANDLER_FILE = 1501 +ER_BLOB_FIELD_IN_PART_FUNC_ERROR = 1502 +ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF = 1503 +ER_NO_PARTS_ERROR = 1504 +ER_PARTITION_MGMT_ON_NONPARTITIONED = 1505 +ER_FOREIGN_KEY_ON_PARTITIONED = 1506 +ER_DROP_PARTITION_NON_EXISTENT = 1507 +ER_DROP_LAST_PARTITION = 1508 +ER_COALESCE_ONLY_ON_HASH_PARTITION = 1509 +ER_REORG_HASH_ONLY_ON_SAME_NO = 1510 +ER_REORG_NO_PARAM_ERROR = 1511 +ER_ONLY_ON_RANGE_LIST_PARTITION = 1512 +ER_ADD_PARTITION_SUBPART_ERROR = 1513 +ER_ADD_PARTITION_NO_NEW_PARTITION = 1514 +ER_COALESCE_PARTITION_NO_PARTITION = 1515 +ER_REORG_PARTITION_NOT_EXIST = 1516 +ER_SAME_NAME_PARTITION = 1517 +ER_NO_BINLOG_ERROR = 1518 +ER_CONSECUTIVE_REORG_PARTITIONS = 1519 +ER_REORG_OUTSIDE_RANGE = 1520 +ER_PARTITION_FUNCTION_FAILURE = 1521 +OBSOLETE_ER_PART_STATE_ERROR = 1522 +ER_LIMITED_PART_RANGE = 1523 +ER_PLUGIN_IS_NOT_LOADED = 1524 +ER_WRONG_VALUE = 1525 +ER_NO_PARTITION_FOR_GIVEN_VALUE = 1526 +ER_FILEGROUP_OPTION_ONLY_ONCE = 1527 +ER_CREATE_FILEGROUP_FAILED = 1528 +ER_DROP_FILEGROUP_FAILED = 1529 +ER_TABLESPACE_AUTO_EXTEND_ERROR = 1530 +ER_WRONG_SIZE_NUMBER = 1531 +ER_SIZE_OVERFLOW_ERROR = 1532 +ER_ALTER_FILEGROUP_FAILED = 1533 +ER_BINLOG_ROW_LOGGING_FAILED = 1534 +OBSOLETE_ER_BINLOG_ROW_WRONG_TABLE_DEF = 1535 +OBSOLETE_ER_BINLOG_ROW_RBR_TO_SBR = 1536 +ER_EVENT_ALREADY_EXISTS = 1537 +OBSOLETE_ER_EVENT_STORE_FAILED = 1538 +ER_EVENT_DOES_NOT_EXIST = 1539 +OBSOLETE_ER_EVENT_CANT_ALTER = 1540 +OBSOLETE_ER_EVENT_DROP_FAILED = 1541 +ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG = 1542 +ER_EVENT_ENDS_BEFORE_STARTS = 1543 +ER_EVENT_EXEC_TIME_IN_THE_PAST = 1544 +OBSOLETE_ER_EVENT_OPEN_TABLE_FAILED = 1545 +OBSOLETE_ER_EVENT_NEITHER_M_EXPR_NOR_M_AT = 1546 +OBSOLETE_ER_COL_COUNT_DOESNT_MATCH_CORRUPTED = 1547 +OBSOLETE_ER_CANNOT_LOAD_FROM_TABLE = 1548 +OBSOLETE_ER_EVENT_CANNOT_DELETE = 1549 +OBSOLETE_ER_EVENT_COMPILE_ERROR = 1550 +ER_EVENT_SAME_NAME = 
1551 +OBSOLETE_ER_EVENT_DATA_TOO_LONG = 1552 +ER_DROP_INDEX_FK = 1553 +ER_WARN_DEPRECATED_SYNTAX_WITH_VER = 1554 +OBSOLETE_ER_CANT_WRITE_LOCK_LOG_TABLE = 1555 +ER_CANT_LOCK_LOG_TABLE = 1556 +ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED = 1557 +ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE = 1558 +OBSOLETE_ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR = 1559 +ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1560 +OBSOLETE_ER_NDB_CANT_SWITCH_BINLOG_FORMAT = 1561 +ER_PARTITION_NO_TEMPORARY = 1562 +ER_PARTITION_CONST_DOMAIN_ERROR = 1563 +ER_PARTITION_FUNCTION_IS_NOT_ALLOWED = 1564 +OBSOLETE_ER_DDL_LOG_ERROR_UNUSED = 1565 +ER_NULL_IN_VALUES_LESS_THAN = 1566 +ER_WRONG_PARTITION_NAME = 1567 +ER_CANT_CHANGE_TX_CHARACTERISTICS = 1568 +ER_DUP_ENTRY_AUTOINCREMENT_CASE = 1569 +OBSOLETE_ER_EVENT_MODIFY_QUEUE_ERROR = 1570 +ER_EVENT_SET_VAR_ERROR = 1571 +ER_PARTITION_MERGE_ERROR = 1572 +OBSOLETE_ER_CANT_ACTIVATE_LOG = 1573 +OBSOLETE_ER_RBR_NOT_AVAILABLE = 1574 +ER_BASE64_DECODE_ERROR = 1575 +ER_EVENT_RECURSION_FORBIDDEN = 1576 +OBSOLETE_ER_EVENTS_DB_ERROR = 1577 +ER_ONLY_INTEGERS_ALLOWED = 1578 +ER_UNSUPORTED_LOG_ENGINE = 1579 +ER_BAD_LOG_STATEMENT = 1580 +ER_CANT_RENAME_LOG_TABLE = 1581 +ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT = 1582 +ER_WRONG_PARAMETERS_TO_NATIVE_FCT = 1583 +ER_WRONG_PARAMETERS_TO_STORED_FCT = 1584 +ER_NATIVE_FCT_NAME_COLLISION = 1585 +ER_DUP_ENTRY_WITH_KEY_NAME = 1586 +ER_BINLOG_PURGE_EMFILE = 1587 +ER_EVENT_CANNOT_CREATE_IN_THE_PAST = 1588 +ER_EVENT_CANNOT_ALTER_IN_THE_PAST = 1589 +OBSOLETE_ER_SLAVE_INCIDENT = 1590 +ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT = 1591 +ER_BINLOG_UNSAFE_STATEMENT = 1592 +ER_BINLOG_FATAL_ERROR = 1593 +OBSOLETE_ER_SLAVE_RELAY_LOG_READ_FAILURE = 1594 +OBSOLETE_ER_SLAVE_RELAY_LOG_WRITE_FAILURE = 1595 +OBSOLETE_ER_SLAVE_CREATE_EVENT_FAILURE = 1596 +OBSOLETE_ER_SLAVE_MASTER_COM_FAILURE = 1597 +ER_BINLOG_LOGGING_IMPOSSIBLE = 1598 +ER_VIEW_NO_CREATION_CTX = 1599 +ER_VIEW_INVALID_CREATION_CTX = 1600 +OBSOLETE_ER_SR_INVALID_CREATION_CTX = 1601 +ER_TRG_CORRUPTED_FILE = 1602 +ER_TRG_NO_CREATION_CTX = 1603 +ER_TRG_INVALID_CREATION_CTX = 1604 +ER_EVENT_INVALID_CREATION_CTX = 1605 +ER_TRG_CANT_OPEN_TABLE = 1606 +OBSOLETE_ER_CANT_CREATE_SROUTINE = 1607 +OBSOLETE_ER_NEVER_USED = 1608 +ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT = 1609 +ER_SLAVE_CORRUPT_EVENT = 1610 +OBSOLETE_ER_LOAD_DATA_INVALID_COLUMN_UNUSED = 1611 +ER_LOG_PURGE_NO_FILE = 1612 +ER_XA_RBTIMEOUT = 1613 +ER_XA_RBDEADLOCK = 1614 +ER_NEED_REPREPARE = 1615 +OBSOLETE_ER_DELAYED_NOT_SUPPORTED = 1616 +WARN_NO_MASTER_INFO = 1617 +WARN_OPTION_IGNORED = 1618 +ER_PLUGIN_DELETE_BUILTIN = 1619 +WARN_PLUGIN_BUSY = 1620 +ER_VARIABLE_IS_READONLY = 1621 +ER_WARN_ENGINE_TRANSACTION_ROLLBACK = 1622 +OBSOLETE_ER_SLAVE_HEARTBEAT_FAILURE = 1623 +ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE = 1624 +ER_NDB_REPLICATION_SCHEMA_ERROR = 1625 +ER_CONFLICT_FN_PARSE_ERROR = 1626 +ER_EXCEPTIONS_WRITE_ERROR = 1627 +ER_TOO_LONG_TABLE_COMMENT = 1628 +ER_TOO_LONG_FIELD_COMMENT = 1629 +ER_FUNC_INEXISTENT_NAME_COLLISION = 1630 +ER_DATABASE_NAME = 1631 +ER_TABLE_NAME = 1632 +ER_PARTITION_NAME = 1633 +ER_SUBPARTITION_NAME = 1634 +ER_TEMPORARY_NAME = 1635 +ER_RENAMED_NAME = 1636 +ER_TOO_MANY_CONCURRENT_TRXS = 1637 +WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED = 1638 +ER_DEBUG_SYNC_TIMEOUT = 1639 +ER_DEBUG_SYNC_HIT_LIMIT = 1640 +ER_DUP_SIGNAL_SET = 1641 +ER_SIGNAL_WARN = 1642 +ER_SIGNAL_NOT_FOUND = 1643 +ER_SIGNAL_EXCEPTION = 1644 +ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER = 1645 +ER_SIGNAL_BAD_CONDITION_TYPE = 1646 +WARN_COND_ITEM_TRUNCATED = 1647 +ER_COND_ITEM_TOO_LONG = 1648 
+ER_UNKNOWN_LOCALE = 1649 +ER_SLAVE_IGNORE_SERVER_IDS = 1650 +OBSOLETE_ER_QUERY_CACHE_DISABLED = 1651 +ER_SAME_NAME_PARTITION_FIELD = 1652 +ER_PARTITION_COLUMN_LIST_ERROR = 1653 +ER_WRONG_TYPE_COLUMN_VALUE_ERROR = 1654 +ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR = 1655 +ER_MAXVALUE_IN_VALUES_IN = 1656 +ER_TOO_MANY_VALUES_ERROR = 1657 +ER_ROW_SINGLE_PARTITION_FIELD_ERROR = 1658 +ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD = 1659 +ER_PARTITION_FIELDS_TOO_LONG = 1660 +ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE = 1661 +ER_BINLOG_ROW_MODE_AND_STMT_ENGINE = 1662 +ER_BINLOG_UNSAFE_AND_STMT_ENGINE = 1663 +ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE = 1664 +ER_BINLOG_STMT_MODE_AND_ROW_ENGINE = 1665 +ER_BINLOG_ROW_INJECTION_AND_STMT_MODE = 1666 +ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1667 +ER_BINLOG_UNSAFE_LIMIT = 1668 +OBSOLETE_ER_UNUSED4 = 1669 +ER_BINLOG_UNSAFE_SYSTEM_TABLE = 1670 +ER_BINLOG_UNSAFE_AUTOINC_COLUMNS = 1671 +ER_BINLOG_UNSAFE_UDF = 1672 +ER_BINLOG_UNSAFE_SYSTEM_VARIABLE = 1673 +ER_BINLOG_UNSAFE_SYSTEM_FUNCTION = 1674 +ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS = 1675 +ER_MESSAGE_AND_STATEMENT = 1676 +OBSOLETE_ER_SLAVE_CONVERSION_FAILED = 1677 +ER_SLAVE_CANT_CREATE_CONVERSION = 1678 +ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1679 +ER_PATH_LENGTH = 1680 +ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT = 1681 +ER_WRONG_NATIVE_TABLE_STRUCTURE = 1682 +ER_WRONG_PERFSCHEMA_USAGE = 1683 +ER_WARN_I_S_SKIPPED_TABLE = 1684 +ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1685 +ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1686 +ER_SPATIAL_MUST_HAVE_GEOM_COL = 1687 +ER_TOO_LONG_INDEX_COMMENT = 1688 +ER_LOCK_ABORTED = 1689 +ER_DATA_OUT_OF_RANGE = 1690 +OBSOLETE_ER_WRONG_SPVAR_TYPE_IN_LIMIT = 1691 +ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1692 +ER_BINLOG_UNSAFE_MIXED_STATEMENT = 1693 +ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1694 +ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1695 +ER_FAILED_READ_FROM_PAR_FILE = 1696 +ER_VALUES_IS_NOT_INT_TYPE_ERROR = 1697 +ER_ACCESS_DENIED_NO_PASSWORD_ERROR = 1698 +ER_SET_PASSWORD_AUTH_PLUGIN = 1699 +OBSOLETE_ER_GRANT_PLUGIN_USER_EXISTS = 1700 +ER_TRUNCATE_ILLEGAL_FK = 1701 +ER_PLUGIN_IS_PERMANENT = 1702 +ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN = 1703 +ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX = 1704 +ER_STMT_CACHE_FULL = 1705 +ER_MULTI_UPDATE_KEY_CONFLICT = 1706 +ER_TABLE_NEEDS_REBUILD = 1707 +WARN_OPTION_BELOW_LIMIT = 1708 +ER_INDEX_COLUMN_TOO_LONG = 1709 +ER_ERROR_IN_TRIGGER_BODY = 1710 +ER_ERROR_IN_UNKNOWN_TRIGGER_BODY = 1711 +ER_INDEX_CORRUPT = 1712 +ER_UNDO_RECORD_TOO_BIG = 1713 +ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT = 1714 +ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE = 1715 +ER_BINLOG_UNSAFE_REPLACE_SELECT = 1716 +ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT = 1717 +ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT = 1718 +ER_BINLOG_UNSAFE_UPDATE_IGNORE = 1719 +ER_PLUGIN_NO_UNINSTALL = 1720 +ER_PLUGIN_NO_INSTALL = 1721 +ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT = 1722 +ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC = 1723 +ER_BINLOG_UNSAFE_INSERT_TWO_KEYS = 1724 +ER_TABLE_IN_FK_CHECK = 1725 +ER_UNSUPPORTED_ENGINE = 1726 +ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST = 1727 +ER_CANNOT_LOAD_FROM_TABLE_V2 = 1728 +ER_MASTER_DELAY_VALUE_OUT_OF_RANGE = 1729 +ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT = 1730 +ER_PARTITION_EXCHANGE_DIFFERENT_OPTION = 1731 +ER_PARTITION_EXCHANGE_PART_TABLE = 1732 +ER_PARTITION_EXCHANGE_TEMP_TABLE = 1733 +ER_PARTITION_INSTEAD_OF_SUBPARTITION = 1734 +ER_UNKNOWN_PARTITION = 1735 +ER_TABLES_DIFFERENT_METADATA = 1736 
+ER_ROW_DOES_NOT_MATCH_PARTITION = 1737 +ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX = 1738 +ER_WARN_INDEX_NOT_APPLICABLE = 1739 +ER_PARTITION_EXCHANGE_FOREIGN_KEY = 1740 +OBSOLETE_ER_NO_SUCH_KEY_VALUE = 1741 +ER_RPL_INFO_DATA_TOO_LONG = 1742 +OBSOLETE_ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE = 1743 +OBSOLETE_ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE = 1744 +ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX = 1745 +ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT = 1746 +ER_PARTITION_CLAUSE_ON_NONPARTITIONED = 1747 +ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET = 1748 +OBSOLETE_ER_NO_SUCH_PARTITION__UNUSED = 1749 +ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE = 1750 +ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE = 1751 +ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE = 1752 +ER_MTS_FEATURE_IS_NOT_SUPPORTED = 1753 +ER_MTS_UPDATED_DBS_GREATER_MAX = 1754 +ER_MTS_CANT_PARALLEL = 1755 +ER_MTS_INCONSISTENT_DATA = 1756 +ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING = 1757 +ER_DA_INVALID_CONDITION_NUMBER = 1758 +ER_INSECURE_PLAIN_TEXT = 1759 +ER_INSECURE_CHANGE_MASTER = 1760 +ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO = 1761 +ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO = 1762 +ER_SQLTHREAD_WITH_SECURE_SLAVE = 1763 +ER_TABLE_HAS_NO_FT = 1764 +ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER = 1765 +ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION = 1766 +OBSOLETE_ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST = 1767 +OBSOLETE_ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION = 1768 +ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION = 1769 +ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL = 1770 +OBSOLETE_ER_SKIPPING_LOGGED_TRANSACTION = 1771 +ER_MALFORMED_GTID_SET_SPECIFICATION = 1772 +ER_MALFORMED_GTID_SET_ENCODING = 1773 +ER_MALFORMED_GTID_SPECIFICATION = 1774 +ER_GNO_EXHAUSTED = 1775 +ER_BAD_SLAVE_AUTO_POSITION = 1776 +ER_AUTO_POSITION_REQUIRES_GTID_MODE_NOT_OFF = 1777 +ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET = 1778 +ER_GTID_MODE_ON_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON = 1779 +OBSOLETE_ER_GTID_MODE_REQUIRES_BINLOG = 1780 +ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF = 1781 +ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON = 1782 +ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF = 1783 +OBSOLETE_ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF__UNUSED = 1784 +ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE = 1785 +ER_GTID_UNSAFE_CREATE_SELECT = 1786 +OBSOLETE_ER_GTID_UNSAFE_CREATE_DROP_TEMP_TABLE_IN_TRANSACTION = 1787 +ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME = 1788 +ER_MASTER_HAS_PURGED_REQUIRED_GTIDS = 1789 +ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID = 1790 +ER_UNKNOWN_EXPLAIN_FORMAT = 1791 +ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION = 1792 +ER_TOO_LONG_TABLE_PARTITION_COMMENT = 1793 +ER_SLAVE_CONFIGURATION = 1794 +ER_INNODB_FT_LIMIT = 1795 +ER_INNODB_NO_FT_TEMP_TABLE = 1796 +ER_INNODB_FT_WRONG_DOCID_COLUMN = 1797 +ER_INNODB_FT_WRONG_DOCID_INDEX = 1798 +ER_INNODB_ONLINE_LOG_TOO_BIG = 1799 +ER_UNKNOWN_ALTER_ALGORITHM = 1800 +ER_UNKNOWN_ALTER_LOCK = 1801 +ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS = 1802 +ER_MTS_RECOVERY_FAILURE = 1803 +ER_MTS_RESET_WORKERS = 1804 +ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 = 1805 +ER_SLAVE_SILENT_RETRY_TRANSACTION = 1806 +ER_DISCARD_FK_CHECKS_RUNNING = 1807 +ER_TABLE_SCHEMA_MISMATCH = 1808 +ER_TABLE_IN_SYSTEM_TABLESPACE = 1809 +ER_IO_READ_ERROR = 1810 +ER_IO_WRITE_ERROR = 1811 +ER_TABLESPACE_MISSING = 1812 +ER_TABLESPACE_EXISTS = 1813 +ER_TABLESPACE_DISCARDED = 1814 +ER_INTERNAL_ERROR = 1815 +ER_INNODB_IMPORT_ERROR = 1816 +ER_INNODB_INDEX_CORRUPT = 1817 +ER_INVALID_YEAR_COLUMN_LENGTH = 1818 
+ER_NOT_VALID_PASSWORD = 1819 +ER_MUST_CHANGE_PASSWORD = 1820 +ER_FK_NO_INDEX_CHILD = 1821 +ER_FK_NO_INDEX_PARENT = 1822 +ER_FK_FAIL_ADD_SYSTEM = 1823 +ER_FK_CANNOT_OPEN_PARENT = 1824 +ER_FK_INCORRECT_OPTION = 1825 +ER_FK_DUP_NAME = 1826 +ER_PASSWORD_FORMAT = 1827 +ER_FK_COLUMN_CANNOT_DROP = 1828 +ER_FK_COLUMN_CANNOT_DROP_CHILD = 1829 +ER_FK_COLUMN_NOT_NULL = 1830 +ER_DUP_INDEX = 1831 +ER_FK_COLUMN_CANNOT_CHANGE = 1832 +ER_FK_COLUMN_CANNOT_CHANGE_CHILD = 1833 +OBSOLETE_ER_UNUSED5 = 1834 +ER_MALFORMED_PACKET = 1835 +ER_READ_ONLY_MODE = 1836 +ER_GTID_NEXT_TYPE_UNDEFINED_GTID = 1837 +ER_VARIABLE_NOT_SETTABLE_IN_SP = 1838 +OBSOLETE_ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF = 1839 +ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY = 1840 +ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY = 1841 +ER_GTID_PURGED_WAS_CHANGED = 1842 +ER_GTID_EXECUTED_WAS_CHANGED = 1843 +ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES = 1844 +ER_ALTER_OPERATION_NOT_SUPPORTED = 1845 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON = 1846 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY = 1847 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION = 1848 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME = 1849 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE = 1850 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK = 1851 +OBSOLETE_ER_UNUSED6 = 1852 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK = 1853 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC = 1854 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS = 1855 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS = 1856 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS = 1857 +OBSOLETE_ER_SQL_REPLICA_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE = 1858 +ER_DUP_UNKNOWN_IN_INDEX = 1859 +ER_IDENT_CAUSES_TOO_LONG_PATH = 1860 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL = 1861 +ER_MUST_CHANGE_PASSWORD_LOGIN = 1862 +ER_ROW_IN_WRONG_PARTITION = 1863 +ER_MTS_EVENT_BIGGER_PENDING_JOBS_SIZE_MAX = 1864 +OBSOLETE_ER_INNODB_NO_FT_USES_PARSER = 1865 +ER_BINLOG_LOGICAL_CORRUPTION = 1866 +ER_WARN_PURGE_LOG_IN_USE = 1867 +ER_WARN_PURGE_LOG_IS_ACTIVE = 1868 +ER_AUTO_INCREMENT_CONFLICT = 1869 +WARN_ON_BLOCKHOLE_IN_RBR = 1870 +ER_SLAVE_MI_INIT_REPOSITORY = 1871 +ER_SLAVE_RLI_INIT_REPOSITORY = 1872 +ER_ACCESS_DENIED_CHANGE_USER_ERROR = 1873 +ER_INNODB_READ_ONLY = 1874 +ER_STOP_SLAVE_SQL_THREAD_TIMEOUT = 1875 +ER_STOP_SLAVE_IO_THREAD_TIMEOUT = 1876 +ER_TABLE_CORRUPT = 1877 +ER_TEMP_FILE_WRITE_FAILURE = 1878 +ER_INNODB_FT_AUX_NOT_HEX_ID = 1879 +ER_OLD_TEMPORALS_UPGRADED = 1880 +ER_INNODB_FORCED_RECOVERY = 1881 +ER_AES_INVALID_IV = 1882 +ER_PLUGIN_CANNOT_BE_UNINSTALLED = 1883 +ER_GTID_UNSAFE_BINLOG_SPLITTABLE_STATEMENT_AND_ASSIGNED_GTID = 1884 +ER_SLAVE_HAS_MORE_GTIDS_THAN_MASTER = 1885 +ER_MISSING_KEY = 1886 +WARN_NAMED_PIPE_ACCESS_EVERYONE = 1887 +ER_FILE_CORRUPT = 3000 +ER_ERROR_ON_MASTER = 3001 +OBSOLETE_ER_INCONSISTENT_ERROR = 3002 +ER_STORAGE_ENGINE_NOT_LOADED = 3003 +ER_GET_STACKED_DA_WITHOUT_ACTIVE_HANDLER = 3004 +ER_WARN_LEGACY_SYNTAX_CONVERTED = 3005 +ER_BINLOG_UNSAFE_FULLTEXT_PLUGIN = 3006 +ER_CANNOT_DISCARD_TEMPORARY_TABLE = 3007 +ER_FK_DEPTH_EXCEEDED = 3008 +ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE_V2 = 3009 +ER_WARN_TRIGGER_DOESNT_HAVE_CREATED = 3010 +ER_REFERENCED_TRG_DOES_NOT_EXIST = 3011 +ER_EXPLAIN_NOT_SUPPORTED = 3012 +ER_INVALID_FIELD_SIZE = 3013 +ER_MISSING_HA_CREATE_OPTION = 3014 +ER_ENGINE_OUT_OF_MEMORY = 3015 +ER_PASSWORD_EXPIRE_ANONYMOUS_USER = 3016 +ER_SLAVE_SQL_THREAD_MUST_STOP = 3017 +ER_NO_FT_MATERIALIZED_SUBQUERY = 3018 +ER_INNODB_UNDO_LOG_FULL = 3019 
+ER_INVALID_ARGUMENT_FOR_LOGARITHM = 3020 +ER_SLAVE_CHANNEL_IO_THREAD_MUST_STOP = 3021 +ER_WARN_OPEN_TEMP_TABLES_MUST_BE_ZERO = 3022 +ER_WARN_ONLY_MASTER_LOG_FILE_NO_POS = 3023 +ER_QUERY_TIMEOUT = 3024 +ER_NON_RO_SELECT_DISABLE_TIMER = 3025 +ER_DUP_LIST_ENTRY = 3026 +OBSOLETE_ER_SQL_MODE_NO_EFFECT = 3027 +ER_AGGREGATE_ORDER_FOR_UNION = 3028 +ER_AGGREGATE_ORDER_NON_AGG_QUERY = 3029 +ER_SLAVE_WORKER_STOPPED_PREVIOUS_THD_ERROR = 3030 +ER_DONT_SUPPORT_REPLICA_PRESERVE_COMMIT_ORDER = 3031 +ER_SERVER_OFFLINE_MODE = 3032 +ER_GIS_DIFFERENT_SRIDS = 3033 +ER_GIS_UNSUPPORTED_ARGUMENT = 3034 +ER_GIS_UNKNOWN_ERROR = 3035 +ER_GIS_UNKNOWN_EXCEPTION = 3036 +ER_GIS_INVALID_DATA = 3037 +ER_BOOST_GEOMETRY_EMPTY_INPUT_EXCEPTION = 3038 +ER_BOOST_GEOMETRY_CENTROID_EXCEPTION = 3039 +ER_BOOST_GEOMETRY_OVERLAY_INVALID_INPUT_EXCEPTION = 3040 +ER_BOOST_GEOMETRY_TURN_INFO_EXCEPTION = 3041 +ER_BOOST_GEOMETRY_SELF_INTERSECTION_POINT_EXCEPTION = 3042 +ER_BOOST_GEOMETRY_UNKNOWN_EXCEPTION = 3043 +ER_STD_BAD_ALLOC_ERROR = 3044 +ER_STD_DOMAIN_ERROR = 3045 +ER_STD_LENGTH_ERROR = 3046 +ER_STD_INVALID_ARGUMENT = 3047 +ER_STD_OUT_OF_RANGE_ERROR = 3048 +ER_STD_OVERFLOW_ERROR = 3049 +ER_STD_RANGE_ERROR = 3050 +ER_STD_UNDERFLOW_ERROR = 3051 +ER_STD_LOGIC_ERROR = 3052 +ER_STD_RUNTIME_ERROR = 3053 +ER_STD_UNKNOWN_EXCEPTION = 3054 +ER_GIS_DATA_WRONG_ENDIANESS = 3055 +ER_CHANGE_MASTER_PASSWORD_LENGTH = 3056 +ER_USER_LOCK_WRONG_NAME = 3057 +ER_USER_LOCK_DEADLOCK = 3058 +ER_REPLACE_INACCESSIBLE_ROWS = 3059 +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS = 3060 +ER_ILLEGAL_USER_VAR = 3061 +ER_GTID_MODE_OFF = 3062 +OBSOLETE_ER_UNSUPPORTED_BY_REPLICATION_THREAD = 3063 +ER_INCORRECT_TYPE = 3064 +ER_FIELD_IN_ORDER_NOT_SELECT = 3065 +ER_AGGREGATE_IN_ORDER_NOT_SELECT = 3066 +ER_INVALID_RPL_WILD_TABLE_FILTER_PATTERN = 3067 +ER_NET_OK_PACKET_TOO_LARGE = 3068 +ER_INVALID_JSON_DATA = 3069 +ER_INVALID_GEOJSON_MISSING_MEMBER = 3070 +ER_INVALID_GEOJSON_WRONG_TYPE = 3071 +ER_INVALID_GEOJSON_UNSPECIFIED = 3072 +ER_DIMENSION_UNSUPPORTED = 3073 +ER_SLAVE_CHANNEL_DOES_NOT_EXIST = 3074 +OBSOLETE_ER_SLAVE_MULTIPLE_CHANNELS_HOST_PORT = 3075 +ER_SLAVE_CHANNEL_NAME_INVALID_OR_TOO_LONG = 3076 +ER_SLAVE_NEW_CHANNEL_WRONG_REPOSITORY = 3077 +OBSOLETE_ER_SLAVE_CHANNEL_DELETE = 3078 +ER_SLAVE_MULTIPLE_CHANNELS_CMD = 3079 +ER_SLAVE_MAX_CHANNELS_EXCEEDED = 3080 +ER_SLAVE_CHANNEL_MUST_STOP = 3081 +ER_SLAVE_CHANNEL_NOT_RUNNING = 3082 +ER_SLAVE_CHANNEL_WAS_RUNNING = 3083 +ER_SLAVE_CHANNEL_WAS_NOT_RUNNING = 3084 +ER_SLAVE_CHANNEL_SQL_THREAD_MUST_STOP = 3085 +ER_SLAVE_CHANNEL_SQL_SKIP_COUNTER = 3086 +ER_WRONG_FIELD_WITH_GROUP_V2 = 3087 +ER_MIX_OF_GROUP_FUNC_AND_FIELDS_V2 = 3088 +ER_WARN_DEPRECATED_SYSVAR_UPDATE = 3089 +ER_WARN_DEPRECATED_SQLMODE = 3090 +ER_CANNOT_LOG_PARTIAL_DROP_DATABASE_WITH_GTID = 3091 +ER_GROUP_REPLICATION_CONFIGURATION = 3092 +ER_GROUP_REPLICATION_RUNNING = 3093 +ER_GROUP_REPLICATION_APPLIER_INIT_ERROR = 3094 +ER_GROUP_REPLICATION_STOP_APPLIER_THREAD_TIMEOUT = 3095 +ER_GROUP_REPLICATION_COMMUNICATION_LAYER_SESSION_ERROR = 3096 +ER_GROUP_REPLICATION_COMMUNICATION_LAYER_JOIN_ERROR = 3097 +ER_BEFORE_DML_VALIDATION_ERROR = 3098 +ER_PREVENTS_VARIABLE_WITHOUT_RBR = 3099 +ER_RUN_HOOK_ERROR = 3100 +ER_TRANSACTION_ROLLBACK_DURING_COMMIT = 3101 +ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED = 3102 +ER_UNSUPPORTED_ALTER_INPLACE_ON_VIRTUAL_COLUMN = 3103 +ER_WRONG_FK_OPTION_FOR_GENERATED_COLUMN = 3104 +ER_NON_DEFAULT_VALUE_FOR_GENERATED_COLUMN = 3105 +ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN = 3106 +ER_GENERATED_COLUMN_NON_PRIOR = 3107 
+ER_DEPENDENT_BY_GENERATED_COLUMN = 3108 +ER_GENERATED_COLUMN_REF_AUTO_INC = 3109 +ER_FEATURE_NOT_AVAILABLE = 3110 +ER_CANT_SET_GTID_MODE = 3111 +ER_CANT_USE_AUTO_POSITION_WITH_GTID_MODE_OFF = 3112 +OBSOLETE_ER_CANT_REPLICATE_ANONYMOUS_WITH_AUTO_POSITION = 3113 +OBSOLETE_ER_CANT_REPLICATE_ANONYMOUS_WITH_GTID_MODE_ON = 3114 +OBSOLETE_ER_CANT_REPLICATE_GTID_WITH_GTID_MODE_OFF = 3115 +ER_CANT_ENFORCE_GTID_CONSISTENCY_WITH_ONGOING_GTID_VIOLATING_TX = 3116 +ER_ENFORCE_GTID_CONSISTENCY_WARN_WITH_ONGOING_GTID_VIOLATING_TX = 3117 +ER_ACCOUNT_HAS_BEEN_LOCKED = 3118 +ER_WRONG_TABLESPACE_NAME = 3119 +ER_TABLESPACE_IS_NOT_EMPTY = 3120 +ER_WRONG_FILE_NAME = 3121 +ER_BOOST_GEOMETRY_INCONSISTENT_TURNS_EXCEPTION = 3122 +ER_WARN_OPTIMIZER_HINT_SYNTAX_ERROR = 3123 +ER_WARN_BAD_MAX_EXECUTION_TIME = 3124 +ER_WARN_UNSUPPORTED_MAX_EXECUTION_TIME = 3125 +ER_WARN_CONFLICTING_HINT = 3126 +ER_WARN_UNKNOWN_QB_NAME = 3127 +ER_UNRESOLVED_HINT_NAME = 3128 +ER_WARN_ON_MODIFYING_GTID_EXECUTED_TABLE = 3129 +ER_PLUGGABLE_PROTOCOL_COMMAND_NOT_SUPPORTED = 3130 +ER_LOCKING_SERVICE_WRONG_NAME = 3131 +ER_LOCKING_SERVICE_DEADLOCK = 3132 +ER_LOCKING_SERVICE_TIMEOUT = 3133 +ER_GIS_MAX_POINTS_IN_GEOMETRY_OVERFLOWED = 3134 +ER_SQL_MODE_MERGED = 3135 +ER_VTOKEN_PLUGIN_TOKEN_MISMATCH = 3136 +ER_VTOKEN_PLUGIN_TOKEN_NOT_FOUND = 3137 +ER_CANT_SET_VARIABLE_WHEN_OWNING_GTID = 3138 +ER_SLAVE_CHANNEL_OPERATION_NOT_ALLOWED = 3139 +ER_INVALID_JSON_TEXT = 3140 +ER_INVALID_JSON_TEXT_IN_PARAM = 3141 +ER_INVALID_JSON_BINARY_DATA = 3142 +ER_INVALID_JSON_PATH = 3143 +ER_INVALID_JSON_CHARSET = 3144 +ER_INVALID_JSON_CHARSET_IN_FUNCTION = 3145 +ER_INVALID_TYPE_FOR_JSON = 3146 +ER_INVALID_CAST_TO_JSON = 3147 +ER_INVALID_JSON_PATH_CHARSET = 3148 +ER_INVALID_JSON_PATH_WILDCARD = 3149 +ER_JSON_VALUE_TOO_BIG = 3150 +ER_JSON_KEY_TOO_BIG = 3151 +ER_JSON_USED_AS_KEY = 3152 +ER_JSON_VACUOUS_PATH = 3153 +ER_JSON_BAD_ONE_OR_ALL_ARG = 3154 +ER_NUMERIC_JSON_VALUE_OUT_OF_RANGE = 3155 +ER_INVALID_JSON_VALUE_FOR_CAST = 3156 +ER_JSON_DOCUMENT_TOO_DEEP = 3157 +ER_JSON_DOCUMENT_NULL_KEY = 3158 +ER_SECURE_TRANSPORT_REQUIRED = 3159 +ER_NO_SECURE_TRANSPORTS_CONFIGURED = 3160 +ER_DISABLED_STORAGE_ENGINE = 3161 +ER_USER_DOES_NOT_EXIST = 3162 +ER_USER_ALREADY_EXISTS = 3163 +ER_AUDIT_API_ABORT = 3164 +ER_INVALID_JSON_PATH_ARRAY_CELL = 3165 +ER_BUFPOOL_RESIZE_INPROGRESS = 3166 +ER_FEATURE_DISABLED_SEE_DOC = 3167 +ER_SERVER_ISNT_AVAILABLE = 3168 +ER_SESSION_WAS_KILLED = 3169 +ER_CAPACITY_EXCEEDED = 3170 +ER_CAPACITY_EXCEEDED_IN_RANGE_OPTIMIZER = 3171 +OBSOLETE_ER_TABLE_NEEDS_UPG_PART = 3172 +ER_CANT_WAIT_FOR_EXECUTED_GTID_SET_WHILE_OWNING_A_GTID = 3173 +ER_CANNOT_ADD_FOREIGN_BASE_COL_VIRTUAL = 3174 +ER_CANNOT_CREATE_VIRTUAL_INDEX_CONSTRAINT = 3175 +ER_ERROR_ON_MODIFYING_GTID_EXECUTED_TABLE = 3176 +ER_LOCK_REFUSED_BY_ENGINE = 3177 +ER_UNSUPPORTED_ALTER_ONLINE_ON_VIRTUAL_COLUMN = 3178 +ER_MASTER_KEY_ROTATION_NOT_SUPPORTED_BY_SE = 3179 +OBSOLETE_ER_MASTER_KEY_ROTATION_ERROR_BY_SE = 3180 +ER_MASTER_KEY_ROTATION_BINLOG_FAILED = 3181 +ER_MASTER_KEY_ROTATION_SE_UNAVAILABLE = 3182 +ER_TABLESPACE_CANNOT_ENCRYPT = 3183 +ER_INVALID_ENCRYPTION_OPTION = 3184 +ER_CANNOT_FIND_KEY_IN_KEYRING = 3185 +ER_CAPACITY_EXCEEDED_IN_PARSER = 3186 +ER_UNSUPPORTED_ALTER_ENCRYPTION_INPLACE = 3187 +ER_KEYRING_UDF_KEYRING_SERVICE_ERROR = 3188 +ER_USER_COLUMN_OLD_LENGTH = 3189 +ER_CANT_RESET_MASTER = 3190 +ER_GROUP_REPLICATION_MAX_GROUP_SIZE = 3191 +ER_CANNOT_ADD_FOREIGN_BASE_COL_STORED = 3192 +ER_TABLE_REFERENCED = 3193 +OBSOLETE_ER_PARTITION_ENGINE_DEPRECATED_FOR_TABLE = 3194 
+OBSOLETE_ER_WARN_USING_GEOMFROMWKB_TO_SET_SRID_ZERO = 3195 +OBSOLETE_ER_WARN_USING_GEOMFROMWKB_TO_SET_SRID = 3196 +ER_XA_RETRY = 3197 +ER_KEYRING_AWS_UDF_AWS_KMS_ERROR = 3198 +ER_BINLOG_UNSAFE_XA = 3199 +ER_UDF_ERROR = 3200 +ER_KEYRING_MIGRATION_FAILURE = 3201 +ER_KEYRING_ACCESS_DENIED_ERROR = 3202 +ER_KEYRING_MIGRATION_STATUS = 3203 +OBSOLETE_ER_PLUGIN_FAILED_TO_OPEN_TABLES = 3204 +OBSOLETE_ER_PLUGIN_FAILED_TO_OPEN_TABLE = 3205 +OBSOLETE_ER_AUDIT_LOG_NO_KEYRING_PLUGIN_INSTALLED = 3206 +OBSOLETE_ER_AUDIT_LOG_ENCRYPTION_PASSWORD_HAS_NOT_BEEN_SET = 3207 +OBSOLETE_ER_AUDIT_LOG_COULD_NOT_CREATE_AES_KEY = 3208 +OBSOLETE_ER_AUDIT_LOG_ENCRYPTION_PASSWORD_CANNOT_BE_FETCHED = 3209 +OBSOLETE_ER_AUDIT_LOG_JSON_FILTERING_NOT_ENABLED = 3210 +OBSOLETE_ER_AUDIT_LOG_UDF_INSUFFICIENT_PRIVILEGE = 3211 +OBSOLETE_ER_AUDIT_LOG_SUPER_PRIVILEGE_REQUIRED = 3212 +OBSOLETE_ER_COULD_NOT_REINITIALIZE_AUDIT_LOG_FILTERS = 3213 +OBSOLETE_ER_AUDIT_LOG_UDF_INVALID_ARGUMENT_TYPE = 3214 +OBSOLETE_ER_AUDIT_LOG_UDF_INVALID_ARGUMENT_COUNT = 3215 +OBSOLETE_ER_AUDIT_LOG_HAS_NOT_BEEN_INSTALLED = 3216 +OBSOLETE_ER_AUDIT_LOG_UDF_READ_INVALID_MAX_ARRAY_LENGTH_ARG_TYPE = 3217 +ER_AUDIT_LOG_UDF_READ_INVALID_MAX_ARRAY_LENGTH_ARG_VALUE = 3218 +OBSOLETE_ER_AUDIT_LOG_JSON_FILTER_PARSING_ERROR = 3219 +OBSOLETE_ER_AUDIT_LOG_JSON_FILTER_NAME_CANNOT_BE_EMPTY = 3220 +OBSOLETE_ER_AUDIT_LOG_JSON_USER_NAME_CANNOT_BE_EMPTY = 3221 +OBSOLETE_ER_AUDIT_LOG_JSON_FILTER_DOES_NOT_EXISTS = 3222 +OBSOLETE_ER_AUDIT_LOG_USER_FIRST_CHARACTER_MUST_BE_ALPHANUMERIC = 3223 +OBSOLETE_ER_AUDIT_LOG_USER_NAME_INVALID_CHARACTER = 3224 +OBSOLETE_ER_AUDIT_LOG_HOST_NAME_INVALID_CHARACTER = 3225 +OBSOLETE_ER_XA_REPLICATION_FILTERS = 3226 +OBSOLETE_ER_CANT_OPEN_ERROR_LOG = 3227 +OBSOLETE_ER_GROUPING_ON_TIMESTAMP_IN_DST = 3228 +OBSOLETE_ER_CANT_START_SERVER_NAMED_PIPE = 3229 +ER_WRITE_SET_EXCEEDS_LIMIT = 3230 +ER_UNSUPPORT_COMPRESSED_TEMPORARY_TABLE = 3500 +ER_ACL_OPERATION_FAILED = 3501 +ER_UNSUPPORTED_INDEX_ALGORITHM = 3502 +ER_NO_SUCH_DB = 3503 +ER_TOO_BIG_ENUM = 3504 +ER_TOO_LONG_SET_ENUM_VALUE = 3505 +ER_INVALID_DD_OBJECT = 3506 +ER_UPDATING_DD_TABLE = 3507 +ER_INVALID_DD_OBJECT_ID = 3508 +ER_INVALID_DD_OBJECT_NAME = 3509 +ER_TABLESPACE_MISSING_WITH_NAME = 3510 +ER_TOO_LONG_ROUTINE_COMMENT = 3511 +ER_SP_LOAD_FAILED = 3512 +ER_INVALID_BITWISE_OPERANDS_SIZE = 3513 +ER_INVALID_BITWISE_AGGREGATE_OPERANDS_SIZE = 3514 +ER_WARN_UNSUPPORTED_HINT = 3515 +ER_UNEXPECTED_GEOMETRY_TYPE = 3516 +ER_SRS_PARSE_ERROR = 3517 +ER_SRS_PROJ_PARAMETER_MISSING = 3518 +ER_WARN_SRS_NOT_FOUND = 3519 +ER_SRS_NOT_CARTESIAN = 3520 +ER_SRS_NOT_CARTESIAN_UNDEFINED = 3521 +ER_PK_INDEX_CANT_BE_INVISIBLE = 3522 +ER_UNKNOWN_AUTHID = 3523 +ER_FAILED_ROLE_GRANT = 3524 +ER_OPEN_ROLE_TABLES = 3525 +ER_FAILED_DEFAULT_ROLES = 3526 +ER_COMPONENTS_NO_SCHEME = 3527 +ER_COMPONENTS_NO_SCHEME_SERVICE = 3528 +ER_COMPONENTS_CANT_LOAD = 3529 +ER_ROLE_NOT_GRANTED = 3530 +ER_FAILED_REVOKE_ROLE = 3531 +ER_RENAME_ROLE = 3532 +ER_COMPONENTS_CANT_ACQUIRE_SERVICE_IMPLEMENTATION = 3533 +ER_COMPONENTS_CANT_SATISFY_DEPENDENCY = 3534 +ER_COMPONENTS_LOAD_CANT_REGISTER_SERVICE_IMPLEMENTATION = 3535 +ER_COMPONENTS_LOAD_CANT_INITIALIZE = 3536 +ER_COMPONENTS_UNLOAD_NOT_LOADED = 3537 +ER_COMPONENTS_UNLOAD_CANT_DEINITIALIZE = 3538 +ER_COMPONENTS_CANT_RELEASE_SERVICE = 3539 +ER_COMPONENTS_UNLOAD_CANT_UNREGISTER_SERVICE = 3540 +ER_COMPONENTS_CANT_UNLOAD = 3541 +ER_WARN_UNLOAD_THE_NOT_PERSISTED = 3542 +ER_COMPONENT_TABLE_INCORRECT = 3543 +ER_COMPONENT_MANIPULATE_ROW_FAILED = 3544 +ER_COMPONENTS_UNLOAD_DUPLICATE_IN_GROUP = 3545 
+ER_CANT_SET_GTID_PURGED_DUE_SETS_CONSTRAINTS = 3546 +ER_CANNOT_LOCK_USER_MANAGEMENT_CACHES = 3547 +ER_SRS_NOT_FOUND = 3548 +ER_VARIABLE_NOT_PERSISTED = 3549 +ER_IS_QUERY_INVALID_CLAUSE = 3550 +ER_UNABLE_TO_STORE_STATISTICS = 3551 +ER_NO_SYSTEM_SCHEMA_ACCESS = 3552 +ER_NO_SYSTEM_TABLESPACE_ACCESS = 3553 +ER_NO_SYSTEM_TABLE_ACCESS = 3554 +ER_NO_SYSTEM_TABLE_ACCESS_FOR_DICTIONARY_TABLE = 3555 +ER_NO_SYSTEM_TABLE_ACCESS_FOR_SYSTEM_TABLE = 3556 +ER_NO_SYSTEM_TABLE_ACCESS_FOR_TABLE = 3557 +ER_INVALID_OPTION_KEY = 3558 +ER_INVALID_OPTION_VALUE = 3559 +ER_INVALID_OPTION_KEY_VALUE_PAIR = 3560 +ER_INVALID_OPTION_START_CHARACTER = 3561 +ER_INVALID_OPTION_END_CHARACTER = 3562 +ER_INVALID_OPTION_CHARACTERS = 3563 +ER_DUPLICATE_OPTION_KEY = 3564 +ER_WARN_SRS_NOT_FOUND_AXIS_ORDER = 3565 +ER_NO_ACCESS_TO_NATIVE_FCT = 3566 +ER_RESET_MASTER_TO_VALUE_OUT_OF_RANGE = 3567 +ER_UNRESOLVED_TABLE_LOCK = 3568 +ER_DUPLICATE_TABLE_LOCK = 3569 +ER_BINLOG_UNSAFE_SKIP_LOCKED = 3570 +ER_BINLOG_UNSAFE_NOWAIT = 3571 +ER_LOCK_NOWAIT = 3572 +ER_CTE_RECURSIVE_REQUIRES_UNION = 3573 +ER_CTE_RECURSIVE_REQUIRES_NONRECURSIVE_FIRST = 3574 +ER_CTE_RECURSIVE_FORBIDS_AGGREGATION = 3575 +ER_CTE_RECURSIVE_FORBIDDEN_JOIN_ORDER = 3576 +ER_CTE_RECURSIVE_REQUIRES_SINGLE_REFERENCE = 3577 +ER_SWITCH_TMP_ENGINE = 3578 +ER_WINDOW_NO_SUCH_WINDOW = 3579 +ER_WINDOW_CIRCULARITY_IN_WINDOW_GRAPH = 3580 +ER_WINDOW_NO_CHILD_PARTITIONING = 3581 +ER_WINDOW_NO_INHERIT_FRAME = 3582 +ER_WINDOW_NO_REDEFINE_ORDER_BY = 3583 +ER_WINDOW_FRAME_START_ILLEGAL = 3584 +ER_WINDOW_FRAME_END_ILLEGAL = 3585 +ER_WINDOW_FRAME_ILLEGAL = 3586 +ER_WINDOW_RANGE_FRAME_ORDER_TYPE = 3587 +ER_WINDOW_RANGE_FRAME_TEMPORAL_TYPE = 3588 +ER_WINDOW_RANGE_FRAME_NUMERIC_TYPE = 3589 +ER_WINDOW_RANGE_BOUND_NOT_CONSTANT = 3590 +ER_WINDOW_DUPLICATE_NAME = 3591 +ER_WINDOW_ILLEGAL_ORDER_BY = 3592 +ER_WINDOW_INVALID_WINDOW_FUNC_USE = 3593 +ER_WINDOW_INVALID_WINDOW_FUNC_ALIAS_USE = 3594 +ER_WINDOW_NESTED_WINDOW_FUNC_USE_IN_WINDOW_SPEC = 3595 +ER_WINDOW_ROWS_INTERVAL_USE = 3596 +ER_WINDOW_NO_GROUP_ORDER_UNUSED = 3597 +ER_WINDOW_EXPLAIN_JSON = 3598 +ER_WINDOW_FUNCTION_IGNORES_FRAME = 3599 +ER_WL9236_NOW_UNUSED = 3600 +ER_INVALID_NO_OF_ARGS = 3601 +ER_FIELD_IN_GROUPING_NOT_GROUP_BY = 3602 +ER_TOO_LONG_TABLESPACE_COMMENT = 3603 +ER_ENGINE_CANT_DROP_TABLE = 3604 +ER_ENGINE_CANT_DROP_MISSING_TABLE = 3605 +ER_TABLESPACE_DUP_FILENAME = 3606 +ER_DB_DROP_RMDIR2 = 3607 +ER_IMP_NO_FILES_MATCHED = 3608 +ER_IMP_SCHEMA_DOES_NOT_EXIST = 3609 +ER_IMP_TABLE_ALREADY_EXISTS = 3610 +ER_IMP_INCOMPATIBLE_MYSQLD_VERSION = 3611 +ER_IMP_INCOMPATIBLE_DD_VERSION = 3612 +ER_IMP_INCOMPATIBLE_SDI_VERSION = 3613 +ER_WARN_INVALID_HINT = 3614 +ER_VAR_DOES_NOT_EXIST = 3615 +ER_LONGITUDE_OUT_OF_RANGE = 3616 +ER_LATITUDE_OUT_OF_RANGE = 3617 +ER_NOT_IMPLEMENTED_FOR_GEOGRAPHIC_SRS = 3618 +ER_ILLEGAL_PRIVILEGE_LEVEL = 3619 +ER_NO_SYSTEM_VIEW_ACCESS = 3620 +ER_COMPONENT_FILTER_FLABBERGASTED = 3621 +ER_PART_EXPR_TOO_LONG = 3622 +ER_UDF_DROP_DYNAMICALLY_REGISTERED = 3623 +ER_UNABLE_TO_STORE_COLUMN_STATISTICS = 3624 +ER_UNABLE_TO_UPDATE_COLUMN_STATISTICS = 3625 +ER_UNABLE_TO_DROP_COLUMN_STATISTICS = 3626 +ER_UNABLE_TO_BUILD_HISTOGRAM = 3627 +ER_MANDATORY_ROLE = 3628 +ER_MISSING_TABLESPACE_FILE = 3629 +ER_PERSIST_ONLY_ACCESS_DENIED_ERROR = 3630 +ER_CMD_NEED_SUPER = 3631 +ER_PATH_IN_DATADIR = 3632 +ER_CLONE_DDL_IN_PROGRESS = 3633 +ER_CLONE_TOO_MANY_CONCURRENT_CLONES = 3634 +ER_APPLIER_LOG_EVENT_VALIDATION_ERROR = 3635 +ER_CTE_MAX_RECURSION_DEPTH = 3636 +ER_NOT_HINT_UPDATABLE_VARIABLE = 3637 +ER_CREDENTIALS_CONTRADICT_TO_HISTORY = 3638 
+ER_WARNING_PASSWORD_HISTORY_CLAUSES_VOID = 3639 +ER_CLIENT_DOES_NOT_SUPPORT = 3640 +ER_I_S_SKIPPED_TABLESPACE = 3641 +ER_TABLESPACE_ENGINE_MISMATCH = 3642 +ER_WRONG_SRID_FOR_COLUMN = 3643 +ER_CANNOT_ALTER_SRID_DUE_TO_INDEX = 3644 +ER_WARN_BINLOG_PARTIAL_UPDATES_DISABLED = 3645 +ER_WARN_BINLOG_V1_ROW_EVENTS_DISABLED = 3646 +ER_WARN_BINLOG_PARTIAL_UPDATES_SUGGESTS_PARTIAL_IMAGES = 3647 +ER_COULD_NOT_APPLY_JSON_DIFF = 3648 +ER_CORRUPTED_JSON_DIFF = 3649 +ER_RESOURCE_GROUP_EXISTS = 3650 +ER_RESOURCE_GROUP_NOT_EXISTS = 3651 +ER_INVALID_VCPU_ID = 3652 +ER_INVALID_VCPU_RANGE = 3653 +ER_INVALID_THREAD_PRIORITY = 3654 +ER_DISALLOWED_OPERATION = 3655 +ER_RESOURCE_GROUP_BUSY = 3656 +ER_RESOURCE_GROUP_DISABLED = 3657 +ER_FEATURE_UNSUPPORTED = 3658 +ER_ATTRIBUTE_IGNORED = 3659 +ER_INVALID_THREAD_ID = 3660 +ER_RESOURCE_GROUP_BIND_FAILED = 3661 +ER_INVALID_USE_OF_FORCE_OPTION = 3662 +ER_GROUP_REPLICATION_COMMAND_FAILURE = 3663 +ER_SDI_OPERATION_FAILED = 3664 +ER_MISSING_JSON_TABLE_VALUE = 3665 +ER_WRONG_JSON_TABLE_VALUE = 3666 +ER_TF_MUST_HAVE_ALIAS = 3667 +ER_TF_FORBIDDEN_JOIN_TYPE = 3668 +ER_JT_VALUE_OUT_OF_RANGE = 3669 +ER_JT_MAX_NESTED_PATH = 3670 +ER_PASSWORD_EXPIRATION_NOT_SUPPORTED_BY_AUTH_METHOD = 3671 +ER_INVALID_GEOJSON_CRS_NOT_TOP_LEVEL = 3672 +ER_BAD_NULL_ERROR_NOT_IGNORED = 3673 +WARN_USELESS_SPATIAL_INDEX = 3674 +ER_DISK_FULL_NOWAIT = 3675 +ER_PARSE_ERROR_IN_DIGEST_FN = 3676 +ER_UNDISCLOSED_PARSE_ERROR_IN_DIGEST_FN = 3677 +ER_SCHEMA_DIR_EXISTS = 3678 +ER_SCHEMA_DIR_MISSING = 3679 +ER_SCHEMA_DIR_CREATE_FAILED = 3680 +ER_SCHEMA_DIR_UNKNOWN = 3681 +ER_ONLY_IMPLEMENTED_FOR_SRID_0_AND_4326 = 3682 +ER_BINLOG_EXPIRE_LOG_DAYS_AND_SECS_USED_TOGETHER = 3683 +ER_REGEXP_BUFFER_OVERFLOW = 3684 +ER_REGEXP_ILLEGAL_ARGUMENT = 3685 +ER_REGEXP_INDEX_OUTOFBOUNDS_ERROR = 3686 +ER_REGEXP_INTERNAL_ERROR = 3687 +ER_REGEXP_RULE_SYNTAX = 3688 +ER_REGEXP_BAD_ESCAPE_SEQUENCE = 3689 +ER_REGEXP_UNIMPLEMENTED = 3690 +ER_REGEXP_MISMATCHED_PAREN = 3691 +ER_REGEXP_BAD_INTERVAL = 3692 +ER_REGEXP_MAX_LT_MIN = 3693 +ER_REGEXP_INVALID_BACK_REF = 3694 +ER_REGEXP_LOOK_BEHIND_LIMIT = 3695 +ER_REGEXP_MISSING_CLOSE_BRACKET = 3696 +ER_REGEXP_INVALID_RANGE = 3697 +ER_REGEXP_STACK_OVERFLOW = 3698 +ER_REGEXP_TIME_OUT = 3699 +ER_REGEXP_PATTERN_TOO_BIG = 3700 +ER_CANT_SET_ERROR_LOG_SERVICE = 3701 +ER_EMPTY_PIPELINE_FOR_ERROR_LOG_SERVICE = 3702 +ER_COMPONENT_FILTER_DIAGNOSTICS = 3703 +ER_NOT_IMPLEMENTED_FOR_CARTESIAN_SRS = 3704 +ER_NOT_IMPLEMENTED_FOR_PROJECTED_SRS = 3705 +ER_NONPOSITIVE_RADIUS = 3706 +ER_RESTART_SERVER_FAILED = 3707 +ER_SRS_MISSING_MANDATORY_ATTRIBUTE = 3708 +ER_SRS_MULTIPLE_ATTRIBUTE_DEFINITIONS = 3709 +ER_SRS_NAME_CANT_BE_EMPTY_OR_WHITESPACE = 3710 +ER_SRS_ORGANIZATION_CANT_BE_EMPTY_OR_WHITESPACE = 3711 +ER_SRS_ID_ALREADY_EXISTS = 3712 +ER_WARN_SRS_ID_ALREADY_EXISTS = 3713 +ER_CANT_MODIFY_SRID_0 = 3714 +ER_WARN_RESERVED_SRID_RANGE = 3715 +ER_CANT_MODIFY_SRS_USED_BY_COLUMN = 3716 +ER_SRS_INVALID_CHARACTER_IN_ATTRIBUTE = 3717 +ER_SRS_ATTRIBUTE_STRING_TOO_LONG = 3718 +ER_DEPRECATED_UTF8_ALIAS = 3719 +ER_DEPRECATED_NATIONAL = 3720 +ER_INVALID_DEFAULT_UTF8MB4_COLLATION = 3721 +ER_UNABLE_TO_COLLECT_LOG_STATUS = 3722 +ER_RESERVED_TABLESPACE_NAME = 3723 +ER_UNABLE_TO_SET_OPTION = 3724 +ER_SLAVE_POSSIBLY_DIVERGED_AFTER_DDL = 3725 +ER_SRS_NOT_GEOGRAPHIC = 3726 +ER_POLYGON_TOO_LARGE = 3727 +ER_SPATIAL_UNIQUE_INDEX = 3728 +ER_INDEX_TYPE_NOT_SUPPORTED_FOR_SPATIAL_INDEX = 3729 +ER_FK_CANNOT_DROP_PARENT = 3730 +ER_GEOMETRY_PARAM_LONGITUDE_OUT_OF_RANGE = 3731 +ER_GEOMETRY_PARAM_LATITUDE_OUT_OF_RANGE = 3732 
+ER_FK_CANNOT_USE_VIRTUAL_COLUMN = 3733 +ER_FK_NO_COLUMN_PARENT = 3734 +ER_CANT_SET_ERROR_SUPPRESSION_LIST = 3735 +ER_SRS_GEOGCS_INVALID_AXES = 3736 +ER_SRS_INVALID_SEMI_MAJOR_AXIS = 3737 +ER_SRS_INVALID_INVERSE_FLATTENING = 3738 +ER_SRS_INVALID_ANGULAR_UNIT = 3739 +ER_SRS_INVALID_PRIME_MERIDIAN = 3740 +ER_TRANSFORM_SOURCE_SRS_NOT_SUPPORTED = 3741 +ER_TRANSFORM_TARGET_SRS_NOT_SUPPORTED = 3742 +ER_TRANSFORM_SOURCE_SRS_MISSING_TOWGS84 = 3743 +ER_TRANSFORM_TARGET_SRS_MISSING_TOWGS84 = 3744 +ER_TEMP_TABLE_PREVENTS_SWITCH_SESSION_BINLOG_FORMAT = 3745 +ER_TEMP_TABLE_PREVENTS_SWITCH_GLOBAL_BINLOG_FORMAT = 3746 +ER_RUNNING_APPLIER_PREVENTS_SWITCH_GLOBAL_BINLOG_FORMAT = 3747 +ER_CLIENT_GTID_UNSAFE_CREATE_DROP_TEMP_TABLE_IN_TRX_IN_SBR = 3748 +OBSOLETE_ER_XA_CANT_CREATE_MDL_BACKUP = 3749 +ER_TABLE_WITHOUT_PK = 3750 +ER_WARN_DATA_TRUNCATED_FUNCTIONAL_INDEX = 3751 +ER_WARN_DATA_OUT_OF_RANGE_FUNCTIONAL_INDEX = 3752 +ER_FUNCTIONAL_INDEX_ON_JSON_OR_GEOMETRY_FUNCTION = 3753 +ER_FUNCTIONAL_INDEX_REF_AUTO_INCREMENT = 3754 +ER_CANNOT_DROP_COLUMN_FUNCTIONAL_INDEX = 3755 +ER_FUNCTIONAL_INDEX_PRIMARY_KEY = 3756 +ER_FUNCTIONAL_INDEX_ON_LOB = 3757 +ER_FUNCTIONAL_INDEX_FUNCTION_IS_NOT_ALLOWED = 3758 +ER_FULLTEXT_FUNCTIONAL_INDEX = 3759 +ER_SPATIAL_FUNCTIONAL_INDEX = 3760 +ER_WRONG_KEY_COLUMN_FUNCTIONAL_INDEX = 3761 +ER_FUNCTIONAL_INDEX_ON_FIELD = 3762 +ER_GENERATED_COLUMN_NAMED_FUNCTION_IS_NOT_ALLOWED = 3763 +ER_GENERATED_COLUMN_ROW_VALUE = 3764 +ER_GENERATED_COLUMN_VARIABLES = 3765 +ER_DEPENDENT_BY_DEFAULT_GENERATED_VALUE = 3766 +ER_DEFAULT_VAL_GENERATED_NON_PRIOR = 3767 +ER_DEFAULT_VAL_GENERATED_REF_AUTO_INC = 3768 +ER_DEFAULT_VAL_GENERATED_FUNCTION_IS_NOT_ALLOWED = 3769 +ER_DEFAULT_VAL_GENERATED_NAMED_FUNCTION_IS_NOT_ALLOWED = 3770 +ER_DEFAULT_VAL_GENERATED_ROW_VALUE = 3771 +ER_DEFAULT_VAL_GENERATED_VARIABLES = 3772 +ER_DEFAULT_AS_VAL_GENERATED = 3773 +ER_UNSUPPORTED_ACTION_ON_DEFAULT_VAL_GENERATED = 3774 +ER_GTID_UNSAFE_ALTER_ADD_COL_WITH_DEFAULT_EXPRESSION = 3775 +ER_FK_CANNOT_CHANGE_ENGINE = 3776 +ER_WARN_DEPRECATED_USER_SET_EXPR = 3777 +ER_WARN_DEPRECATED_UTF8MB3_COLLATION = 3778 +ER_WARN_DEPRECATED_NESTED_COMMENT_SYNTAX = 3779 +ER_FK_INCOMPATIBLE_COLUMNS = 3780 +ER_GR_HOLD_WAIT_TIMEOUT = 3781 +ER_GR_HOLD_KILLED = 3782 +ER_GR_HOLD_MEMBER_STATUS_ERROR = 3783 +ER_RPL_ENCRYPTION_FAILED_TO_FETCH_KEY = 3784 +ER_RPL_ENCRYPTION_KEY_NOT_FOUND = 3785 +ER_RPL_ENCRYPTION_KEYRING_INVALID_KEY = 3786 +ER_RPL_ENCRYPTION_HEADER_ERROR = 3787 +ER_RPL_ENCRYPTION_FAILED_TO_ROTATE_LOGS = 3788 +ER_RPL_ENCRYPTION_KEY_EXISTS_UNEXPECTED = 3789 +ER_RPL_ENCRYPTION_FAILED_TO_GENERATE_KEY = 3790 +ER_RPL_ENCRYPTION_FAILED_TO_STORE_KEY = 3791 +ER_RPL_ENCRYPTION_FAILED_TO_REMOVE_KEY = 3792 +ER_RPL_ENCRYPTION_UNABLE_TO_CHANGE_OPTION = 3793 +ER_RPL_ENCRYPTION_MASTER_KEY_RECOVERY_FAILED = 3794 +ER_SLOW_LOG_MODE_IGNORED_WHEN_NOT_LOGGING_TO_FILE = 3795 +ER_GRP_TRX_CONSISTENCY_NOT_ALLOWED = 3796 +ER_GRP_TRX_CONSISTENCY_BEFORE = 3797 +ER_GRP_TRX_CONSISTENCY_AFTER_ON_TRX_BEGIN = 3798 +ER_GRP_TRX_CONSISTENCY_BEGIN_NOT_ALLOWED = 3799 +ER_FUNCTIONAL_INDEX_ROW_VALUE_IS_NOT_ALLOWED = 3800 +ER_RPL_ENCRYPTION_FAILED_TO_ENCRYPT = 3801 +ER_PAGE_TRACKING_NOT_STARTED = 3802 +ER_PAGE_TRACKING_RANGE_NOT_TRACKED = 3803 +ER_PAGE_TRACKING_CANNOT_PURGE = 3804 +ER_RPL_ENCRYPTION_CANNOT_ROTATE_BINLOG_MASTER_KEY = 3805 +ER_BINLOG_MASTER_KEY_RECOVERY_OUT_OF_COMBINATION = 3806 +ER_BINLOG_MASTER_KEY_ROTATION_FAIL_TO_OPERATE_KEY = 3807 +ER_BINLOG_MASTER_KEY_ROTATION_FAIL_TO_ROTATE_LOGS = 3808 +ER_BINLOG_MASTER_KEY_ROTATION_FAIL_TO_REENCRYPT_LOG = 3809 
+ER_BINLOG_MASTER_KEY_ROTATION_FAIL_TO_CLEANUP_UNUSED_KEYS = 3810
+ER_BINLOG_MASTER_KEY_ROTATION_FAIL_TO_CLEANUP_AUX_KEY = 3811
+ER_NON_BOOLEAN_EXPR_FOR_CHECK_CONSTRAINT = 3812
+ER_COLUMN_CHECK_CONSTRAINT_REFERENCES_OTHER_COLUMN = 3813
+ER_CHECK_CONSTRAINT_NAMED_FUNCTION_IS_NOT_ALLOWED = 3814
+ER_CHECK_CONSTRAINT_FUNCTION_IS_NOT_ALLOWED = 3815
+ER_CHECK_CONSTRAINT_VARIABLES = 3816
+ER_CHECK_CONSTRAINT_ROW_VALUE = 3817
+ER_CHECK_CONSTRAINT_REFERS_AUTO_INCREMENT_COLUMN = 3818
+ER_CHECK_CONSTRAINT_VIOLATED = 3819
+ER_CHECK_CONSTRAINT_REFERS_UNKNOWN_COLUMN = 3820
+ER_CHECK_CONSTRAINT_NOT_FOUND = 3821
+ER_CHECK_CONSTRAINT_DUP_NAME = 3822
+ER_CHECK_CONSTRAINT_CLAUSE_USING_FK_REFER_ACTION_COLUMN = 3823
+WARN_UNENCRYPTED_TABLE_IN_ENCRYPTED_DB = 3824
+ER_INVALID_ENCRYPTION_REQUEST = 3825
+ER_CANNOT_SET_TABLE_ENCRYPTION = 3826
+ER_CANNOT_SET_DATABASE_ENCRYPTION = 3827
+ER_CANNOT_SET_TABLESPACE_ENCRYPTION = 3828
+ER_TABLESPACE_CANNOT_BE_ENCRYPTED = 3829
+ER_TABLESPACE_CANNOT_BE_DECRYPTED = 3830
+ER_TABLESPACE_TYPE_UNKNOWN = 3831
+ER_TARGET_TABLESPACE_UNENCRYPTED = 3832
+ER_CANNOT_USE_ENCRYPTION_CLAUSE = 3833
+ER_INVALID_MULTIPLE_CLAUSES = 3834
+ER_UNSUPPORTED_USE_OF_GRANT_AS = 3835
+ER_UKNOWN_AUTH_ID_OR_ACCESS_DENIED_FOR_GRANT_AS = 3836
+ER_DEPENDENT_BY_FUNCTIONAL_INDEX = 3837
+ER_PLUGIN_NOT_EARLY = 3838
+ER_INNODB_REDO_LOG_ARCHIVE_START_SUBDIR_PATH = 3839
+ER_INNODB_REDO_LOG_ARCHIVE_START_TIMEOUT = 3840
+ER_INNODB_REDO_LOG_ARCHIVE_DIRS_INVALID = 3841
+ER_INNODB_REDO_LOG_ARCHIVE_LABEL_NOT_FOUND = 3842
+ER_INNODB_REDO_LOG_ARCHIVE_DIR_EMPTY = 3843
+ER_INNODB_REDO_LOG_ARCHIVE_NO_SUCH_DIR = 3844
+ER_INNODB_REDO_LOG_ARCHIVE_DIR_CLASH = 3845
+ER_INNODB_REDO_LOG_ARCHIVE_DIR_PERMISSIONS = 3846
+ER_INNODB_REDO_LOG_ARCHIVE_FILE_CREATE = 3847
+ER_INNODB_REDO_LOG_ARCHIVE_ACTIVE = 3848
+ER_INNODB_REDO_LOG_ARCHIVE_INACTIVE = 3849
+ER_INNODB_REDO_LOG_ARCHIVE_FAILED = 3850
+ER_INNODB_REDO_LOG_ARCHIVE_SESSION = 3851
+ER_STD_REGEX_ERROR = 3852
+ER_INVALID_JSON_TYPE = 3853
+ER_CANNOT_CONVERT_STRING = 3854
+ER_DEPENDENT_BY_PARTITION_FUNC = 3855
+ER_WARN_DEPRECATED_FLOAT_AUTO_INCREMENT = 3856
+ER_RPL_CANT_STOP_SLAVE_WHILE_LOCKED_BACKUP = 3857
+ER_WARN_DEPRECATED_FLOAT_DIGITS = 3858
+ER_WARN_DEPRECATED_FLOAT_UNSIGNED = 3859
+ER_WARN_DEPRECATED_INTEGER_DISPLAY_WIDTH = 3860
+ER_WARN_DEPRECATED_ZEROFILL = 3861
+ER_CLONE_DONOR = 3862
+ER_CLONE_PROTOCOL = 3863
+ER_CLONE_DONOR_VERSION = 3864
+ER_CLONE_OS = 3865
+ER_CLONE_PLATFORM = 3866
+ER_CLONE_CHARSET = 3867
+ER_CLONE_CONFIG = 3868
+ER_CLONE_SYS_CONFIG = 3869
+ER_CLONE_PLUGIN_MATCH = 3870
+ER_CLONE_LOOPBACK = 3871
+ER_CLONE_ENCRYPTION = 3872
+ER_CLONE_DISK_SPACE = 3873
+ER_CLONE_IN_PROGRESS = 3874
+ER_CLONE_DISALLOWED = 3875
+ER_CANNOT_GRANT_ROLES_TO_ANONYMOUS_USER = 3876
+ER_SECONDARY_ENGINE_PLUGIN = 3877
+ER_SECOND_PASSWORD_CANNOT_BE_EMPTY = 3878
+ER_DB_ACCESS_DENIED = 3879
+ER_DA_AUTH_ID_WITH_SYSTEM_USER_PRIV_IN_MANDATORY_ROLES = 3880
+ER_DA_RPL_GTID_TABLE_CANNOT_OPEN = 3881
+ER_GEOMETRY_IN_UNKNOWN_LENGTH_UNIT = 3882
+ER_DA_PLUGIN_INSTALL_ERROR = 3883
+ER_NO_SESSION_TEMP = 3884
+ER_DA_UNKNOWN_ERROR_NUMBER = 3885
+ER_COLUMN_CHANGE_SIZE = 3886
+ER_REGEXP_INVALID_CAPTURE_GROUP_NAME = 3887
+ER_DA_SSL_LIBRARY_ERROR = 3888
+ER_SECONDARY_ENGINE = 3889
+ER_SECONDARY_ENGINE_DDL = 3890
+ER_INCORRECT_CURRENT_PASSWORD = 3891
+ER_MISSING_CURRENT_PASSWORD = 3892
+ER_CURRENT_PASSWORD_NOT_REQUIRED = 3893
+ER_PASSWORD_CANNOT_BE_RETAINED_ON_PLUGIN_CHANGE = 3894
+ER_CURRENT_PASSWORD_CANNOT_BE_RETAINED = 3895
+ER_PARTIAL_REVOKES_EXIST = 3896
+ER_CANNOT_GRANT_SYSTEM_PRIV_TO_MANDATORY_ROLE = 3897
+ER_XA_REPLICATION_FILTERS = 3898
+ER_UNSUPPORTED_SQL_MODE = 3899
+ER_REGEXP_INVALID_FLAG = 3900
+ER_PARTIAL_REVOKE_AND_DB_GRANT_BOTH_EXISTS = 3901
+ER_UNIT_NOT_FOUND = 3902
+ER_INVALID_JSON_VALUE_FOR_FUNC_INDEX = 3903
+ER_JSON_VALUE_OUT_OF_RANGE_FOR_FUNC_INDEX = 3904
+ER_EXCEEDED_MV_KEYS_NUM = 3905
+ER_EXCEEDED_MV_KEYS_SPACE = 3906
+ER_FUNCTIONAL_INDEX_DATA_IS_TOO_LONG = 3907
+ER_WRONG_MVI_VALUE = 3908
+ER_WARN_FUNC_INDEX_NOT_APPLICABLE = 3909
+ER_GRP_RPL_UDF_ERROR = 3910
+ER_UPDATE_GTID_PURGED_WITH_GR = 3911
+ER_GROUPING_ON_TIMESTAMP_IN_DST = 3912
+ER_TABLE_NAME_CAUSES_TOO_LONG_PATH = 3913
+ER_AUDIT_LOG_INSUFFICIENT_PRIVILEGE = 3914
+OBSOLETE_ER_AUDIT_LOG_PASSWORD_HAS_BEEN_COPIED = 3915
+ER_DA_GRP_RPL_STARTED_AUTO_REJOIN = 3916
+ER_SYSVAR_CHANGE_DURING_QUERY = 3917
+ER_GLOBSTAT_CHANGE_DURING_QUERY = 3918
+ER_GRP_RPL_MESSAGE_SERVICE_INIT_FAILURE = 3919
+ER_CHANGE_MASTER_WRONG_COMPRESSION_ALGORITHM_CLIENT = 3920
+ER_CHANGE_MASTER_WRONG_COMPRESSION_LEVEL_CLIENT = 3921
+ER_WRONG_COMPRESSION_ALGORITHM_CLIENT = 3922
+ER_WRONG_COMPRESSION_LEVEL_CLIENT = 3923
+ER_CHANGE_MASTER_WRONG_COMPRESSION_ALGORITHM_LIST_CLIENT = 3924
+ER_CLIENT_PRIVILEGE_CHECKS_USER_CANNOT_BE_ANONYMOUS = 3925
+ER_CLIENT_PRIVILEGE_CHECKS_USER_DOES_NOT_EXIST = 3926
+ER_CLIENT_PRIVILEGE_CHECKS_USER_CORRUPT = 3927
+ER_CLIENT_PRIVILEGE_CHECKS_USER_NEEDS_RPL_APPLIER_PRIV = 3928
+ER_WARN_DA_PRIVILEGE_NOT_REGISTERED = 3929
+ER_CLIENT_KEYRING_UDF_KEY_INVALID = 3930
+ER_CLIENT_KEYRING_UDF_KEY_TYPE_INVALID = 3931
+ER_CLIENT_KEYRING_UDF_KEY_TOO_LONG = 3932
+ER_CLIENT_KEYRING_UDF_KEY_TYPE_TOO_LONG = 3933
+ER_JSON_SCHEMA_VALIDATION_ERROR_WITH_DETAILED_REPORT = 3934
+ER_DA_UDF_INVALID_CHARSET_SPECIFIED = 3935
+ER_DA_UDF_INVALID_CHARSET = 3936
+ER_DA_UDF_INVALID_COLLATION = 3937
+ER_DA_UDF_INVALID_EXTENSION_ARGUMENT_TYPE = 3938
+ER_MULTIPLE_CONSTRAINTS_WITH_SAME_NAME = 3939
+ER_CONSTRAINT_NOT_FOUND = 3940
+ER_ALTER_CONSTRAINT_ENFORCEMENT_NOT_SUPPORTED = 3941
+ER_TABLE_VALUE_CONSTRUCTOR_MUST_HAVE_COLUMNS = 3942
+ER_TABLE_VALUE_CONSTRUCTOR_CANNOT_HAVE_DEFAULT = 3943
+ER_CLIENT_QUERY_FAILURE_INVALID_NON_ROW_FORMAT = 3944
+ER_REQUIRE_ROW_FORMAT_INVALID_VALUE = 3945
+ER_FAILED_TO_DETERMINE_IF_ROLE_IS_MANDATORY = 3946
+ER_FAILED_TO_FETCH_MANDATORY_ROLE_LIST = 3947
+ER_CLIENT_LOCAL_FILES_DISABLED = 3948
+ER_IMP_INCOMPATIBLE_CFG_VERSION = 3949
+ER_DA_OOM = 3950
+ER_DA_UDF_INVALID_ARGUMENT_TO_SET_CHARSET = 3951
+ER_DA_UDF_INVALID_RETURN_TYPE_TO_SET_CHARSET = 3952
+ER_MULTIPLE_INTO_CLAUSES = 3953
+ER_MISPLACED_INTO = 3954
+ER_USER_ACCESS_DENIED_FOR_USER_ACCOUNT_BLOCKED_BY_PASSWORD_LOCK = 3955
+ER_WARN_DEPRECATED_YEAR_UNSIGNED = 3956
+ER_CLONE_NETWORK_PACKET = 3957
+ER_SDI_OPERATION_FAILED_MISSING_RECORD = 3958
+ER_DEPENDENT_BY_CHECK_CONSTRAINT = 3959
+ER_GRP_OPERATION_NOT_ALLOWED_GR_MUST_STOP = 3960
+ER_WARN_DEPRECATED_JSON_TABLE_ON_ERROR_ON_EMPTY = 3961
+ER_WARN_DEPRECATED_INNER_INTO = 3962
+ER_WARN_DEPRECATED_VALUES_FUNCTION_ALWAYS_NULL = 3963
+ER_WARN_DEPRECATED_SQL_CALC_FOUND_ROWS = 3964
+ER_WARN_DEPRECATED_FOUND_ROWS = 3965
+ER_MISSING_JSON_VALUE = 3966
+ER_MULTIPLE_JSON_VALUES = 3967
+ER_HOSTNAME_TOO_LONG = 3968
+ER_WARN_CLIENT_DEPRECATED_PARTITION_PREFIX_KEY = 3969
+ER_GROUP_REPLICATION_USER_EMPTY_MSG = 3970
+ER_GROUP_REPLICATION_USER_MANDATORY_MSG = 3971
+ER_GROUP_REPLICATION_PASSWORD_LENGTH = 3972
+ER_SUBQUERY_TRANSFORM_REJECTED = 3973
+ER_DA_GRP_RPL_RECOVERY_ENDPOINT_FORMAT = 3974
+ER_DA_GRP_RPL_RECOVERY_ENDPOINT_INVALID = 3975
+ER_WRONG_VALUE_FOR_VAR_PLUS_ACTIONABLE_PART = 3976
+ER_STATEMENT_NOT_ALLOWED_AFTER_START_TRANSACTION = 3977
+ER_FOREIGN_KEY_WITH_ATOMIC_CREATE_SELECT = 3978
+ER_NOT_ALLOWED_WITH_START_TRANSACTION = 3979
+ER_INVALID_JSON_ATTRIBUTE = 3980
+ER_ENGINE_ATTRIBUTE_NOT_SUPPORTED = 3981
+ER_INVALID_USER_ATTRIBUTE_JSON = 3982
+ER_INNODB_REDO_DISABLED = 3983
+ER_INNODB_REDO_ARCHIVING_ENABLED = 3984
+ER_MDL_OUT_OF_RESOURCES = 3985
+ER_IMPLICIT_COMPARISON_FOR_JSON = 3986
+ER_FUNCTION_DOES_NOT_SUPPORT_CHARACTER_SET = 3987
+ER_IMPOSSIBLE_STRING_CONVERSION = 3988
+ER_SCHEMA_READ_ONLY = 3989
+ER_RPL_ASYNC_RECONNECT_GTID_MODE_OFF = 3990
+ER_RPL_ASYNC_RECONNECT_AUTO_POSITION_OFF = 3991
+ER_DISABLE_GTID_MODE_REQUIRES_ASYNC_RECONNECT_OFF = 3992
+ER_DISABLE_AUTO_POSITION_REQUIRES_ASYNC_RECONNECT_OFF = 3993
+ER_INVALID_PARAMETER_USE = 3994
+ER_CHARACTER_SET_MISMATCH = 3995
+ER_WARN_VAR_VALUE_CHANGE_NOT_SUPPORTED = 3996
+ER_INVALID_TIME_ZONE_INTERVAL = 3997
+ER_INVALID_CAST = 3998
+ER_HYPERGRAPH_NOT_SUPPORTED_YET = 3999
+ER_WARN_HYPERGRAPH_EXPERIMENTAL = 4000
+ER_DA_NO_ERROR_LOG_PARSER_CONFIGURED = 4001
+ER_DA_ERROR_LOG_TABLE_DISABLED = 4002
+ER_DA_ERROR_LOG_MULTIPLE_FILTERS = 4003
+ER_DA_CANT_OPEN_ERROR_LOG = 4004
+ER_USER_REFERENCED_AS_DEFINER = 4005
+ER_CANNOT_USER_REFERENCED_AS_DEFINER = 4006
+ER_REGEX_NUMBER_TOO_BIG = 4007
+ER_SPVAR_NONINTEGER_TYPE = 4008
+WARN_UNSUPPORTED_ACL_TABLES_READ = 4009
+ER_BINLOG_UNSAFE_ACL_TABLE_READ_IN_DML_DDL = 4010
+ER_STOP_REPLICA_MONITOR_IO_THREAD_TIMEOUT = 4011
+ER_STARTING_REPLICA_MONITOR_IO_THREAD = 4012
+ER_CANT_USE_ANONYMOUS_TO_GTID_WITH_GTID_MODE_NOT_ON = 4013
+ER_CANT_COMBINE_ANONYMOUS_TO_GTID_AND_AUTOPOSITION = 4014
+ER_ASSIGN_GTIDS_TO_ANONYMOUS_TRANSACTIONS_REQUIRES_GTID_MODE_ON = 4015
+ER_SQL_REPLICA_SKIP_COUNTER_USED_WITH_GTID_MODE_ON = 4016
+ER_USING_ASSIGN_GTIDS_TO_ANONYMOUS_TRANSACTIONS_AS_LOCAL_OR_UUID = 4017
+ER_CANT_SET_ANONYMOUS_TO_GTID_AND_WAIT_UNTIL_SQL_THD_AFTER_GTIDS = 4018
+ER_CANT_SET_SQL_AFTER_OR_BEFORE_GTIDS_WITH_ANONYMOUS_TO_GTID = 4019
+ER_ANONYMOUS_TO_GTID_UUID_SAME_AS_GROUP_NAME = 4020
+ER_CANT_USE_SAME_UUID_AS_GROUP_NAME = 4021
+ER_GRP_RPL_RECOVERY_CHANNEL_STILL_RUNNING = 4022
+ER_INNODB_INVALID_AUTOEXTEND_SIZE_VALUE = 4023
+ER_INNODB_INCOMPATIBLE_WITH_TABLESPACE = 4024
+ER_INNODB_AUTOEXTEND_SIZE_OUT_OF_RANGE = 4025
+ER_CANNOT_USE_AUTOEXTEND_SIZE_CLAUSE = 4026
+ER_ROLE_GRANTED_TO_ITSELF = 4027
+ER_TABLE_MUST_HAVE_A_VISIBLE_COLUMN = 4028
+ER_INNODB_COMPRESSION_FAILURE = 4029
+ER_WARN_ASYNC_CONN_FAILOVER_NETWORK_NAMESPACE = 4030
+ER_CLIENT_INTERACTION_TIMEOUT = 4031
+ER_INVALID_CAST_TO_GEOMETRY = 4032
+ER_INVALID_CAST_POLYGON_RING_DIRECTION = 4033
+ER_GIS_DIFFERENT_SRIDS_AGGREGATION = 4034
+ER_RELOAD_KEYRING_FAILURE = 4035
+ER_SDI_GET_KEYS_INVALID_TABLESPACE = 4036
+ER_CHANGE_RPL_SRC_WRONG_COMPRESSION_ALGORITHM_SIZE = 4037
+ER_WARN_DEPRECATED_TLS_VERSION_FOR_CHANNEL_CLI = 4038
+ER_CANT_USE_SAME_UUID_AS_VIEW_CHANGE_UUID = 4039
+ER_ANONYMOUS_TO_GTID_UUID_SAME_AS_VIEW_CHANGE_UUID = 4040
+ER_GRP_RPL_VIEW_CHANGE_UUID_FAIL_GET_VARIABLE = 4041
+ER_WARN_ADUIT_LOG_MAX_SIZE_AND_PRUNE_SECONDS = 4042
+ER_WARN_ADUIT_LOG_MAX_SIZE_CLOSE_TO_ROTATE_ON_SIZE = 4043
+ER_KERBEROS_CREATE_USER = 4044
+ER_INSTALL_PLUGIN_CONFLICT_CLIENT = 4045
+ER_DA_ERROR_LOG_COMPONENT_FLUSH_FAILED = 4046
+ER_WARN_SQL_AFTER_MTS_GAPS_GAP_NOT_CALCULATED = 4047
+ER_INVALID_ASSIGNMENT_TARGET = 4048
+ER_OPERATION_NOT_ALLOWED_ON_GR_SECONDARY = 4049
+ER_GRP_RPL_FAILOVER_CHANNEL_STATUS_PROPAGATION = 4050
+ER_WARN_AUDIT_LOG_FORMAT_UNIX_TIMESTAMP_ONLY_WHEN_JSON = 4051
+ER_INVALID_MFA_PLUGIN_SPECIFIED = 4052
+ER_IDENTIFIED_BY_UNSUPPORTED = 4053
+ER_INVALID_PLUGIN_FOR_REGISTRATION = 4054
+ER_PLUGIN_REQUIRES_REGISTRATION = 4055
+ER_MFA_METHOD_EXISTS = 4056
+ER_MFA_METHOD_NOT_EXISTS = 4057
+ER_AUTHENTICATION_POLICY_MISMATCH = 4058
+ER_PLUGIN_REGISTRATION_DONE = 4059
+ER_INVALID_USER_FOR_REGISTRATION = 4060
+ER_USER_REGISTRATION_FAILED = 4061
+ER_MFA_METHODS_INVALID_ORDER = 4062
+ER_MFA_METHODS_IDENTICAL = 4063
+ER_INVALID_MFA_OPERATIONS_FOR_PASSWORDLESS_USER = 4064
+ER_CHANGE_REPLICATION_SOURCE_NO_OPTIONS_FOR_GTID_ONLY = 4065
+ER_CHANGE_REP_SOURCE_CANT_DISABLE_REQ_ROW_FORMAT_WITH_GTID_ONLY = 4066
+ER_CHANGE_REP_SOURCE_CANT_DISABLE_AUTO_POSITION_WITH_GTID_ONLY = 4067
+ER_CHANGE_REP_SOURCE_CANT_DISABLE_GTID_ONLY_WITHOUT_POSITIONS = 4068
+ER_CHANGE_REP_SOURCE_CANT_DISABLE_AUTO_POS_WITHOUT_POSITIONS = 4069
+ER_CHANGE_REP_SOURCE_GR_CHANNEL_WITH_GTID_MODE_NOT_ON = 4070
+ER_CANT_USE_GTID_ONLY_WITH_GTID_MODE_NOT_ON = 4071
+ER_WARN_C_DISABLE_GTID_ONLY_WITH_SOURCE_AUTO_POS_INVALID_POS = 4072
+ER_DA_SSL_FIPS_MODE_ERROR = 4073
+CR_UNKNOWN_ERROR = 2000
+CR_SOCKET_CREATE_ERROR = 2001
+CR_CONNECTION_ERROR = 2002
+CR_CONN_HOST_ERROR = 2003
+CR_IPSOCK_ERROR = 2004
+CR_UNKNOWN_HOST = 2005
+CR_SERVER_GONE_ERROR = 2006
+CR_VERSION_ERROR = 2007
+CR_OUT_OF_MEMORY = 2008
+CR_WRONG_HOST_INFO = 2009
+CR_LOCALHOST_CONNECTION = 2010
+CR_TCP_CONNECTION = 2011
+CR_SERVER_HANDSHAKE_ERR = 2012
+CR_SERVER_LOST = 2013
+CR_COMMANDS_OUT_OF_SYNC = 2014
+CR_NAMEDPIPE_CONNECTION = 2015
+CR_NAMEDPIPEWAIT_ERROR = 2016
+CR_NAMEDPIPEOPEN_ERROR = 2017
+CR_NAMEDPIPESETSTATE_ERROR = 2018
+CR_CANT_READ_CHARSET = 2019
+CR_NET_PACKET_TOO_LARGE = 2020
+CR_EMBEDDED_CONNECTION = 2021
+CR_PROBE_SLAVE_STATUS = 2022
+CR_PROBE_SLAVE_HOSTS = 2023
+CR_PROBE_SLAVE_CONNECT = 2024
+CR_PROBE_MASTER_CONNECT = 2025
+CR_SSL_CONNECTION_ERROR = 2026
+CR_MALFORMED_PACKET = 2027
+CR_WRONG_LICENSE = 2028
+CR_NULL_POINTER = 2029
+CR_NO_PREPARE_STMT = 2030
+CR_PARAMS_NOT_BOUND = 2031
+CR_DATA_TRUNCATED = 2032
+CR_NO_PARAMETERS_EXISTS = 2033
+CR_INVALID_PARAMETER_NO = 2034
+CR_INVALID_BUFFER_USE = 2035
+CR_UNSUPPORTED_PARAM_TYPE = 2036
+CR_SHARED_MEMORY_CONNECTION = 2037
+CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038
+CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039
+CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040
+CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041
+CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042
+CR_SHARED_MEMORY_MAP_ERROR = 2043
+CR_SHARED_MEMORY_EVENT_ERROR = 2044
+CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045
+CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046
+CR_CONN_UNKNOW_PROTOCOL = 2047
+CR_INVALID_CONN_HANDLE = 2048
+CR_UNUSED_1 = 2049
+CR_FETCH_CANCELED = 2050
+CR_NO_DATA = 2051
+CR_NO_STMT_METADATA = 2052
+CR_NO_RESULT_SET = 2053
+CR_NOT_IMPLEMENTED = 2054
+CR_SERVER_LOST_EXTENDED = 2055
+CR_STMT_CLOSED = 2056
+CR_NEW_STMT_METADATA = 2057
+CR_ALREADY_CONNECTED = 2058
+CR_AUTH_PLUGIN_CANNOT_LOAD = 2059
+CR_DUPLICATE_CONNECTION_ATTR = 2060
+CR_AUTH_PLUGIN_ERR = 2061
+CR_INSECURE_API_ERR = 2062
+CR_FILE_NAME_TOO_LONG = 2063
+CR_SSL_FIPS_MODE_ERR = 2064
+CR_DEPRECATED_COMPRESSION_NOT_SUPPORTED = 2065
+CR_COMPRESSION_WRONGLY_CONFIGURED = 2066
+CR_KERBEROS_USER_NOT_FOUND = 2067
+CR_LOAD_DATA_LOCAL_INFILE_REJECTED = 2068
+CR_LOAD_DATA_LOCAL_INFILE_REALPATH_FAIL = 2069
+CR_DNS_SRV_LOOKUP_FAILED = 2070
+CR_MANDATORY_TRACKER_NOT_FOUND = 2071
+CR_INVALID_FACTOR_NO = 2072
+# End MySQL Errors
+
+# Start X Plugin Errors
+ER_X_BAD_MESSAGE = 5000
+ER_X_CAPABILITIES_PREPARE_FAILED = 5001
+ER_X_CAPABILITY_NOT_FOUND = 5002
+ER_X_INVALID_PROTOCOL_DATA = 5003
+ER_X_BAD_CONNECTION_SESSION_ATTRIBUTE_VALUE_LENGTH = 5004
+ER_X_BAD_CONNECTION_SESSION_ATTRIBUTE_KEY_LENGTH = 5005
+ER_X_BAD_CONNECTION_SESSION_ATTRIBUTE_EMPTY_KEY = 5006
+ER_X_BAD_CONNECTION_SESSION_ATTRIBUTE_LENGTH = 5007
+ER_X_BAD_CONNECTION_SESSION_ATTRIBUTE_TYPE = 5008
+ER_X_CAPABILITY_SET_NOT_ALLOWED = 5009
+ER_X_SERVICE_ERROR = 5010
+ER_X_SESSION = 5011
+ER_X_INVALID_ARGUMENT = 5012
+ER_X_MISSING_ARGUMENT = 5013
+ER_X_BAD_INSERT_DATA = 5014
+ER_X_CMD_NUM_ARGUMENTS = 5015
+ER_X_CMD_ARGUMENT_TYPE = 5016
+ER_X_CMD_ARGUMENT_VALUE = 5017
+ER_X_BAD_UPSERT_DATA = 5018
+ER_X_DUPLICATED_CAPABILITIES = 5019
+ER_X_CMD_ARGUMENT_OBJECT_EMPTY = 5020
+ER_X_CMD_INVALID_ARGUMENT = 5021
+ER_X_BAD_UPDATE_DATA = 5050
+ER_X_BAD_TYPE_OF_UPDATE = 5051
+ER_X_BAD_COLUMN_TO_UPDATE = 5052
+ER_X_BAD_MEMBER_TO_UPDATE = 5053
+ER_X_BAD_STATEMENT_ID = 5110
+ER_X_BAD_CURSOR_ID = 5111
+ER_X_BAD_SCHEMA = 5112
+ER_X_BAD_TABLE = 5113
+ER_X_BAD_PROJECTION = 5114
+ER_X_DOC_ID_MISSING = 5115
+ER_X_DUPLICATE_ENTRY = 5116
+ER_X_DOC_REQUIRED_FIELD_MISSING = 5117
+ER_X_PROJ_BAD_KEY_NAME = 5120
+ER_X_BAD_DOC_PATH = 5121
+ER_X_CURSOR_EXISTS = 5122
+ER_X_CURSOR_REACHED_EOF = 5123
+ER_X_PREPARED_STATMENT_CAN_HAVE_ONE_CURSOR = 5131
+ER_X_PREPARED_EXECUTE_ARGUMENT_NOT_SUPPORTED = 5133
+ER_X_PREPARED_EXECUTE_ARGUMENT_CONSISTENCY = 5134
+ER_X_EXPR_BAD_OPERATOR = 5150
+ER_X_EXPR_BAD_NUM_ARGS = 5151
+ER_X_EXPR_MISSING_ARG = 5152
+ER_X_EXPR_BAD_TYPE_VALUE = 5153
+ER_X_EXPR_BAD_VALUE = 5154
+ER_X_INVALID_COLLECTION = 5156
+ER_X_INVALID_ADMIN_COMMAND = 5157
+ER_X_EXPECT_NOT_OPEN = 5158
+ER_X_EXPECT_NO_ERROR_FAILED = 5159
+ER_X_EXPECT_BAD_CONDITION = 5160
+ER_X_EXPECT_BAD_CONDITION_VALUE = 5161
+ER_X_INVALID_NAMESPACE = 5162
+ER_X_BAD_NOTICE = 5163
+ER_X_CANNOT_DISABLE_NOTICE = 5164
+ER_X_BAD_CONFIGURATION = 5165
+ER_X_MYSQLX_ACCOUNT_MISSING_PERMISSIONS = 5167
+ER_X_EXPECT_FIELD_EXISTS_FAILED = 5168
+ER_X_BAD_LOCKING = 5169
+ER_X_FRAME_COMPRESSION_DISABLED = 5170
+ER_X_DECOMPRESSION_FAILED = 5171
+ER_X_BAD_COMPRESSED_FRAME = 5174
+ER_X_CAPABILITY_COMPRESSION_INVALID_ALGORITHM = 5175
+ER_X_CAPABILITY_COMPRESSION_INVALID_SERVER_STYLE = 5176
+ER_X_CAPABILITY_COMPRESSION_INVALID_CLIENT_STYLE = 5177
+ER_X_CAPABILITY_COMPRESSION_INVALID_OPTION = 5178
+ER_X_CAPABILITY_COMPRESSION_MISSING_REQUIRED_FIELDS = 5179
+ER_X_DOCUMENT_DOESNT_MATCH_EXPECTED_SCHEMA = 5180
+ER_X_COLLECTION_OPTION_DOESNT_EXISTS = 5181
+ER_X_INVALID_VALIDATION_SCHEMA = 5182
+# End X Plugin Errors
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/errors.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/errors.py
new file mode 100644
index 00000000..44453e77
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/errors.py
@@ -0,0 +1,337 @@
+# Copyright (c) 2009, 2022, Oracle and/or its affiliates.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2.0, as
+# published by the Free Software Foundation.
+#
+# This program is also distributed with certain software (including
+# but not limited to OpenSSL) that is licensed under separate terms,
+# as designated in a particular file or component or in included license
+# documentation. The authors of MySQL hereby grant you an
+# additional permission to link the program and your derivative works
+# with the separately licensed software that they have included with
+# MySQL.
+#
+# Without limiting anything contained in the foregoing, this file,
+# which is part of MySQL Connector/Python, is also subject to the
+# Universal FOSS Exception, version 1.0, a copy of which can be found at
+# http://oss.oracle.com/licenses/universal-foss-exception.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License, version 2.0, for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Python exceptions."""
+from typing import Dict, Mapping, Optional, Tuple, Type, Union
+
+from .locales import get_client_error
+from .types import StrOrBytes
+from .utils import read_bytes, read_int
+
+
+class Error(Exception):
+    """Exception that is the base class for all other error exceptions"""
+
+    def __init__(
+        self,
+        msg: Optional[str] = None,
+        errno: Optional[int] = None,
+        values: Optional[Tuple[Union[int, str], ...]] = None,
+        sqlstate: Optional[str] = None,
+    ) -> None:
+        super().__init__()
+        self.msg = msg
+        self._full_msg = self.msg
+        self.errno = errno or -1
+        self.sqlstate = sqlstate
+
+        if not self.msg and (2000 <= self.errno < 3000):
+            self.msg = get_client_error(self.errno)
+            if values is not None:
+                try:
+                    self.msg = self.msg % values
+                except TypeError as err:
+                    self.msg = f"{self.msg} (Warning: {err})"
+        elif not self.msg:
+            self._full_msg = self.msg = "Unknown error"
+
+        if self.msg and self.errno != -1:
+            fields = {"errno": self.errno, "msg": self.msg}
+            if self.sqlstate:
+                fmt = "{errno} ({state}): {msg}"
+                fields["state"] = self.sqlstate
+            else:
+                fmt = "{errno}: {msg}"
+            self._full_msg = fmt.format(**fields)
+
+        self.args = (self.errno, self._full_msg, self.sqlstate)
+
+    def __str__(self) -> str:
+        return self._full_msg
+
+
+class Warning(Exception):  # pylint: disable=redefined-builtin
+    """Exception for important warnings"""
+
+
+class InterfaceError(Error):
+    """Exception for errors related to the interface"""
+
+
+class DatabaseError(Error):
+    """Exception for errors related to the database"""
+
+
+class InternalError(DatabaseError):
+    """Exception for internal database errors"""
+
+
+class OperationalError(DatabaseError):
+    """Exception for errors related to the database's operation"""
+
+
+class ProgrammingError(DatabaseError):
+    """Exception for programming errors"""
+
+
+class IntegrityError(DatabaseError):
+    """Exception for errors regarding relational integrity"""
+
+
+class DataError(DatabaseError):
+    """Exception for errors reporting problems with processed data"""
+
+
+class NotSupportedError(DatabaseError):
+    """Exception for errors when an unsupported database feature was used"""
+
+
+class PoolError(Error):
+    """Exception for errors relating to connection pooling"""
+
+
+ErrorClassTypes = Union[
+    Type[Error],
+    Type[InterfaceError],
+    Type[DatabaseError],
+    Type[InternalError],
+    Type[OperationalError],
+    Type[ProgrammingError],
+    Type[IntegrityError],
+    Type[DataError],
+    Type[NotSupportedError],
+    Type[PoolError],
+]
+ErrorTypes = Union[
+    Error,
+    InterfaceError,
+    DatabaseError,
+    InternalError,
+    OperationalError,
+    ProgrammingError,
+    IntegrityError,
+    DataError,
+    NotSupportedError,
+    PoolError,
+    Warning,
+]
+# _CUSTOM_ERROR_EXCEPTIONS holds custom exceptions and is used by the
+# function custom_error_exception. _ERROR_EXCEPTIONS (at bottom of module)
+# is similar, but holds hardcoded exceptions.
+_CUSTOM_ERROR_EXCEPTIONS: Dict[int, ErrorClassTypes] = {}
+
+
+def custom_error_exception(
+    error: Optional[Union[int, Dict[int, Optional[ErrorClassTypes]]]] = None,
+    exception: Optional[ErrorClassTypes] = None,
+) -> Mapping[int, Optional[ErrorClassTypes]]:
+    """Define custom exceptions for MySQL server errors
+
+    This function defines custom exceptions for MySQL server errors and
+    returns the current set of customizations.
+
+    If error is a MySQL Server error number, you must also pass the
+    exception class.
+
+    The error argument can also be a dictionary, in which case each key is
+    a server error number and each value the exception to be raised.
+
+    If none of the arguments are given, custom_error_exception() simply
+    returns the current set of customizations.
+
+    To reset the customizations, supply an empty dictionary.
+
+    Examples:
+        import mysql.connector
+        from mysql.connector import errorcode
+
+        # Server error 1028 should raise a DatabaseError
+        mysql.connector.custom_error_exception(
+            1028, mysql.connector.DatabaseError)
+
+        # Or using a dictionary:
+        mysql.connector.custom_error_exception({
+            1028: mysql.connector.DatabaseError,
+            1029: mysql.connector.OperationalError,
+        })
+
+        # Reset
+        mysql.connector.custom_error_exception({})
+
+    Returns a dictionary.
+    """
+    global _CUSTOM_ERROR_EXCEPTIONS  # pylint: disable=global-statement
+
+    if isinstance(error, dict) and not error:
+        _CUSTOM_ERROR_EXCEPTIONS = {}
+        return _CUSTOM_ERROR_EXCEPTIONS
+
+    if not error and not exception:
+        return _CUSTOM_ERROR_EXCEPTIONS
+
+    if not isinstance(error, (int, dict)):
+        raise ValueError("The error argument should be either an integer or dictionary")
+
+    if isinstance(error, int):
+        error = {error: exception}
+
+    for errno, _exception in error.items():
+        if not isinstance(errno, int):
+            raise ValueError("Error number should be an integer")
+        try:
+            if _exception is None or not issubclass(_exception, Exception):
+                raise TypeError
+        except TypeError as err:
+            raise ValueError("Exception should be subclass of Exception") from err
+        _CUSTOM_ERROR_EXCEPTIONS[errno] = _exception
+
+    return _CUSTOM_ERROR_EXCEPTIONS
+
+
+def get_mysql_exception(
+    errno: int,
+    msg: Optional[str] = None,
+    sqlstate: Optional[str] = None,
+    warning: Optional[bool] = False,
+) -> ErrorTypes:
+    """Get the exception matching the MySQL error
+
+    This function will return an exception based on the SQLState. The given
+    message will be passed on in the returned exception.
+
+    The exception returned can be customized using the
+    mysql.connector.custom_error_exception() function.
+ + Returns an Exception + """ + try: + return _CUSTOM_ERROR_EXCEPTIONS[errno](msg=msg, errno=errno, sqlstate=sqlstate) + except KeyError: + # Error was not mapped to particular exception + pass + + try: + return _ERROR_EXCEPTIONS[errno](msg=msg, errno=errno, sqlstate=sqlstate) + except KeyError: + # Error was not mapped to particular exception + pass + + if not sqlstate: + if warning: + return Warning(errno, msg) + return DatabaseError(msg=msg, errno=errno) + + try: + return _SQLSTATE_CLASS_EXCEPTION[sqlstate[0:2]]( + msg=msg, errno=errno, sqlstate=sqlstate + ) + except KeyError: + # Return default InterfaceError + return DatabaseError(msg=msg, errno=errno, sqlstate=sqlstate) + + +def get_exception(packet: bytes) -> ErrorTypes: + """Returns an exception object based on the MySQL error + + Returns an exception object based on the MySQL error in the given + packet. + + Returns an Error-Object. + """ + errno = errmsg = None + + try: + if packet[4] != 255: + raise ValueError("Packet is not an error packet") + except IndexError as err: + return InterfaceError(f"Failed getting Error information ({err})") + + sqlstate: Optional[StrOrBytes] = None + try: + packet = packet[5:] + packet, errno = read_int(packet, 2) + if packet[0] != 35: + # Error without SQLState + if isinstance(packet, (bytes, bytearray)): + errmsg = packet.decode("utf8") + else: + errmsg = packet + else: + packet, sqlstate = read_bytes(packet[1:], 5) + sqlstate = sqlstate.decode("utf8") + errmsg = packet.decode("utf8") + except (IndexError, UnicodeError) as err: + return InterfaceError(f"Failed getting Error information ({err})") + else: + return get_mysql_exception(errno, errmsg, sqlstate) # type: ignore[arg-type] + + +_SQLSTATE_CLASS_EXCEPTION: Dict[str, ErrorClassTypes] = { + "02": DataError, # no data + "07": DatabaseError, # dynamic SQL error + "08": OperationalError, # connection exception + "0A": NotSupportedError, # feature not supported + "21": DataError, # cardinality violation + "22": DataError, # data exception + "23": IntegrityError, # integrity constraint violation + "24": ProgrammingError, # invalid cursor state + "25": ProgrammingError, # invalid transaction state + "26": ProgrammingError, # invalid SQL statement name + "27": ProgrammingError, # triggered data change violation + "28": ProgrammingError, # invalid authorization specification + "2A": ProgrammingError, # direct SQL syntax error or access rule violation + "2B": DatabaseError, # dependent privilege descriptors still exist + "2C": ProgrammingError, # invalid character set name + "2D": DatabaseError, # invalid transaction termination + "2E": DatabaseError, # invalid connection name + "33": DatabaseError, # invalid SQL descriptor name + "34": ProgrammingError, # invalid cursor name + "35": ProgrammingError, # invalid condition number + "37": ProgrammingError, # dynamic SQL syntax error or access rule violation + "3C": ProgrammingError, # ambiguous cursor name + "3D": ProgrammingError, # invalid catalog name + "3F": ProgrammingError, # invalid schema name + "40": InternalError, # transaction rollback + "42": ProgrammingError, # syntax error or access rule violation + "44": InternalError, # with check option violation + "HZ": OperationalError, # remote database access + "XA": IntegrityError, + "0K": OperationalError, + "HY": DatabaseError, # default when no SQLState provided by MySQL server +} + +_ERROR_EXCEPTIONS: Dict[int, ErrorClassTypes] = { + 1243: ProgrammingError, + 1210: ProgrammingError, + 2002: InterfaceError, + 2013: OperationalError, + 2049: 
_SQLSTATE_CLASS_EXCEPTION: Dict[str, ErrorClassTypes] = {
+    "02": DataError,  # no data
+    "07": DatabaseError,  # dynamic SQL error
+    "08": OperationalError,  # connection exception
+    "0A": NotSupportedError,  # feature not supported
+    "21": DataError,  # cardinality violation
+    "22": DataError,  # data exception
+    "23": IntegrityError,  # integrity constraint violation
+    "24": ProgrammingError,  # invalid cursor state
+    "25": ProgrammingError,  # invalid transaction state
+    "26": ProgrammingError,  # invalid SQL statement name
+    "27": ProgrammingError,  # triggered data change violation
+    "28": ProgrammingError,  # invalid authorization specification
+    "2A": ProgrammingError,  # direct SQL syntax error or access rule violation
+    "2B": DatabaseError,  # dependent privilege descriptors still exist
+    "2C": ProgrammingError,  # invalid character set name
+    "2D": DatabaseError,  # invalid transaction termination
+    "2E": DatabaseError,  # invalid connection name
+    "33": DatabaseError,  # invalid SQL descriptor name
+    "34": ProgrammingError,  # invalid cursor name
+    "35": ProgrammingError,  # invalid condition number
+    "37": ProgrammingError,  # dynamic SQL syntax error or access rule violation
+    "3C": ProgrammingError,  # ambiguous cursor name
+    "3D": ProgrammingError,  # invalid catalog name
+    "3F": ProgrammingError,  # invalid schema name
+    "40": InternalError,  # transaction rollback
+    "42": ProgrammingError,  # syntax error or access rule violation
+    "44": InternalError,  # with check option violation
+    "HZ": OperationalError,  # remote database access
+    "XA": IntegrityError,
+    "0K": OperationalError,
+    "HY": DatabaseError,  # default when no SQLState provided by MySQL server
+}
+
+_ERROR_EXCEPTIONS: Dict[int, ErrorClassTypes] = {
+    1243: ProgrammingError,
+    1210: ProgrammingError,
+    2002: InterfaceError,
+    2013: OperationalError,
+    2049: NotSupportedError,
+    2055: OperationalError,
+    2061: InterfaceError,
+    2026: InterfaceError,
+}
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/locales/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/locales/__init__.py
new file mode 100644
index 00000000..f6727ee9
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/locales/__init__.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2.0, as
+# published by the Free Software Foundation.
+#
+# This program is also distributed with certain software (including
+# but not limited to OpenSSL) that is licensed under separate terms,
+# as designated in a particular file or component or in included license
+# documentation. The authors of MySQL hereby grant you an
+# additional permission to link the program and your derivative works
+# with the separately licensed software that they have included with
+# MySQL.
+#
+# Without limiting anything contained in the foregoing, this file,
+# which is part of MySQL Connector/Python, is also subject to the
+# Universal FOSS Exception, version 1.0, a copy of which can be found at
+# http://oss.oracle.com/licenses/universal-foss-exception.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License, version 2.0, for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""Translations."""
+
+from typing import List, Optional, Union
+
+__all__: List[str] = ["get_client_error"]
+
+from .. import errorcode
+
+
+def get_client_error(error: Union[int, str], language: str = "eng") -> Optional[str]:
+    """Look up a client error
+
+    This function looks up the client error message for the given error
+    and returns it. If the error is not found, None is returned.
+
+    Error can be either an integer or a string. For example:
+        error: 2000
+        error: CR_UNKNOWN_ERROR
+
+    The language attribute can be used to retrieve a localized message, when
+    available.
+
+    Returns a string or None.
+    """
+    try:
+        tmp = __import__(
+            f"mysql.connector.locales.{language}",
+            globals(),
+            locals(),
+            ["client_error"],
+        )
+    except ImportError:
+        raise ImportError(
+            f"No localization support for language '{language}'"
+        ) from None
+    client_error = tmp.client_error
+
+    if isinstance(error, int):
+        errno = error
+        for key, value in errorcode.__dict__.items():
+            if value == errno:
+                error = key
+                break
+
+    if isinstance(error, str):
+        try:
+            return getattr(client_error, error)
+        except AttributeError:
+            return None
+
+    raise ValueError("error argument needs to be either an integer or string")
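A brief sketch of both lookup forms (2006 is the standard CR_SERVER_GONE_ERROR code):

```python
from mysql.connector.locales import get_client_error

print(get_client_error(2006))                    # look up by error number
print(get_client_error("CR_SERVER_GONE_ERROR"))  # look up by symbolic name
# Both print: MySQL server has gone away
```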
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/locales/eng/__init__.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/locales/eng/__init__.py
new file mode 100644
index 00000000..2e1c02b1
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/locales/eng/__init__.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2.0, as
+# published by the Free Software Foundation.
+#
+# This program is also distributed with certain software (including
+# but not limited to OpenSSL) that is licensed under separate terms,
+# as designated in a particular file or component or in included license
+# documentation. The authors of MySQL hereby grant you an
+# additional permission to link the program and your derivative works
+# with the separately licensed software that they have included with
+# MySQL.
+#
+# Without limiting anything contained in the foregoing, this file,
+# which is part of MySQL Connector/Python, is also subject to the
+# Universal FOSS Exception, version 1.0, a copy of which can be found at
+# http://oss.oracle.com/licenses/universal-foss-exception.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License, version 2.0, for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""English Content
+"""
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/locales/eng/client_error.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/locales/eng/client_error.py
new file mode 100644
index 00000000..89fb3553
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/locales/eng/client_error.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2.0, as
+# published by the Free Software Foundation.
+#
+# This program is also distributed with certain software (including
+# but not limited to OpenSSL) that is licensed under separate terms,
+# as designated in a particular file or component or in included license
+# documentation.
The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +"""MySQL Error Messages.""" + +# This file was auto-generated. +_GENERATED_ON = "2021-08-11" +_MYSQL_VERSION = (8, 0, 27) + +# pylint: disable=line-too-long +# Start MySQL Error messages +CR_UNKNOWN_ERROR = "Unknown MySQL error" +CR_SOCKET_CREATE_ERROR = "Can't create UNIX socket (%s)" +CR_CONNECTION_ERROR = ( + "Can't connect to local MySQL server through socket '%-.100s' (%s)" +) +CR_CONN_HOST_ERROR = "Can't connect to MySQL server on '%-.100s:%u' (%s)" +CR_IPSOCK_ERROR = "Can't create TCP/IP socket (%s)" +CR_UNKNOWN_HOST = "Unknown MySQL server host '%-.100s' (%s)" +CR_SERVER_GONE_ERROR = "MySQL server has gone away" +CR_VERSION_ERROR = "Protocol mismatch; server version = %s, client version = %s" +CR_OUT_OF_MEMORY = "MySQL client ran out of memory" +CR_WRONG_HOST_INFO = "Wrong host info" +CR_LOCALHOST_CONNECTION = "Localhost via UNIX socket" +CR_TCP_CONNECTION = "%-.100s via TCP/IP" +CR_SERVER_HANDSHAKE_ERR = "Error in server handshake" +CR_SERVER_LOST = "Lost connection to MySQL server during query" +CR_COMMANDS_OUT_OF_SYNC = "Commands out of sync; you can't run this command now" +CR_NAMEDPIPE_CONNECTION = "Named pipe: %-.32s" +CR_NAMEDPIPEWAIT_ERROR = "Can't wait for named pipe to host: %-.64s pipe: %-.32s (%s)" +CR_NAMEDPIPEOPEN_ERROR = "Can't open named pipe to host: %-.64s pipe: %-.32s (%s)" +CR_NAMEDPIPESETSTATE_ERROR = ( + "Can't set state of named pipe to host: %-.64s pipe: %-.32s (%s)" +) +CR_CANT_READ_CHARSET = "Can't initialize character set %-.32s (path: %-.100s)" +CR_NET_PACKET_TOO_LARGE = "Got packet bigger than 'max_allowed_packet' bytes" +CR_EMBEDDED_CONNECTION = "Embedded server" +CR_PROBE_SLAVE_STATUS = "Error on SHOW SLAVE STATUS:" +CR_PROBE_SLAVE_HOSTS = "Error on SHOW SLAVE HOSTS:" +CR_PROBE_SLAVE_CONNECT = "Error connecting to slave:" +CR_PROBE_MASTER_CONNECT = "Error connecting to master:" +CR_SSL_CONNECTION_ERROR = "SSL connection error: %-.100s" +CR_MALFORMED_PACKET = "Malformed packet" +CR_WRONG_LICENSE = "This client library is licensed only for use with MySQL servers having '%s' license" +CR_NULL_POINTER = "Invalid use of null pointer" +CR_NO_PREPARE_STMT = "Statement not prepared" +CR_PARAMS_NOT_BOUND = "No data supplied for parameters in prepared statement" +CR_DATA_TRUNCATED = "Data truncated" +CR_NO_PARAMETERS_EXISTS = "No parameters exist in the statement" +CR_INVALID_PARAMETER_NO = "Invalid parameter number" +CR_INVALID_BUFFER_USE = ( + "Can't send long data for non-string/non-binary data types (parameter: %s)" +) +CR_UNSUPPORTED_PARAM_TYPE = "Using unsupported buffer type: %s (parameter: %s)" +CR_SHARED_MEMORY_CONNECTION = 
"Shared memory: %-.100s" +CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = ( + "Can't open shared memory; client could not create request event (%s)" +) +CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = ( + "Can't open shared memory; no answer event received from server (%s)" +) +CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = ( + "Can't open shared memory; server could not allocate file mapping (%s)" +) +CR_SHARED_MEMORY_CONNECT_MAP_ERROR = ( + "Can't open shared memory; server could not get pointer to file mapping (%s)" +) +CR_SHARED_MEMORY_FILE_MAP_ERROR = ( + "Can't open shared memory; client could not allocate file mapping (%s)" +) +CR_SHARED_MEMORY_MAP_ERROR = ( + "Can't open shared memory; client could not get pointer to file mapping (%s)" +) +CR_SHARED_MEMORY_EVENT_ERROR = ( + "Can't open shared memory; client could not create %s event (%s)" +) +CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = ( + "Can't open shared memory; no answer from server (%s)" +) +CR_SHARED_MEMORY_CONNECT_SET_ERROR = ( + "Can't open shared memory; cannot send request event to server (%s)" +) +CR_CONN_UNKNOW_PROTOCOL = "Wrong or unknown protocol" +CR_INVALID_CONN_HANDLE = "Invalid connection handle" +CR_UNUSED_1 = "Connection using old (pre-4.1.1) authentication protocol refused (client option 'secure_auth' enabled)" +CR_FETCH_CANCELED = "Row retrieval was canceled by mysql_stmt_close() call" +CR_NO_DATA = "Attempt to read column without prior row fetch" +CR_NO_STMT_METADATA = "Prepared statement contains no metadata" +CR_NO_RESULT_SET = ( + "Attempt to read a row while there is no result set associated with the statement" +) +CR_NOT_IMPLEMENTED = "This feature is not implemented yet" +CR_SERVER_LOST_EXTENDED = "Lost connection to MySQL server at '%s', system error: %s" +CR_STMT_CLOSED = "Statement closed indirectly because of a preceding %s() call" +CR_NEW_STMT_METADATA = "The number of columns in the result set differs from the number of bound buffers. You must reset the statement, rebind the result set columns, and execute the statement again" +CR_ALREADY_CONNECTED = ( + "This handle is already connected. Use a separate handle for each connection." +) +CR_AUTH_PLUGIN_CANNOT_LOAD = "Authentication plugin '%s' cannot be loaded: %s" +CR_DUPLICATE_CONNECTION_ATTR = "There is an attribute with the same name already" +CR_AUTH_PLUGIN_ERR = "Authentication plugin '%s' reported error: %s" +CR_INSECURE_API_ERR = "Insecure API function call: '%s' Use instead: '%s'" +CR_FILE_NAME_TOO_LONG = "File name is too long" +CR_SSL_FIPS_MODE_ERR = "Set FIPS mode ON/STRICT failed" +CR_DEPRECATED_COMPRESSION_NOT_SUPPORTED = ( + "Compression protocol not supported with asynchronous protocol" +) +CR_COMPRESSION_WRONGLY_CONFIGURED = ( + "Connection failed due to wrongly configured compression algorithm" +) +CR_KERBEROS_USER_NOT_FOUND = ( + "SSO user not found, Please perform SSO authentication using kerberos." +) +CR_LOAD_DATA_LOCAL_INFILE_REJECTED = ( + "LOAD DATA LOCAL INFILE file request rejected due to restrictions on access." +) +CR_LOAD_DATA_LOCAL_INFILE_REALPATH_FAIL = ( + "Determining the real path for '%s' failed with error (%s): %s" +) +CR_DNS_SRV_LOOKUP_FAILED = "DNS SRV lookup failed with error : %s" +CR_MANDATORY_TRACKER_NOT_FOUND = ( + "Client does not recognise tracker type %s marked as mandatory by server." +) +CR_INVALID_FACTOR_NO = "Invalid first argument for MYSQL_OPT_USER_PASSWORD option. Valid value should be between 1 and 3 inclusive." 
+# End MySQL Error messages diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/network.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/network.py new file mode 100644 index 00000000..50364cfb --- /dev/null +++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/network.py @@ -0,0 +1,611 @@ +# Copyright (c) 2012, 2022, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License, version 2.0, as +# published by the Free Software Foundation. +# +# This program is also distributed with certain software (including +# but not limited to OpenSSL) that is licensed under separate terms, +# as designated in a particular file or component or in included license +# documentation. The authors of MySQL hereby grant you an +# additional permission to link the program and your derivative works +# with the separately licensed software that they have included with +# MySQL. +# +# Without limiting anything contained in the foregoing, this file, +# which is part of MySQL Connector/Python, is also subject to the +# Universal FOSS Exception, version 1.0, a copy of which can be found at +# http://oss.oracle.com/licenses/universal-foss-exception. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU General Public License, version 2.0, for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +# mypy: disable-error-code="attr-defined" + +"""Module implementing low-level socket communication with MySQL servers. +""" + +import os +import socket +import struct +import warnings +import zlib + +from collections import deque + +try: + import ssl + + TLS_VERSIONS = { + "TLSv1": ssl.PROTOCOL_TLSv1, + "TLSv1.1": ssl.PROTOCOL_TLSv1_1, + "TLSv1.2": ssl.PROTOCOL_TLSv1_2, + } + # TLSv1.3 included in PROTOCOL_TLS, but PROTOCOL_TLS is not included on 3.4 + TLS_VERSIONS["TLSv1.3"] = ( + ssl.PROTOCOL_TLS + if hasattr(ssl, "PROTOCOL_TLS") + else ssl.PROTOCOL_SSLv23 # Alias of PROTOCOL_TLS + ) + TLS_V1_3_SUPPORTED = hasattr(ssl, "HAS_TLSv1_3") and ssl.HAS_TLSv1_3 +except ImportError: + # If import fails, we don't have SSL support. + TLS_V1_3_SUPPORTED = False + +from typing import Any, Deque, List, Optional, Tuple, Union + +from .constants import MAX_PACKET_LENGTH +from .errors import InterfaceError, NotSupportedError, OperationalError +from .types import StrOrBytesPath +from .utils import init_bytearray + + +def _strioerror(err: IOError) -> str: + """Reformat the IOError error message + + This function reformats the IOError error message. 
+    """
+    if not err.errno:
+        return str(err)
+    return f"{err.errno} {err.strerror}"
+
+
+def _prepare_packets(buf: bytes, pktnr: int) -> List[bytes]:
+    """Prepare a packet for sending to the MySQL server"""
+    pkts = []
+    pllen = len(buf)
+    maxpktlen = MAX_PACKET_LENGTH
+    while pllen > maxpktlen:
+        pkts.append(b"\xff\xff\xff" + struct.pack("<B", pktnr) + buf[0:maxpktlen])
+        buf = buf[maxpktlen:]
+        pllen = len(buf)
+        pktnr = pktnr + 1
+    pkts.append(struct.pack("<I", pllen)[0:3] + struct.pack("<B", pktnr) + buf)
+    return pkts
+
+
+class BaseMySQLSocket:
+    """Base class for MySQL socket communication
+
+    This class should not be used directly but overloaded, changing the
+    methods to open a connection.
+    """
+
+    def __init__(self) -> None:
+        # holds the socket connection
+        self.sock: Optional[socket.socket] = None
+        self._connection_timeout: Optional[int] = None
+        self._packet_number: int = -1
+        self._compressed_packet_number: int = -1
+        self._packet_queue: Deque[bytearray] = deque()
+        self.server_host: Optional[str] = None
+        self.recvsize: int = 8192
+
+    def next_packet_number(self) -> int:
+        """Increments the packet number"""
+        self._packet_number = self._packet_number + 1
+        if self._packet_number > 255:
+            self._packet_number = 0
+        return self._packet_number
+
+    def next_compressed_packet_number(self) -> int:
+        """Increments the compressed packet number"""
+        self._compressed_packet_number = self._compressed_packet_number + 1
+        if self._compressed_packet_number > 255:
+            self._compressed_packet_number = 0
+        return self._compressed_packet_number
+
+    def open_connection(self) -> Any:
+        """Open the socket"""
+        raise NotImplementedError
+
+    def get_address(self) -> Any:
+        """Get the location of the socket"""
+        raise NotImplementedError
+
+    def shutdown(self) -> None:
+        """Shut down the socket before closing it"""
+        try:
+            self.sock.shutdown(socket.SHUT_RDWR)
+            self.sock.close()
+            del self._packet_queue
+        except (AttributeError, OSError):
+            pass
+
+    def close_connection(self) -> None:
+        """Close the socket"""
+        try:
+            self.sock.close()
+            del self._packet_queue
+        except (AttributeError, OSError):
+            pass
+
+    def __del__(self) -> None:
+        self.shutdown()
+
+    def send_plain(
+        self,
+        buf: bytes,
+        packet_number: Optional[int] = None,
+        compressed_packet_number: Optional[int] = None,
+    ) -> None:
+        """Send packets to the MySQL server"""
+        # Keep 'compressed_packet_number' for API backward compatibility
+        _ = compressed_packet_number
+        if packet_number is None:
+            self.next_packet_number()
+        else:
+            self._packet_number = packet_number
+        packets = _prepare_packets(buf, self._packet_number)
+        for packet in packets:
+            try:
+                self.sock.sendall(packet)
+            except IOError as err:
+                raise OperationalError(
+                    errno=2055, values=(self.get_address(), _strioerror(err))
+                ) from err
+            except AttributeError as err:
+                raise OperationalError(errno=2006) from err
+
+    send = send_plain
+
+    def send_compressed(
+        self,
+        buf: bytes,
+        packet_number: Optional[int] = None,
+        compressed_packet_number: Optional[int] = None,
+    ) -> None:
+        """Send compressed packets to the MySQL server"""
+        if packet_number is None:
+            self.next_packet_number()
+        else:
+            self._packet_number = packet_number
+        if compressed_packet_number is None:
+            self.next_compressed_packet_number()
+        else:
+            self._compressed_packet_number = compressed_packet_number
+
+        pktnr = self._packet_number
+        pllen = len(buf)
+        zpkts = []
+        maxpktlen = MAX_PACKET_LENGTH
+        if pllen > maxpktlen:
+            pkts = _prepare_packets(buf, pktnr)
+            tmpbuf = b"".join(pkts)
+            del pkts
+            zbuf = zlib.compress(tmpbuf[:16384])
+            header = (
+                struct.pack("<I", len(zbuf))[0:3]
+                + struct.pack("<B", self._compressed_packet_number)
+                + struct.pack("<I", 16384)[0:3]
+            )
+            zpkts.append(header + zbuf)
+            tmpbuf = tmpbuf[16384:]
+            pllen = len(tmpbuf)
+            self.next_compressed_packet_number()
+            while pllen > maxpktlen:
+                zbuf = zlib.compress(tmpbuf[:maxpktlen])
+                header = (
+                    struct.pack("<I", len(zbuf))[0:3]
+                    + struct.pack("<B", self._compressed_packet_number)
+                    + struct.pack("<I", maxpktlen)[0:3]
+                )
+                zpkts.append(header + zbuf)
+                tmpbuf = tmpbuf[maxpktlen:]
+                pllen = len(tmpbuf)
+                self.next_compressed_packet_number()
+            if tmpbuf:
+                zbuf = zlib.compress(tmpbuf)
+                header = (
+                    struct.pack("<I", len(zbuf))[0:3]
+                    + struct.pack("<B", self._compressed_packet_number)
+                    + struct.pack("<I", pllen)[0:3]
+                )
+                zpkts.append(header + zbuf)
+        else:
+            pkt = struct.pack("<I", pllen)[0:3] + struct.pack("<B", pktnr) + buf
+            if pllen > 50:
+                zbuf = zlib.compress(pkt)
+                zpkts.append(
+                    struct.pack("<I", len(zbuf))[0:3]
+                    + struct.pack("<B", self._compressed_packet_number)
+                    + struct.pack("<I", pllen + 4)[0:3]
+                    + zbuf
+                )
+            else:
+                header = (
+                    struct.pack("<I", pllen + 4)[0:3]
+                    + struct.pack("<B", self._compressed_packet_number)
+                    + struct.pack("<I", 0)[0:3]
+                )
+                zpkts.append(header + pkt)
+
+        for zpkt in zpkts:
+            try:
+                self.sock.sendall(zpkt)
+            except IOError as err:
+                raise OperationalError(
+                    errno=2055, values=(self.get_address(), _strioerror(err))
+                ) from err
+            except AttributeError as err:
+                raise OperationalError(errno=2006) from err
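A small self-contained sketch of the framing that _prepare_packets() produces (the payload and sequence number are arbitrary; _prepare_packets is a private helper, imported here only for illustration):

```python
from mysql.connector.network import _prepare_packets

# Each MySQL packet = 3-byte little-endian payload length + 1-byte
# sequence number + payload; payloads over MAX_PACKET_LENGTH are split.
pkts = _prepare_packets(b"SELECT 1", 0)
header, seq, payload = pkts[0][:3], pkts[0][3], pkts[0][4:]
assert int.from_bytes(header, "little") == len(payload) == 8
assert seq == 0 and payload == b"SELECT 1"
```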
+    def recv_plain(self) -> bytearray:
+        """Receive packets from the MySQL server"""
+        try:
+            # Read the header of the MySQL packet, 4 bytes
+            packet = bytearray(b"")
+            packet_len = 0
+            while packet_len < 4:
+                chunk = self.sock.recv(4 - packet_len)
+                if not chunk:
+                    raise InterfaceError(errno=2013)
+                packet += chunk
+                packet_len = len(packet)
+
+            # Save the packet number and payload length
+            self._packet_number = packet[3]
+            payload_len = struct.unpack("<I", packet[0:3] + b"\x00")[0]
+
+            # Read the payload
+            rest = payload_len
+            packet.extend(bytearray(payload_len))
+            packet_view = memoryview(packet)
+            packet_view = packet_view[4:]
+            while rest:
+                read = self.sock.recv_into(packet_view, rest)
+                if read == 0 and rest > 0:
+                    raise InterfaceError(errno=2013)
+                packet_view = packet_view[read:]
+                rest -= read
+            return packet
+        except IOError as err:
+            raise OperationalError(
+                errno=2055, values=(self.get_address(), _strioerror(err))
+            ) from err
+
+    recv = recv_plain
+
+    def _split_zipped_payload(self, packet_bunch: bytearray) -> None:
+        """Split compressed payload"""
+        while packet_bunch:
+            payload_length = struct.unpack("<I", packet_bunch[0:3] + b"\x00")[0]
+            self._packet_queue.append(packet_bunch[: payload_length + 4])
+            packet_bunch = packet_bunch[payload_length + 4 :]
+
+    def recv_compressed(self) -> Optional[bytearray]:
+        """Receive compressed packets from the MySQL server"""
+        try:
+            pkt = self._packet_queue.popleft()
+            self._packet_number = pkt[3]
+            return pkt
+        except IndexError:
+            pass
+
+        header = bytearray(b"")
+        packets = []
+        try:
+            abyte = self.sock.recv(1)
+            while abyte and len(header) < 7:
+                header += abyte
+                abyte = self.sock.recv(1)
+            while header:
+                if len(header) < 7:
+                    raise InterfaceError(errno=2013)
+
+                # Get length of compressed packet
+                zip_payload_length = struct.unpack("<I", header[0:3] + b"\x00")[0]
+                payload_length = struct.unpack("<I", header[4:7] + b"\x00")[0]
+
+                # Read the compressed payload
+                zip_payload = init_bytearray(abyte)
+                while len(zip_payload) < zip_payload_length:
+                    chunk = self.sock.recv(zip_payload_length - len(zip_payload))
+                    if not chunk:
+                        raise InterfaceError(errno=2013)
+                    zip_payload = zip_payload + chunk
+                if payload_length == 0:
+                    # Payload was sent uncompressed
+                    self._split_zipped_payload(zip_payload)
+                    return self._packet_queue.popleft()
+                packets.append(zip_payload)
+                if payload_length != 16384:
+                    break
+
+                # More compressed packets follow; read the next header
+                header = init_bytearray(b"")
+                abyte = self.sock.recv(1)
+                while abyte and len(header) < 7:
+                    header += abyte
+                    abyte = self.sock.recv(1)
+        except IOError as err:
+            raise OperationalError(
+                errno=2055, values=(self.get_address(), _strioerror(err))
+            ) from err
+
+        tmp = init_bytearray(b"")
+        for packet in packets:
+            tmp += zlib.decompress(packet)
+        self._split_zipped_payload(tmp)
+        del tmp
+
+        try:
+            pkt = self._packet_queue.popleft()
+            self._packet_number = pkt[3]
+            return pkt
+        except IndexError:
+            pass
+        return None
+
+    def set_connection_timeout(self, timeout: Optional[int]) -> None:
+        """Set the connection timeout"""
+        self._connection_timeout = timeout
+        if self.sock:
+            self.sock.settimeout(timeout)
+
+    def switch_to_ssl(
+        self,
+        ca: StrOrBytesPath,
+        cert: StrOrBytesPath,
+        key: StrOrBytesPath,
+        verify_cert: bool = False,
+        verify_identity: bool = False,
+        cipher_suites: Optional[str] = None,
+        tls_versions: Optional[List[str]] = None,
+    ) -> None:
+        """Switch the socket to use SSL"""
+        if not self.sock:
+            raise InterfaceError(errno=2048)
+
+        try:
+            if verify_cert:
+                cert_reqs = ssl.CERT_REQUIRED
+            elif verify_identity:
+                cert_reqs = ssl.CERT_OPTIONAL
+            else:
+                cert_reqs = ssl.CERT_NONE
+
+            if tls_versions is None or not tls_versions:
+                context = ssl.create_default_context()
+                if not verify_identity:
+                    context.check_hostname = False
+            else:
+                tls_versions.sort(reverse=True)
+
+                tls_version = tls_versions[0]
+                if (
+                    not TLS_V1_3_SUPPORTED
+                    and tls_version == "TLSv1.3"
+                    and len(tls_versions) > 1
+                ):
+                    tls_version = tls_versions[1]
+                ssl_protocol = TLS_VERSIONS[tls_version]
+                context = ssl.SSLContext(ssl_protocol)
+
+                if tls_version == "TLSv1.3":
+                    if "TLSv1.2" not in tls_versions:
+                        context.options |= ssl.OP_NO_TLSv1_2
+                    if "TLSv1.1" not in tls_versions:
+                        context.options |= ssl.OP_NO_TLSv1_1
+                    if "TLSv1" not in tls_versions:
+                        context.options |= ssl.OP_NO_TLSv1
+
+            context.check_hostname = False
+            context.verify_mode = cert_reqs
+            context.load_default_certs()
+
+            if ca:
+                try:
+                    context.load_verify_locations(ca)
+                except (IOError, ssl.SSLError) as err:
+                    self.sock.close()
+                    raise InterfaceError(f"Invalid CA Certificate: {err}") from err
+            if cert:
+                try:
+                    context.load_cert_chain(cert, key)
+                except (IOError, ssl.SSLError) as err:
+                    self.sock.close()
+                    raise InterfaceError(f"Invalid Certificate/Key: {err}") from err
+            if cipher_suites:
+                context.set_ciphers(cipher_suites)
+
+            if hasattr(self, "server_host"):
+                self.sock = context.wrap_socket(
+                    self.sock, server_hostname=self.server_host
+                )
+            else:
+                self.sock = context.wrap_socket(self.sock)
+
+            if verify_identity:
+                context.check_hostname = True
+                hostnames: List[str] = [self.server_host] if self.server_host else []
+                if os.name == "nt" and self.server_host == "localhost":
+                    hostnames = ["localhost", "127.0.0.1"]
+                    aliases = socket.gethostbyaddr(self.server_host)
+                    hostnames.extend([aliases[0]] + aliases[1])
+                match_found = False
+                errs = []
+                for hostname in hostnames:
+ try: + # Deprecated in Python 3.7 without a replacement and + # should be removed in the future, since OpenSSL now + # performs hostname matching + # pylint: disable=deprecated-method + ssl.match_hostname(self.sock.getpeercert(), hostname) + # pylint: enable=deprecated-method + except ssl.CertificateError as err: + errs.append(str(err)) + else: + match_found = True + break + if not match_found: + self.sock.close() + raise InterfaceError( + f"Unable to verify server identity: {', '.join(errs)}" + ) + except NameError as err: + raise NotSupportedError("Python installation has no SSL support") from err + except (ssl.SSLError, IOError) as err: + raise InterfaceError( + errno=2055, values=(self.get_address(), _strioerror(err)) + ) from err + except ssl.CertificateError as err: + raise InterfaceError(str(err)) from err + except NotImplementedError as err: + raise InterfaceError(str(err)) from err + + +class MySQLUnixSocket(BaseMySQLSocket): + """MySQL socket class using UNIX sockets + + Opens a connection through the UNIX socket of the MySQL Server. + """ + + def __init__(self, unix_socket: str = "/tmp/mysql.sock") -> None: + super().__init__() + self.unix_socket: str = unix_socket + + def get_address(self) -> str: + return self.unix_socket + + def open_connection(self) -> None: + try: + self.sock = socket.socket( + socket.AF_UNIX, socket.SOCK_STREAM # pylint: disable=no-member + ) + self.sock.settimeout(self._connection_timeout) + self.sock.connect(self.unix_socket) + except IOError as err: + raise InterfaceError( + errno=2002, values=(self.get_address(), _strioerror(err)) + ) from err + except Exception as err: + raise InterfaceError(str(err)) from err + + def switch_to_ssl( + self, *args: Any, **kwargs: Any # pylint: disable=unused-argument + ) -> None: + """Switch the socket to use SSL.""" + warnings.warn( + "SSL is disabled when using unix socket connections", + Warning, + ) + + +class MySQLTCPSocket(BaseMySQLSocket): + """MySQL socket class using TCP/IP + + Opens a TCP/IP connection to the MySQL Server. + """ + + def __init__( + self, host: str = "127.0.0.1", port: int = 3306, force_ipv6: bool = False + ) -> None: + super().__init__() + self.server_host: str = host + self.server_port: int = port + self.force_ipv6: bool = force_ipv6 + self._family: int = 0 + + def get_address(self) -> str: + return f"{self.server_host}:{self.server_port}" + + def open_connection(self) -> None: + """Open the TCP/IP connection to the MySQL server""" + # pylint: disable=no-member + # Get address information + addrinfo: Union[ + Tuple[None, None, None, None, None], + Tuple[ + socket.AddressFamily, + socket.SocketKind, + int, + str, + Union[Tuple[str, int], Tuple[str, int, int, int]], + ], + ] = (None, None, None, None, None) + try: + addrinfos = socket.getaddrinfo( + self.server_host, + self.server_port, + 0, + socket.SOCK_STREAM, + socket.SOL_TCP, + ) + # If multiple results we favor IPv4, unless IPv6 was forced. 
+            for info in addrinfos:
+                if self.force_ipv6 and info[0] == socket.AF_INET6:
+                    addrinfo = info
+                    break
+                if info[0] == socket.AF_INET:
+                    addrinfo = info
+                    break
+            if self.force_ipv6 and addrinfo[0] is None:
+                raise InterfaceError(f"No IPv6 address found for {self.server_host}")
+            if addrinfo[0] is None:
+                addrinfo = addrinfos[0]
+        except IOError as err:
+            raise InterfaceError(
+                errno=2003, values=(self.get_address(), _strioerror(err))
+            ) from err
+        else:
+            (self._family, socktype, proto, _, sockaddr) = addrinfo
+
+        # Instantiate the socket and connect
+        try:
+            self.sock = socket.socket(self._family, socktype, proto)
+            self.sock.settimeout(self._connection_timeout)
+            self.sock.connect(sockaddr)
+        except IOError as err:
+            raise InterfaceError(
+                errno=2003,
+                values=(
+                    self.server_host,
+                    self.server_port,
+                    _strioerror(err),
+                ),
+            ) from err
+        except Exception as err:
+            raise OperationalError(str(err)) from err
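A hedged sketch of driving this transport layer directly; the host, port, and a reachable server are assumptions, and real applications should go through mysql.connector.connect(), which manages these sockets internally:

```python
from mysql.connector.network import MySQLTCPSocket

sock = MySQLTCPSocket(host="127.0.0.1", port=3306)  # assumed local server
sock.set_connection_timeout(10)
sock.open_connection()    # raises InterfaceError(errno=2003) if unreachable
greeting = sock.recv()    # the server speaks first: initial handshake packet
print(greeting[4])        # protocol version byte (10 for modern servers)
sock.shutdown()
```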
diff --git a/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/optionfiles.py b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/optionfiles.py
new file mode 100644
index 00000000..13f71321
--- /dev/null
+++ b/mysql-sqlalchemy-workspace/lib/python3.9/site-packages/mysql/connector/optionfiles.py
@@ -0,0 +1,357 @@
+# Copyright (c) 2014, 2022, Oracle and/or its affiliates.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2.0, as
+# published by the Free Software Foundation.
+#
+# This program is also distributed with certain software (including
+# but not limited to OpenSSL) that is licensed under separate terms,
+# as designated in a particular file or component or in included license
+# documentation. The authors of MySQL hereby grant you an
+# additional permission to link the program and your derivative works
+# with the separately licensed software that they have included with
+# MySQL.
+#
+# Without limiting anything contained in the foregoing, this file,
+# which is part of MySQL Connector/Python, is also subject to the
+# Universal FOSS Exception, version 1.0, a copy of which can be found at
+# http://oss.oracle.com/licenses/universal-foss-exception.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License, version 2.0, for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+# mypy: disable-error-code="attr-defined"
+
+"""Implements parser to parse MySQL option files."""
+
+import codecs
+import io
+import os
+import re
+
+from configparser import ConfigParser as SafeConfigParser, MissingSectionHeaderError
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+from .constants import CNX_POOL_ARGS, DEFAULT_CONFIGURATION
+
+DEFAULT_EXTENSIONS: Dict[str, Tuple[str, ...]] = {
+    "nt": ("ini", "cnf"),
+    "posix": ("cnf",),
+}
+
+
+def read_option_files(**config: Union[str, List[str]]) -> Dict[str, Any]:
+    """
+    Read option files for connection parameters.
+
+    Checks whether the connection arguments contain option file arguments,
+    and reads the option files accordingly.
+    """
+    if "option_files" in config:
+        try:
+            if isinstance(config["option_groups"], str):
+                config["option_groups"] = [config["option_groups"]]
+            groups = config["option_groups"]
+            del config["option_groups"]
+        except KeyError:
+            groups = ["client", "connector_python"]
+
+        if isinstance(config["option_files"], str):
+            config["option_files"] = [config["option_files"]]
+        option_parser = MySQLOptionsParser(
+            list(config["option_files"]), keep_dashes=False
+        )
+        del config["option_files"]
+
+        config_from_file = option_parser.get_groups_as_dict_with_priority(*groups)
+        config_options: Dict[str, Tuple[str, int]] = {}
+        for group in groups:
+            try:
+                for option, value in config_from_file[group].items():
+                    try:
+                        if option == "socket":
+                            option = "unix_socket"
+
+                        if option not in CNX_POOL_ARGS and option != "failover":
+                            _ = DEFAULT_CONFIGURATION[option]
+
+                        if (
+                            option not in config_options
+                            or config_options[option][1] <= value[1]
+                        ):
+                            config_options[option] = value
+                    except KeyError:
+                        if group == "connector_python":
+                            raise AttributeError(
+                                f"Unsupported argument '{option}'"
+                            ) from None
+            except KeyError:
+                continue
+
+        not_evaluate = ("password", "passwd")
+        for option, value in config_options.items():
+            if option not in config:
+                try:
+                    if option in not_evaluate:
+                        config[option] = value[0]
+                    else:
+                        config[option] = eval(value[0])  # pylint: disable=eval-used
+                except (NameError, SyntaxError):
+                    config[option] = value[0]
+
+    return config
+
+
+class MySQLOptionsParser(SafeConfigParser):
+    """This class implements methods to parse MySQL option files"""
+
+    def __init__(
+        self, files: Optional[Union[List[str], str]] = None, keep_dashes: bool = True
+    ) -> None:
+        """Initialize
+
+        If files is given, the listed option files are read.
+
+        Raises ValueError if one of the given option files cannot be read.
+        """
+
+        # Regular expression to allow options with no value (for Python v2.6)
+        self.optcre: re.Pattern = re.compile(
+            r"(?P