`_
-
-
diff --git a/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/RECORD b/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/RECORD
deleted file mode 100644
index 68e0f28..0000000
--- a/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/RECORD
+++ /dev/null
@@ -1,30 +0,0 @@
-Flask_WTF-0.14.2.dist-info/DESCRIPTION.rst,sha256=vyJWnOD4vgnZ6x2ERr5EH1l2uzLxXCBhr_O1L6Ell2E,584
-Flask_WTF-0.14.2.dist-info/LICENSE.txt,sha256=oHX42YrP2wXdmHFiQrniwbOrmHIpJrPEz2yRasFOg1A,1490
-Flask_WTF-0.14.2.dist-info/METADATA,sha256=M8ZfImxUciRZ5Av5r1x37JnEC3wG5sacQv346wmldHU,1846
-Flask_WTF-0.14.2.dist-info/RECORD,,
-Flask_WTF-0.14.2.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113
-Flask_WTF-0.14.2.dist-info/metadata.json,sha256=qGwhg5DSr2WilK8cvCcQsdrtDJ5NFgR1faLrO8YZCAY,1370
-Flask_WTF-0.14.2.dist-info/top_level.txt,sha256=zK3flQPSjYTkAMjB0V6Jhu3jyotC0biL1mMhzitYoog,10
-flask_wtf/__init__.py,sha256=zNLRzvfi7PLTc7jkqQT7pzgtsw9_9eN7BfO4fzwKxJc,406
-flask_wtf/_compat.py,sha256=4h1U_W5vbM9L8sJ4ZPFevuneM1TirnBTTVrsHRH3uUE,849
-flask_wtf/csrf.py,sha256=suKAZarzLIBuiJFqwP--RldEYabPj0DGfYkQA32Cc1E,11554
-flask_wtf/file.py,sha256=2UnODjSq47IjsFQMiu_z218vFA5pnQ9nL1FpX7hpK1M,2971
-flask_wtf/form.py,sha256=lpx-ItUnKjYOW8VxQpBAlbhoROJNd2PHi3v0loPPyYI,4948
-flask_wtf/html5.py,sha256=ReZHJto8DAZkO3BxUDdHnkyz5mM21KtqKYh0achJ5IM,372
-flask_wtf/i18n.py,sha256=xMB_jHCOaWfF1RXm7E6hsRHwPsUyVyKX2Rhy3tBOUgk,1790
-flask_wtf/recaptcha/__init__.py,sha256=q3TC7tZPSAZ3On3GApZKGn0EcydX4zprisbyTlhN3sQ,86
-flask_wtf/recaptcha/fields.py,sha256=kN_10iZYQcYg1EtxFp4B87BlFnnrJCktrh7bTykOVj4,453
-flask_wtf/recaptcha/validators.py,sha256=8UgjA72OxUyHVk_lm8-fGhPEvKgkMtsoFNt7yzjo0xw,2398
-flask_wtf/recaptcha/widgets.py,sha256=me-oaqMNPW2BLujNTuDHCXWcVhh6eI7wlm6_TIrIF_U,1267
-Flask_WTF-0.14.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-flask_wtf/recaptcha/__pycache__/fields.cpython-36.pyc,,
-flask_wtf/recaptcha/__pycache__/validators.cpython-36.pyc,,
-flask_wtf/recaptcha/__pycache__/widgets.cpython-36.pyc,,
-flask_wtf/recaptcha/__pycache__/__init__.cpython-36.pyc,,
-flask_wtf/__pycache__/csrf.cpython-36.pyc,,
-flask_wtf/__pycache__/file.cpython-36.pyc,,
-flask_wtf/__pycache__/form.cpython-36.pyc,,
-flask_wtf/__pycache__/html5.cpython-36.pyc,,
-flask_wtf/__pycache__/i18n.cpython-36.pyc,,
-flask_wtf/__pycache__/_compat.cpython-36.pyc,,
-flask_wtf/__pycache__/__init__.cpython-36.pyc,,
diff --git a/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/WHEEL b/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/WHEEL
deleted file mode 100644
index 7bf9daa..0000000
--- a/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.30.0.a0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/metadata.json b/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/metadata.json
deleted file mode 100644
index d48bac6..0000000
--- a/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Flask", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.details": {"contacts": [{"email": "me@lepture.com", "name": "Hsiaoming Yang", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/lepture/flask-wtf"}}}, "extras": [], "generator": "bdist_wheel (0.30.0.a0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask-WTF", "platform": "any", "run_requires": [{"requires": ["Flask", "WTForms"]}], "summary": "Simple integration of Flask and WTForms.", "version": "0.14.2"}
\ No newline at end of file
diff --git a/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/top_level.txt b/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/top_level.txt
deleted file mode 100644
index 716f422..0000000
--- a/venv/Lib/site-packages/Flask_WTF-0.14.2.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-flask_wtf
diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst
deleted file mode 100644
index 1594da5..0000000
--- a/venv/Lib/site-packages/Jinja2-2.10.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,37 +0,0 @@
-
-Jinja2
-~~~~~~
-
-Jinja2 is a template engine written in pure Python. It provides a
-`Django`_ inspired non-XML syntax but supports inline expressions and
-an optional `sandboxed`_ environment.
-
-Nutshell
---------
-
-Here a small example of a Jinja template::
-
- {% extends 'base.html' %}
- {% block title %}Memberlist{% endblock %}
- {% block content %}
-      <ul>
-      {% for user in users %}
-        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
-      {% endfor %}
-      </ul>
- {% endblock %}
-
-Philosophy
-----------
-
-Application logic is for the controller but don't try to make the life
-for the template designer too hard by giving him too few functionality.
-
-For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
-
-.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
-.. _Django: https://www.djangoproject.com/
-.. _Jinja2 webpage: http://jinja.pocoo.org/
-.. _documentation: http://jinja.pocoo.org/2/documentation/
-
-
diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/INSTALLER b/venv/Lib/site-packages/Jinja2-2.10.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/venv/Lib/site-packages/Jinja2-2.10.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/LICENSE.txt b/venv/Lib/site-packages/Jinja2-2.10.dist-info/LICENSE.txt
deleted file mode 100644
index 31bf900..0000000
--- a/venv/Lib/site-packages/Jinja2-2.10.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
- * The names of the contributors may not be used to endorse or
- promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/METADATA b/venv/Lib/site-packages/Jinja2-2.10.dist-info/METADATA
deleted file mode 100644
index 40f2b46..0000000
--- a/venv/Lib/site-packages/Jinja2-2.10.dist-info/METADATA
+++ /dev/null
@@ -1,68 +0,0 @@
-Metadata-Version: 2.0
-Name: Jinja2
-Version: 2.10
-Summary: A small but fast and easy to use stand-alone template engine written in pure python.
-Home-page: http://jinja.pocoo.org/
-Author: Armin Ronacher
-Author-email: armin.ronacher@active-4.com
-License: BSD
-Description-Content-Type: UNKNOWN
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Web Environment
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: Text Processing :: Markup :: HTML
-Requires-Dist: MarkupSafe (>=0.23)
-Provides-Extra: i18n
-Requires-Dist: Babel (>=0.8); extra == 'i18n'
-
-
-Jinja2
-~~~~~~
-
-Jinja2 is a template engine written in pure Python. It provides a
-`Django`_ inspired non-XML syntax but supports inline expressions and
-an optional `sandboxed`_ environment.
-
-Nutshell
---------
-
-Here a small example of a Jinja template::
-
- {% extends 'base.html' %}
- {% block title %}Memberlist{% endblock %}
- {% block content %}
-      <ul>
-      {% for user in users %}
-        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
-      {% endfor %}
-      </ul>
- {% endblock %}
-
-Philosophy
-----------
-
-Application logic is for the controller but don't try to make the life
-for the template designer too hard by giving him too few functionality.
-
-For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
-
-.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
-.. _Django: https://www.djangoproject.com/
-.. _Jinja2 webpage: http://jinja.pocoo.org/
-.. _documentation: http://jinja.pocoo.org/2/documentation/
-
-
diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/RECORD b/venv/Lib/site-packages/Jinja2-2.10.dist-info/RECORD
deleted file mode 100644
index 4bef2c1..0000000
--- a/venv/Lib/site-packages/Jinja2-2.10.dist-info/RECORD
+++ /dev/null
@@ -1,63 +0,0 @@
-Jinja2-2.10.dist-info/DESCRIPTION.rst,sha256=b5ckFDoM7vVtz_mAsJD4OPteFKCqE7beu353g4COoYI,978
-Jinja2-2.10.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554
-Jinja2-2.10.dist-info/METADATA,sha256=18EgU8zR6-av-0-5y_gXebzK4GnBB_76lALUsl-6QHM,2258
-Jinja2-2.10.dist-info/RECORD,,
-Jinja2-2.10.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
-Jinja2-2.10.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72
-Jinja2-2.10.dist-info/metadata.json,sha256=NPUJ9TMBxVQAv_kTJzvU8HwmP-4XZvbK9mz6_4YUVl4,1473
-Jinja2-2.10.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
-jinja2/__init__.py,sha256=xJHjaMoy51_KXn1wf0cysH6tUUifUxZCwSOfcJGEYZw,2614
-jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596
-jinja2/_identifier.py,sha256=W1QBSY-iJsyt6oR_nKSuNNCzV95vLIOYgUNPUI1d5gU,1726
-jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144
-jinja2/asyncsupport.py,sha256=UErQ3YlTLaSjFb94P4MVn08-aVD9jJxty2JVfMRb-1M,7878
-jinja2/bccache.py,sha256=nQldx0ZRYANMyfvOihRoYFKSlUdd5vJkS7BjxNwlOZM,12794
-jinja2/compiler.py,sha256=BqC5U6JxObSRhblyT_a6Tp5GtEU5z3US1a4jLQaxxgo,65386
-jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626
-jinja2/debug.py,sha256=WTVeUFGUa4v6ReCsYv-iVPa3pkNB75OinJt3PfxNdXs,12045
-jinja2/defaults.py,sha256=Em-95hmsJxIenDCZFB1YSvf9CNhe9rBmytN3yUrBcWA,1400
-jinja2/environment.py,sha256=VnkAkqw8JbjZct4tAyHlpBrka2vqB-Z58RAP-32P1ZY,50849
-jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428
-jinja2/ext.py,sha256=atMQydEC86tN1zUsdQiHw5L5cF62nDbqGue25Yiu3N4,24500
-jinja2/filters.py,sha256=yOAJk0MsH-_gEC0i0U6NweVQhbtYaC-uE8xswHFLF4w,36528
-jinja2/idtracking.py,sha256=2GbDSzIvGArEBGLkovLkqEfmYxmWsEf8c3QZwM4uNsw,9197
-jinja2/lexer.py,sha256=ySEPoXd1g7wRjsuw23uimS6nkGN5aqrYwcOKxCaVMBQ,28559
-jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382
-jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340
-jinja2/nativetypes.py,sha256=_sJhS8f-8Q0QMIC0dm1YEdLyxEyoO-kch8qOL5xUDfE,7308
-jinja2/nodes.py,sha256=L10L_nQDfubLhO3XjpF9qz46FSh2clL-3e49ogVlMmA,30853
-jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722
-jinja2/parser.py,sha256=lPzTEbcpTRBLw8ii6OYyExHeAhaZLMA05Hpv4ll3ULk,35875
-jinja2/runtime.py,sha256=DHdD38Pq8gj7uWQC5usJyWFoNWL317A9AvXOW_CLB34,27755
-jinja2/sandbox.py,sha256=TVyZHlNqqTzsv9fv2NvJNmSdWRHTguhyMHdxjWms32U,16708
-jinja2/tests.py,sha256=iJQLwbapZr-EKquTG_fVOVdwHUUKf3SX9eNkjQDF8oU,4237
-jinja2/utils.py,sha256=q24VupGZotQ-uOyrJxCaXtDWhZC1RgsQG7kcdmjck2Q,20629
-jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316
-Jinja2-2.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-jinja2/__pycache__/asyncfilters.cpython-36.pyc,,
-jinja2/__pycache__/asyncsupport.cpython-36.pyc,,
-jinja2/__pycache__/bccache.cpython-36.pyc,,
-jinja2/__pycache__/compiler.cpython-36.pyc,,
-jinja2/__pycache__/constants.cpython-36.pyc,,
-jinja2/__pycache__/debug.cpython-36.pyc,,
-jinja2/__pycache__/defaults.cpython-36.pyc,,
-jinja2/__pycache__/environment.cpython-36.pyc,,
-jinja2/__pycache__/exceptions.cpython-36.pyc,,
-jinja2/__pycache__/ext.cpython-36.pyc,,
-jinja2/__pycache__/filters.cpython-36.pyc,,
-jinja2/__pycache__/idtracking.cpython-36.pyc,,
-jinja2/__pycache__/lexer.cpython-36.pyc,,
-jinja2/__pycache__/loaders.cpython-36.pyc,,
-jinja2/__pycache__/meta.cpython-36.pyc,,
-jinja2/__pycache__/nativetypes.cpython-36.pyc,,
-jinja2/__pycache__/nodes.cpython-36.pyc,,
-jinja2/__pycache__/optimizer.cpython-36.pyc,,
-jinja2/__pycache__/parser.cpython-36.pyc,,
-jinja2/__pycache__/runtime.cpython-36.pyc,,
-jinja2/__pycache__/sandbox.cpython-36.pyc,,
-jinja2/__pycache__/tests.cpython-36.pyc,,
-jinja2/__pycache__/utils.cpython-36.pyc,,
-jinja2/__pycache__/visitor.cpython-36.pyc,,
-jinja2/__pycache__/_compat.cpython-36.pyc,,
-jinja2/__pycache__/_identifier.cpython-36.pyc,,
-jinja2/__pycache__/__init__.cpython-36.pyc,,
diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/WHEEL b/venv/Lib/site-packages/Jinja2-2.10.dist-info/WHEEL
deleted file mode 100644
index 7332a41..0000000
--- a/venv/Lib/site-packages/Jinja2-2.10.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.30.0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/entry_points.txt b/venv/Lib/site-packages/Jinja2-2.10.dist-info/entry_points.txt
deleted file mode 100644
index 32e6b75..0000000
--- a/venv/Lib/site-packages/Jinja2-2.10.dist-info/entry_points.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-
- [babel.extractors]
- jinja2 = jinja2.ext:babel_extract[i18n]
-
\ No newline at end of file
diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/metadata.json b/venv/Lib/site-packages/Jinja2-2.10.dist-info/metadata.json
deleted file mode 100644
index 7f5dc38..0000000
--- a/venv/Lib/site-packages/Jinja2-2.10.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://jinja.pocoo.org/"}}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "extras": ["i18n"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "Jinja2", "run_requires": [{"extra": "i18n", "requires": ["Babel (>=0.8)"]}, {"requires": ["MarkupSafe (>=0.23)"]}], "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "version": "2.10"}
\ No newline at end of file
diff --git a/venv/Lib/site-packages/Jinja2-2.10.dist-info/top_level.txt b/venv/Lib/site-packages/Jinja2-2.10.dist-info/top_level.txt
deleted file mode 100644
index 7f7afbf..0000000
--- a/venv/Lib/site-packages/Jinja2-2.10.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-jinja2
diff --git a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/PKG-INFO b/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/PKG-INFO
deleted file mode 100644
index f7b87d1..0000000
--- a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/PKG-INFO
+++ /dev/null
@@ -1,71 +0,0 @@
-Metadata-Version: 1.1
-Name: Mako
-Version: 1.0.7
-Summary: A super-fast templating language that borrows the best ideas from the existing templating languages.
-Home-page: http://www.makotemplates.org/
-Author: Mike Bayer
-Author-email: mike@zzzcomputing.com
-License: MIT
-Description: =========================
- Mako Templates for Python
- =========================
-
- Mako is a template library written in Python. It provides a familiar, non-XML
- syntax which compiles into Python modules for maximum performance. Mako's
- syntax and API borrows from the best ideas of many others, including Django
- templates, Cheetah, Myghty, and Genshi. Conceptually, Mako is an embedded
- Python (i.e. Python Server Page) language, which refines the familiar ideas
- of componentized layout and inheritance to produce one of the most
- straightforward and flexible models available, while also maintaining close
- ties to Python calling and scoping semantics.
-
- Nutshell
- ========
-
- ::
-
- <%inherit file="base.html"/>
- <%
- rows = [[v for v in range(0,10)] for row in range(0,10)]
- %>
-            <table>
-                % for row in rows:
-                    ${makerow(row)}
-                % endfor
-            </table>
-
-            <%def name="makerow(row)">
-                <tr>
-                % for name in row:
-                    <td>${name}</td>\
-                % endfor
-                </tr>
-            </%def>
-
- Philosophy
- ===========
-
- Python is a great scripting language. Don't reinvent the wheel...your templates can handle it !
-
- Documentation
- ==============
-
- See documentation for Mako at http://www.makotemplates.org/docs/
-
- License
- ========
-
- Mako is licensed under an MIT-style license (see LICENSE).
- Other incorporated projects may be licensed under different licenses.
- All licenses allow for non-commercial and commercial use.
-
-Keywords: templates
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Web Environment
-Classifier: Intended Audience :: Developers
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
diff --git a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/SOURCES.txt b/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/SOURCES.txt
deleted file mode 100644
index f04785b..0000000
--- a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,192 +0,0 @@
-AUTHORS
-CHANGES
-LICENSE
-MANIFEST.in
-README.rst
-setup.cfg
-setup.py
-Mako.egg-info/PKG-INFO
-Mako.egg-info/SOURCES.txt
-Mako.egg-info/dependency_links.txt
-Mako.egg-info/entry_points.txt
-Mako.egg-info/not-zip-safe
-Mako.egg-info/requires.txt
-Mako.egg-info/top_level.txt
-doc/caching.html
-doc/changelog.html
-doc/defs.html
-doc/filtering.html
-doc/genindex.html
-doc/index.html
-doc/inheritance.html
-doc/namespaces.html
-doc/runtime.html
-doc/search.html
-doc/searchindex.js
-doc/syntax.html
-doc/unicode.html
-doc/usage.html
-doc/_sources/caching.rst.txt
-doc/_sources/changelog.rst.txt
-doc/_sources/defs.rst.txt
-doc/_sources/filtering.rst.txt
-doc/_sources/index.rst.txt
-doc/_sources/inheritance.rst.txt
-doc/_sources/namespaces.rst.txt
-doc/_sources/runtime.rst.txt
-doc/_sources/syntax.rst.txt
-doc/_sources/unicode.rst.txt
-doc/_sources/usage.rst.txt
-doc/_static/basic.css
-doc/_static/changelog.css
-doc/_static/classic.css
-doc/_static/comment-bright.png
-doc/_static/comment-close.png
-doc/_static/comment.png
-doc/_static/default.css
-doc/_static/docs.css
-doc/_static/doctools.js
-doc/_static/down-pressed.png
-doc/_static/down.png
-doc/_static/file.png
-doc/_static/jquery-3.1.0.js
-doc/_static/jquery.js
-doc/_static/makoLogo.png
-doc/_static/minus.png
-doc/_static/plus.png
-doc/_static/pygments.css
-doc/_static/searchtools.js
-doc/_static/sidebar.js
-doc/_static/site.css
-doc/_static/sphinx_paramlinks.css
-doc/_static/underscore-1.3.1.js
-doc/_static/underscore.js
-doc/_static/up-pressed.png
-doc/_static/up.png
-doc/_static/websupport.js
-doc/build/Makefile
-doc/build/caching.rst
-doc/build/changelog.rst
-doc/build/conf.py
-doc/build/defs.rst
-doc/build/filtering.rst
-doc/build/index.rst
-doc/build/inheritance.rst
-doc/build/namespaces.rst
-doc/build/requirements.txt
-doc/build/runtime.rst
-doc/build/syntax.rst
-doc/build/unicode.rst
-doc/build/usage.rst
-doc/build/builder/__init__.py
-doc/build/builder/builders.py
-doc/build/builder/util.py
-doc/build/static/docs.css
-doc/build/static/makoLogo.png
-doc/build/static/site.css
-doc/build/templates/base.mako
-doc/build/templates/genindex.mako
-doc/build/templates/layout.mako
-doc/build/templates/page.mako
-doc/build/templates/rtd_layout.mako
-doc/build/templates/search.mako
-examples/bench/basic.py
-examples/bench/cheetah/footer.tmpl
-examples/bench/cheetah/header.tmpl
-examples/bench/cheetah/template.tmpl
-examples/bench/django/templatetags/__init__.py
-examples/bench/django/templatetags/bench.py
-examples/bench/kid/base.kid
-examples/bench/kid/template.kid
-examples/bench/myghty/base.myt
-examples/bench/myghty/template.myt
-examples/wsgi/run_wsgi.py
-mako/__init__.py
-mako/_ast_util.py
-mako/ast.py
-mako/cache.py
-mako/cmd.py
-mako/codegen.py
-mako/compat.py
-mako/exceptions.py
-mako/filters.py
-mako/lexer.py
-mako/lookup.py
-mako/parsetree.py
-mako/pygen.py
-mako/pyparser.py
-mako/runtime.py
-mako/template.py
-mako/util.py
-mako/ext/__init__.py
-mako/ext/autohandler.py
-mako/ext/babelplugin.py
-mako/ext/beaker_cache.py
-mako/ext/extract.py
-mako/ext/linguaplugin.py
-mako/ext/preprocessors.py
-mako/ext/pygmentplugin.py
-mako/ext/turbogears.py
-test/__init__.py
-test/sample_module_namespace.py
-test/test_ast.py
-test/test_block.py
-test/test_cache.py
-test/test_call.py
-test/test_cmd.py
-test/test_decorators.py
-test/test_def.py
-test/test_exceptions.py
-test/test_filters.py
-test/test_inheritance.py
-test/test_lexer.py
-test/test_lookup.py
-test/test_loop.py
-test/test_lru.py
-test/test_namespace.py
-test/test_pygen.py
-test/test_runtime.py
-test/test_template.py
-test/test_tgplugin.py
-test/test_util.py
-test/util.py
-test/ext/__init__.py
-test/ext/test_babelplugin.py
-test/ext/test_linguaplugin.py
-test/foo/__init__.py
-test/foo/test_ns.py
-test/templates/badbom.html
-test/templates/bom.html
-test/templates/bommagic.html
-test/templates/chs_unicode.html
-test/templates/chs_unicode_py3k.html
-test/templates/chs_utf8.html
-test/templates/cmd_good.mako
-test/templates/cmd_runtime.mako
-test/templates/cmd_syntax.mako
-test/templates/crlf.html
-test/templates/gettext.mako
-test/templates/gettext_cp1251.mako
-test/templates/gettext_utf8.mako
-test/templates/index.html
-test/templates/internationalization.html
-test/templates/modtest.html
-test/templates/read_unicode.html
-test/templates/read_unicode_py3k.html
-test/templates/runtimeerr.html
-test/templates/runtimeerr_py3k.html
-test/templates/unicode.html
-test/templates/unicode_arguments.html
-test/templates/unicode_arguments_py3k.html
-test/templates/unicode_code.html
-test/templates/unicode_code_py3k.html
-test/templates/unicode_expr.html
-test/templates/unicode_expr_py3k.html
-test/templates/unicode_runtime_error.html
-test/templates/unicode_syntax_error.html
-test/templates/foo/modtest.html.py
-test/templates/othersubdir/foo.html
-test/templates/subdir/incl.html
-test/templates/subdir/index.html
-test/templates/subdir/modtest.html
-test/templates/subdir/foo/modtest.html.py
\ No newline at end of file
diff --git a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/dependency_links.txt b/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/entry_points.txt b/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/entry_points.txt
deleted file mode 100644
index 3b15006..0000000
--- a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/entry_points.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-
- [python.templating.engines]
- mako = mako.ext.turbogears:TGPlugin
-
- [pygments.lexers]
- mako = mako.ext.pygmentplugin:MakoLexer
- html+mako = mako.ext.pygmentplugin:MakoHtmlLexer
- xml+mako = mako.ext.pygmentplugin:MakoXmlLexer
- js+mako = mako.ext.pygmentplugin:MakoJavascriptLexer
- css+mako = mako.ext.pygmentplugin:MakoCssLexer
-
- [babel.extractors]
- mako = mako.ext.babelplugin:extract
-
- [lingua.extractors]
- mako = mako.ext.linguaplugin:LinguaMakoExtractor
-
- [console_scripts]
- mako-render = mako.cmd:cmdline
-
\ No newline at end of file
diff --git a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/installed-files.txt b/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/installed-files.txt
deleted file mode 100644
index 6f52c1b..0000000
--- a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/installed-files.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-..\mako\ast.py
-..\mako\cache.py
-..\mako\cmd.py
-..\mako\codegen.py
-..\mako\compat.py
-..\mako\exceptions.py
-..\mako\filters.py
-..\mako\lexer.py
-..\mako\lookup.py
-..\mako\parsetree.py
-..\mako\pygen.py
-..\mako\pyparser.py
-..\mako\runtime.py
-..\mako\template.py
-..\mako\util.py
-..\mako\_ast_util.py
-..\mako\__init__.py
-..\mako\ext\autohandler.py
-..\mako\ext\babelplugin.py
-..\mako\ext\beaker_cache.py
-..\mako\ext\extract.py
-..\mako\ext\linguaplugin.py
-..\mako\ext\preprocessors.py
-..\mako\ext\pygmentplugin.py
-..\mako\ext\turbogears.py
-..\mako\ext\__init__.py
-..\mako\__pycache__\ast.cpython-36.pyc
-..\mako\__pycache__\cache.cpython-36.pyc
-..\mako\__pycache__\cmd.cpython-36.pyc
-..\mako\__pycache__\codegen.cpython-36.pyc
-..\mako\__pycache__\compat.cpython-36.pyc
-..\mako\__pycache__\exceptions.cpython-36.pyc
-..\mako\__pycache__\filters.cpython-36.pyc
-..\mako\__pycache__\lexer.cpython-36.pyc
-..\mako\__pycache__\lookup.cpython-36.pyc
-..\mako\__pycache__\parsetree.cpython-36.pyc
-..\mako\__pycache__\pygen.cpython-36.pyc
-..\mako\__pycache__\pyparser.cpython-36.pyc
-..\mako\__pycache__\runtime.cpython-36.pyc
-..\mako\__pycache__\template.cpython-36.pyc
-..\mako\__pycache__\util.cpython-36.pyc
-..\mako\__pycache__\_ast_util.cpython-36.pyc
-..\mako\__pycache__\__init__.cpython-36.pyc
-..\mako\ext\__pycache__\autohandler.cpython-36.pyc
-..\mako\ext\__pycache__\babelplugin.cpython-36.pyc
-..\mako\ext\__pycache__\beaker_cache.cpython-36.pyc
-..\mako\ext\__pycache__\extract.cpython-36.pyc
-..\mako\ext\__pycache__\linguaplugin.cpython-36.pyc
-..\mako\ext\__pycache__\preprocessors.cpython-36.pyc
-..\mako\ext\__pycache__\pygmentplugin.cpython-36.pyc
-..\mako\ext\__pycache__\turbogears.cpython-36.pyc
-..\mako\ext\__pycache__\__init__.cpython-36.pyc
-dependency_links.txt
-entry_points.txt
-not-zip-safe
-PKG-INFO
-requires.txt
-SOURCES.txt
-top_level.txt
-..\..\..\Scripts\mako-render-script.py
-..\..\..\Scripts\mako-render.exe
-..\..\..\Scripts\mako-render.exe.manifest
diff --git a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/not-zip-safe b/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/not-zip-safe
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/not-zip-safe
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/requires.txt b/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/requires.txt
deleted file mode 100644
index 4083f59..0000000
--- a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/requires.txt
+++ /dev/null
@@ -1 +0,0 @@
-MarkupSafe>=0.9.2
diff --git a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/top_level.txt b/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/top_level.txt
deleted file mode 100644
index 2951cdd..0000000
--- a/venv/Lib/site-packages/Mako-1.0.7-py3.6.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-mako
diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/PKG-INFO b/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/PKG-INFO
deleted file mode 100644
index 6f2568f..0000000
--- a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/PKG-INFO
+++ /dev/null
@@ -1,133 +0,0 @@
-Metadata-Version: 1.1
-Name: MarkupSafe
-Version: 1.0
-Summary: Implements a XML/HTML/XHTML Markup safe string for Python
-Home-page: http://github.com/pallets/markupsafe
-Author: Armin Ronacher
-Author-email: armin.ronacher@active-4.com
-License: BSD
-Description: MarkupSafe
- ==========
-
- Implements a unicode subclass that supports HTML strings:
-
- .. code-block:: python
-
- >>> from markupsafe import Markup, escape
-            >>> escape("<script>alert(document.cookie);</script>")
-            Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
-            >>> tmpl = Markup("<em>%s</em>")
-            >>> tmpl % "Peter > Lustig"
-            Markup(u'<em>Peter &gt; Lustig</em>')
-
- If you want to make an object unicode that is not yet unicode
- but don't want to lose the taint information, you can use the
- ``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which
- is a different name for the same function).
-
- .. code-block:: python
-
- >>> from markupsafe import soft_unicode
- >>> soft_unicode(42)
- u'42'
- >>> soft_unicode(Markup('foo'))
- Markup(u'foo')
-
- HTML Representations
- --------------------
-
- Objects can customize their HTML markup equivalent by overriding
- the ``__html__`` function:
-
- .. code-block:: python
-
-            >>> class Foo(object):
-            ...  def __html__(self):
-            ...   return '<strong>Nice</strong>'
-            ...
-            >>> escape(Foo())
-            Markup(u'<strong>Nice</strong>')
-            >>> Markup(Foo())
-            Markup(u'<strong>Nice</strong>')
-
- Silent Escapes
- --------------
-
- Since MarkupSafe 0.10 there is now also a separate escape function
- called ``escape_silent`` that returns an empty string for ``None`` for
- consistency with other systems that return empty strings for ``None``
- when escaping (for instance Pylons' webhelpers).
-
- If you also want to use this for the escape method of the Markup
- object, you can create your own subclass that does that:
-
- .. code-block:: python
-
- from markupsafe import Markup, escape_silent as escape
-
- class SilentMarkup(Markup):
- __slots__ = ()
-
- @classmethod
- def escape(cls, s):
- return cls(escape(s))
-
- New-Style String Formatting
- ---------------------------
-
- Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and
- 3.x are now fully supported. Previously the escape behavior of those
- functions was spotty at best. The new implementations operates under the
- following algorithm:
-
- 1. if an object has an ``__html_format__`` method it is called as
- replacement for ``__format__`` with the format specifier. It either
- has to return a string or markup object.
- 2. if an object has an ``__html__`` method it is called.
- 3. otherwise the default format system of Python kicks in and the result
- is HTML escaped.
-
- Here is how you can implement your own formatting:
-
- .. code-block:: python
-
- class User(object):
-
- def __init__(self, id, username):
- self.id = id
- self.username = username
-
- def __html_format__(self, format_spec):
- if format_spec == 'link':
-                        return Markup('<a href="/user/{0}">{1}</a>').format(
- self.id,
- self.__html__(),
- )
- elif format_spec:
- raise ValueError('Invalid format spec')
- return self.__html__()
-
- def __html__(self):
-                    return Markup('<strong>{0}</strong>').format(self.username)
-
- And to format that user:
-
- .. code-block:: python
-
-            >>> user = User(1, 'foo')
-            >>> Markup('<p>User: {0:link}').format(user)
-            Markup(u'<p>User: <a href="/user/1"><strong>foo</strong></a>')
-
- Markupsafe supports Python 2.6, 2.7 and Python 3.3 and higher.
-
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Web Environment
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: Text Processing :: Markup :: HTML
diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/SOURCES.txt b/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/SOURCES.txt
deleted file mode 100644
index 210b339..0000000
--- a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-AUTHORS
-CHANGES
-LICENSE
-MANIFEST.in
-README.rst
-setup.cfg
-setup.py
-tests.py
-MarkupSafe.egg-info/PKG-INFO
-MarkupSafe.egg-info/SOURCES.txt
-MarkupSafe.egg-info/dependency_links.txt
-MarkupSafe.egg-info/not-zip-safe
-MarkupSafe.egg-info/top_level.txt
-markupsafe/__init__.py
-markupsafe/_compat.py
-markupsafe/_constants.py
-markupsafe/_native.py
-markupsafe/_speedups.c
\ No newline at end of file
diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/dependency_links.txt b/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/installed-files.txt b/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/installed-files.txt
deleted file mode 100644
index b484008..0000000
--- a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/installed-files.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-..\markupsafe\_compat.py
-..\markupsafe\_constants.py
-..\markupsafe\_native.py
-..\markupsafe\__init__.py
-..\markupsafe\_speedups.c
-..\markupsafe\__pycache__\_compat.cpython-36.pyc
-..\markupsafe\__pycache__\_constants.cpython-36.pyc
-..\markupsafe\__pycache__\_native.cpython-36.pyc
-..\markupsafe\__pycache__\__init__.cpython-36.pyc
-dependency_links.txt
-not-zip-safe
-PKG-INFO
-SOURCES.txt
-top_level.txt
diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/not-zip-safe b/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/not-zip-safe
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/not-zip-safe
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/top_level.txt b/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/top_level.txt
deleted file mode 100644
index 75bf729..0000000
--- a/venv/Lib/site-packages/MarkupSafe-1.0-py3.6.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-markupsafe
diff --git a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/PKG-INFO b/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/PKG-INFO
deleted file mode 100644
index ed76311..0000000
--- a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/PKG-INFO
+++ /dev/null
@@ -1,154 +0,0 @@
-Metadata-Version: 1.1
-Name: SQLAlchemy
-Version: 1.2.3
-Summary: Database Abstraction Library
-Home-page: http://www.sqlalchemy.org
-Author: Mike Bayer
-Author-email: mike_mp@zzzcomputing.com
-License: MIT License
-Description: SQLAlchemy
- ==========
-
- The Python SQL Toolkit and Object Relational Mapper
-
- Introduction
- -------------
-
- SQLAlchemy is the Python SQL toolkit and Object Relational Mapper
- that gives application developers the full power and
- flexibility of SQL. SQLAlchemy provides a full suite
- of well known enterprise-level persistence patterns,
- designed for efficient and high-performing database
- access, adapted into a simple and Pythonic domain
- language.
-
- Major SQLAlchemy features include:
-
- * An industrial strength ORM, built
- from the core on the identity map, unit of work,
- and data mapper patterns. These patterns
- allow transparent persistence of objects
- using a declarative configuration system.
- Domain models
- can be constructed and manipulated naturally,
- and changes are synchronized with the
- current transaction automatically.
- * A relationally-oriented query system, exposing
- the full range of SQL's capabilities
- explicitly, including joins, subqueries,
- correlation, and most everything else,
- in terms of the object model.
- Writing queries with the ORM uses the same
- techniques of relational composition you use
- when writing SQL. While you can drop into
- literal SQL at any time, it's virtually never
- needed.
- * A comprehensive and flexible system
- of eager loading for related collections and objects.
- Collections are cached within a session,
- and can be loaded on individual access, all
- at once using joins, or by query per collection
- across the full result set.
- * A Core SQL construction system and DBAPI
- interaction layer. The SQLAlchemy Core is
- separate from the ORM and is a full database
- abstraction layer in its own right, and includes
- an extensible Python-based SQL expression
- language, schema metadata, connection pooling,
- type coercion, and custom types.
- * All primary and foreign key constraints are
- assumed to be composite and natural. Surrogate
- integer primary keys are of course still the
- norm, but SQLAlchemy never assumes or hardcodes
- to this model.
- * Database introspection and generation. Database
- schemas can be "reflected" in one step into
- Python structures representing database metadata;
- those same structures can then generate
- CREATE statements right back out - all within
- the Core, independent of the ORM.
-
- SQLAlchemy's philosophy:
-
- * SQL databases behave less and less like object
- collections the more size and performance start to
- matter; object collections behave less and less like
- tables and rows the more abstraction starts to matter.
- SQLAlchemy aims to accommodate both of these
- principles.
- * An ORM doesn't need to hide the "R". A relational
- database provides rich, set-based functionality
- that should be fully exposed. SQLAlchemy's
- ORM provides an open-ended set of patterns
- that allow a developer to construct a custom
- mediation layer between a domain model and
- a relational schema, turning the so-called
- "object relational impedance" issue into
- a distant memory.
- * The developer, in all cases, makes all decisions
- regarding the design, structure, and naming conventions
- of both the object model as well as the relational
- schema. SQLAlchemy only provides the means
- to automate the execution of these decisions.
- * With SQLAlchemy, there's no such thing as
- "the ORM generated a bad query" - you
- retain full control over the structure of
- queries, including how joins are organized,
- how subqueries and correlation is used, what
- columns are requested. Everything SQLAlchemy
- does is ultimately the result of a developer-
- initiated decision.
- * Don't use an ORM if the problem doesn't need one.
- SQLAlchemy consists of a Core and separate ORM
- component. The Core offers a full SQL expression
- language that allows Pythonic construction
- of SQL constructs that render directly to SQL
- strings for a target database, returning
- result sets that are essentially enhanced DBAPI
- cursors.
- * Transactions should be the norm. With SQLAlchemy's
- ORM, nothing goes to permanent storage until
- commit() is called. SQLAlchemy encourages applications
- to create a consistent means of delineating
- the start and end of a series of operations.
- * Never render a literal value in a SQL statement.
- Bound parameters are used to the greatest degree
- possible, allowing query optimizers to cache
- query plans effectively and making SQL injection
- attacks a non-issue.
-
- Documentation
- -------------
-
- Latest documentation is at:
-
- http://www.sqlalchemy.org/docs/
-
- Installation / Requirements
- ---------------------------
-
- Full documentation for installation is at
-        `Installation <http://docs.sqlalchemy.org/en/latest/intro.html#installation>`_.
-
- Getting Help / Development / Bug reporting
- ------------------------------------------
-
-        Please refer to the `SQLAlchemy Community Guide <http://www.sqlalchemy.org/support.html>`_.
-
- License
- -------
-
-        SQLAlchemy is distributed under the `MIT license
-        <http://www.opensource.org/licenses/mit-license.php>`_.
-
-
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Topic :: Database :: Front-Ends
-Classifier: Operating System :: OS Independent
diff --git a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/SOURCES.txt b/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/SOURCES.txt
deleted file mode 100644
index 5cf9647..0000000
--- a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,829 +0,0 @@
-AUTHORS
-CHANGES
-LICENSE
-MANIFEST.in
-README.dialects.rst
-README.rst
-README.unittests.rst
-setup.cfg
-setup.py
-sqla_nose.py
-tox.ini
-doc/contents.html
-doc/copyright.html
-doc/errors.html
-doc/genindex.html
-doc/glossary.html
-doc/index.html
-doc/intro.html
-doc/notfound.html
-doc/search.html
-doc/searchindex.js
-doc/_images/sqla_arch_small.png
-doc/_images/sqla_engine_arch.png
-doc/_modules/index.html
-doc/_modules/examples/adjacency_list/adjacency_list.html
-doc/_modules/examples/association/basic_association.html
-doc/_modules/examples/association/dict_of_sets_with_default.html
-doc/_modules/examples/association/proxied_association.html
-doc/_modules/examples/custom_attributes/active_column_defaults.html
-doc/_modules/examples/custom_attributes/custom_management.html
-doc/_modules/examples/custom_attributes/listen_for_events.html
-doc/_modules/examples/dogpile_caching/advanced.html
-doc/_modules/examples/dogpile_caching/caching_query.html
-doc/_modules/examples/dogpile_caching/environment.html
-doc/_modules/examples/dogpile_caching/fixture_data.html
-doc/_modules/examples/dogpile_caching/helloworld.html
-doc/_modules/examples/dogpile_caching/local_session_caching.html
-doc/_modules/examples/dogpile_caching/model.html
-doc/_modules/examples/dogpile_caching/relationship_caching.html
-doc/_modules/examples/dynamic_dict/dynamic_dict.html
-doc/_modules/examples/elementtree/adjacency_list.html
-doc/_modules/examples/elementtree/optimized_al.html
-doc/_modules/examples/elementtree/pickle.html
-doc/_modules/examples/generic_associations/discriminator_on_association.html
-doc/_modules/examples/generic_associations/generic_fk.html
-doc/_modules/examples/generic_associations/table_per_association.html
-doc/_modules/examples/generic_associations/table_per_related.html
-doc/_modules/examples/graphs/directed_graph.html
-doc/_modules/examples/inheritance/concrete.html
-doc/_modules/examples/inheritance/joined.html
-doc/_modules/examples/inheritance/single.html
-doc/_modules/examples/join_conditions/cast.html
-doc/_modules/examples/join_conditions/threeway.html
-doc/_modules/examples/large_collection/large_collection.html
-doc/_modules/examples/materialized_paths/materialized_paths.html
-doc/_modules/examples/nested_sets/nested_sets.html
-doc/_modules/examples/performance/__main__.html
-doc/_modules/examples/performance/bulk_inserts.html
-doc/_modules/examples/performance/bulk_updates.html
-doc/_modules/examples/performance/large_resultsets.html
-doc/_modules/examples/performance/short_selects.html
-doc/_modules/examples/performance/single_inserts.html
-doc/_modules/examples/postgis/postgis.html
-doc/_modules/examples/sharding/attribute_shard.html
-doc/_modules/examples/versioned_history/history_meta.html
-doc/_modules/examples/versioned_history/test_versioning.html
-doc/_modules/examples/versioned_rows/versioned_map.html
-doc/_modules/examples/versioned_rows/versioned_rows.html
-doc/_modules/examples/vertical/dictlike-polymorphic.html
-doc/_modules/examples/vertical/dictlike.html
-doc/_static/basic.css
-doc/_static/changelog.css
-doc/_static/comment-bright.png
-doc/_static/comment-close.png
-doc/_static/comment.png
-doc/_static/detectmobile.js
-doc/_static/docs.css
-doc/_static/doctools.js
-doc/_static/documentation_options.js
-doc/_static/down-pressed.png
-doc/_static/down.png
-doc/_static/file.png
-doc/_static/init.js
-doc/_static/jquery-3.2.1.js
-doc/_static/jquery.js
-doc/_static/minus.png
-doc/_static/plus.png
-doc/_static/pygments.css
-doc/_static/searchtools.js
-doc/_static/sphinx_paramlinks.css
-doc/_static/underscore-1.3.1.js
-doc/_static/underscore.js
-doc/_static/up-pressed.png
-doc/_static/up.png
-doc/_static/websupport.js
-doc/build/Makefile
-doc/build/conf.py
-doc/build/contents.rst
-doc/build/copyright.rst
-doc/build/corrections.py
-doc/build/errors.rst
-doc/build/glossary.rst
-doc/build/index.rst
-doc/build/intro.rst
-doc/build/requirements.txt
-doc/build/sqla_arch_small.png
-doc/build/changelog/changelog_01.rst
-doc/build/changelog/changelog_02.rst
-doc/build/changelog/changelog_03.rst
-doc/build/changelog/changelog_04.rst
-doc/build/changelog/changelog_05.rst
-doc/build/changelog/changelog_06.rst
-doc/build/changelog/changelog_07.rst
-doc/build/changelog/changelog_08.rst
-doc/build/changelog/changelog_09.rst
-doc/build/changelog/changelog_10.rst
-doc/build/changelog/changelog_11.rst
-doc/build/changelog/changelog_12.rst
-doc/build/changelog/index.rst
-doc/build/changelog/migration_04.rst
-doc/build/changelog/migration_05.rst
-doc/build/changelog/migration_06.rst
-doc/build/changelog/migration_07.rst
-doc/build/changelog/migration_08.rst
-doc/build/changelog/migration_09.rst
-doc/build/changelog/migration_10.rst
-doc/build/changelog/migration_11.rst
-doc/build/changelog/migration_12.rst
-doc/build/changelog/unreleased_10/4065.rst
-doc/build/changelog/unreleased_10/README.txt
-doc/build/changelog/unreleased_11/README.txt
-doc/build/changelog/unreleased_12/README.txt
-doc/build/core/api_basics.rst
-doc/build/core/compiler.rst
-doc/build/core/connections.rst
-doc/build/core/constraints.rst
-doc/build/core/custom_types.rst
-doc/build/core/ddl.rst
-doc/build/core/defaults.rst
-doc/build/core/dml.rst
-doc/build/core/engines.rst
-doc/build/core/engines_connections.rst
-doc/build/core/event.rst
-doc/build/core/events.rst
-doc/build/core/exceptions.rst
-doc/build/core/expression_api.rst
-doc/build/core/functions.rst
-doc/build/core/index.rst
-doc/build/core/inspection.rst
-doc/build/core/interfaces.rst
-doc/build/core/internals.rst
-doc/build/core/metadata.rst
-doc/build/core/pooling.rst
-doc/build/core/reflection.rst
-doc/build/core/schema.rst
-doc/build/core/selectable.rst
-doc/build/core/serializer.rst
-doc/build/core/sqla_engine_arch.png
-doc/build/core/sqlelement.rst
-doc/build/core/tutorial.rst
-doc/build/core/type_api.rst
-doc/build/core/type_basics.rst
-doc/build/core/types.rst
-doc/build/dialects/firebird.rst
-doc/build/dialects/index.rst
-doc/build/dialects/mssql.rst
-doc/build/dialects/mysql.rst
-doc/build/dialects/oracle.rst
-doc/build/dialects/postgresql.rst
-doc/build/dialects/sqlite.rst
-doc/build/dialects/sybase.rst
-doc/build/faq/connections.rst
-doc/build/faq/index.rst
-doc/build/faq/metadata_schema.rst
-doc/build/faq/ormconfiguration.rst
-doc/build/faq/performance.rst
-doc/build/faq/sessions.rst
-doc/build/faq/sqlexpressions.rst
-doc/build/orm/backref.rst
-doc/build/orm/basic_relationships.rst
-doc/build/orm/cascades.rst
-doc/build/orm/classical.rst
-doc/build/orm/collections.rst
-doc/build/orm/composites.rst
-doc/build/orm/constructors.rst
-doc/build/orm/contextual.rst
-doc/build/orm/deprecated.rst
-doc/build/orm/events.rst
-doc/build/orm/examples.rst
-doc/build/orm/exceptions.rst
-doc/build/orm/extending.rst
-doc/build/orm/index.rst
-doc/build/orm/inheritance.rst
-doc/build/orm/inheritance_loading.rst
-doc/build/orm/internals.rst
-doc/build/orm/join_conditions.rst
-doc/build/orm/loading.rst
-doc/build/orm/loading_columns.rst
-doc/build/orm/loading_objects.rst
-doc/build/orm/loading_relationships.rst
-doc/build/orm/mapped_attributes.rst
-doc/build/orm/mapped_sql_expr.rst
-doc/build/orm/mapper_config.rst
-doc/build/orm/mapping_api.rst
-doc/build/orm/mapping_columns.rst
-doc/build/orm/mapping_styles.rst
-doc/build/orm/nonstandard_mappings.rst
-doc/build/orm/persistence_techniques.rst
-doc/build/orm/query.rst
-doc/build/orm/relationship_api.rst
-doc/build/orm/relationship_persistence.rst
-doc/build/orm/relationships.rst
-doc/build/orm/scalar_mapping.rst
-doc/build/orm/self_referential.rst
-doc/build/orm/session.rst
-doc/build/orm/session_api.rst
-doc/build/orm/session_basics.rst
-doc/build/orm/session_events.rst
-doc/build/orm/session_state_management.rst
-doc/build/orm/session_transaction.rst
-doc/build/orm/tutorial.rst
-doc/build/orm/versioning.rst
-doc/build/orm/extensions/associationproxy.rst
-doc/build/orm/extensions/automap.rst
-doc/build/orm/extensions/baked.rst
-doc/build/orm/extensions/horizontal_shard.rst
-doc/build/orm/extensions/hybrid.rst
-doc/build/orm/extensions/index.rst
-doc/build/orm/extensions/indexable.rst
-doc/build/orm/extensions/instrumentation.rst
-doc/build/orm/extensions/mutable.rst
-doc/build/orm/extensions/orderinglist.rst
-doc/build/orm/extensions/declarative/api.rst
-doc/build/orm/extensions/declarative/basic_use.rst
-doc/build/orm/extensions/declarative/index.rst
-doc/build/orm/extensions/declarative/inheritance.rst
-doc/build/orm/extensions/declarative/mixins.rst
-doc/build/orm/extensions/declarative/relationships.rst
-doc/build/orm/extensions/declarative/table_config.rst
-doc/build/texinputs/Makefile
-doc/build/texinputs/sphinx.sty
-doc/changelog/changelog_01.html
-doc/changelog/changelog_02.html
-doc/changelog/changelog_03.html
-doc/changelog/changelog_04.html
-doc/changelog/changelog_05.html
-doc/changelog/changelog_06.html
-doc/changelog/changelog_07.html
-doc/changelog/changelog_08.html
-doc/changelog/changelog_09.html
-doc/changelog/changelog_10.html
-doc/changelog/changelog_11.html
-doc/changelog/changelog_12.html
-doc/changelog/index.html
-doc/changelog/migration_04.html
-doc/changelog/migration_05.html
-doc/changelog/migration_06.html
-doc/changelog/migration_07.html
-doc/changelog/migration_08.html
-doc/changelog/migration_09.html
-doc/changelog/migration_10.html
-doc/changelog/migration_11.html
-doc/changelog/migration_12.html
-doc/core/api_basics.html
-doc/core/compiler.html
-doc/core/connections.html
-doc/core/constraints.html
-doc/core/custom_types.html
-doc/core/ddl.html
-doc/core/defaults.html
-doc/core/dml.html
-doc/core/engines.html
-doc/core/engines_connections.html
-doc/core/event.html
-doc/core/events.html
-doc/core/exceptions.html
-doc/core/expression_api.html
-doc/core/functions.html
-doc/core/index.html
-doc/core/inspection.html
-doc/core/interfaces.html
-doc/core/internals.html
-doc/core/metadata.html
-doc/core/pooling.html
-doc/core/reflection.html
-doc/core/schema.html
-doc/core/selectable.html
-doc/core/serializer.html
-doc/core/sqlelement.html
-doc/core/tutorial.html
-doc/core/type_api.html
-doc/core/type_basics.html
-doc/core/types.html
-doc/dialects/firebird.html
-doc/dialects/index.html
-doc/dialects/mssql.html
-doc/dialects/mysql.html
-doc/dialects/oracle.html
-doc/dialects/postgresql.html
-doc/dialects/sqlite.html
-doc/dialects/sybase.html
-doc/faq/connections.html
-doc/faq/index.html
-doc/faq/metadata_schema.html
-doc/faq/ormconfiguration.html
-doc/faq/performance.html
-doc/faq/sessions.html
-doc/faq/sqlexpressions.html
-doc/orm/backref.html
-doc/orm/basic_relationships.html
-doc/orm/cascades.html
-doc/orm/classical.html
-doc/orm/collections.html
-doc/orm/composites.html
-doc/orm/constructors.html
-doc/orm/contextual.html
-doc/orm/deprecated.html
-doc/orm/events.html
-doc/orm/examples.html
-doc/orm/exceptions.html
-doc/orm/extending.html
-doc/orm/index.html
-doc/orm/inheritance.html
-doc/orm/inheritance_loading.html
-doc/orm/internals.html
-doc/orm/join_conditions.html
-doc/orm/loading.html
-doc/orm/loading_columns.html
-doc/orm/loading_objects.html
-doc/orm/loading_relationships.html
-doc/orm/mapped_attributes.html
-doc/orm/mapped_sql_expr.html
-doc/orm/mapper_config.html
-doc/orm/mapping_api.html
-doc/orm/mapping_columns.html
-doc/orm/mapping_styles.html
-doc/orm/nonstandard_mappings.html
-doc/orm/persistence_techniques.html
-doc/orm/query.html
-doc/orm/relationship_api.html
-doc/orm/relationship_persistence.html
-doc/orm/relationships.html
-doc/orm/scalar_mapping.html
-doc/orm/self_referential.html
-doc/orm/session.html
-doc/orm/session_api.html
-doc/orm/session_basics.html
-doc/orm/session_events.html
-doc/orm/session_state_management.html
-doc/orm/session_transaction.html
-doc/orm/tutorial.html
-doc/orm/versioning.html
-doc/orm/extensions/associationproxy.html
-doc/orm/extensions/automap.html
-doc/orm/extensions/baked.html
-doc/orm/extensions/horizontal_shard.html
-doc/orm/extensions/hybrid.html
-doc/orm/extensions/index.html
-doc/orm/extensions/indexable.html
-doc/orm/extensions/instrumentation.html
-doc/orm/extensions/mutable.html
-doc/orm/extensions/orderinglist.html
-doc/orm/extensions/declarative/api.html
-doc/orm/extensions/declarative/basic_use.html
-doc/orm/extensions/declarative/index.html
-doc/orm/extensions/declarative/inheritance.html
-doc/orm/extensions/declarative/mixins.html
-doc/orm/extensions/declarative/relationships.html
-doc/orm/extensions/declarative/table_config.html
-examples/__init__.py
-examples/adjacency_list/__init__.py
-examples/adjacency_list/adjacency_list.py
-examples/association/__init__.py
-examples/association/basic_association.py
-examples/association/dict_of_sets_with_default.py
-examples/association/proxied_association.py
-examples/custom_attributes/__init__.py
-examples/custom_attributes/active_column_defaults.py
-examples/custom_attributes/custom_management.py
-examples/custom_attributes/listen_for_events.py
-examples/dogpile_caching/__init__.py
-examples/dogpile_caching/advanced.py
-examples/dogpile_caching/caching_query.py
-examples/dogpile_caching/environment.py
-examples/dogpile_caching/fixture_data.py
-examples/dogpile_caching/helloworld.py
-examples/dogpile_caching/local_session_caching.py
-examples/dogpile_caching/model.py
-examples/dogpile_caching/relationship_caching.py
-examples/dynamic_dict/__init__.py
-examples/dynamic_dict/dynamic_dict.py
-examples/elementtree/__init__.py
-examples/elementtree/adjacency_list.py
-examples/elementtree/optimized_al.py
-examples/elementtree/pickle.py
-examples/elementtree/test.xml
-examples/elementtree/test2.xml
-examples/elementtree/test3.xml
-examples/generic_associations/__init__.py
-examples/generic_associations/discriminator_on_association.py
-examples/generic_associations/generic_fk.py
-examples/generic_associations/table_per_association.py
-examples/generic_associations/table_per_related.py
-examples/graphs/__init__.py
-examples/graphs/directed_graph.py
-examples/inheritance/__init__.py
-examples/inheritance/concrete.py
-examples/inheritance/joined.py
-examples/inheritance/single.py
-examples/join_conditions/__init__.py
-examples/join_conditions/cast.py
-examples/join_conditions/threeway.py
-examples/large_collection/__init__.py
-examples/large_collection/large_collection.py
-examples/materialized_paths/__init__.py
-examples/materialized_paths/materialized_paths.py
-examples/nested_sets/__init__.py
-examples/nested_sets/nested_sets.py
-examples/performance/__init__.py
-examples/performance/__main__.py
-examples/performance/bulk_inserts.py
-examples/performance/bulk_updates.py
-examples/performance/large_resultsets.py
-examples/performance/short_selects.py
-examples/performance/single_inserts.py
-examples/postgis/__init__.py
-examples/postgis/postgis.py
-examples/sharding/__init__.py
-examples/sharding/attribute_shard.py
-examples/versioned_history/__init__.py
-examples/versioned_history/history_meta.py
-examples/versioned_history/test_versioning.py
-examples/versioned_rows/__init__.py
-examples/versioned_rows/versioned_map.py
-examples/versioned_rows/versioned_rows.py
-examples/vertical/__init__.py
-examples/vertical/dictlike-polymorphic.py
-examples/vertical/dictlike.py
-lib/SQLAlchemy.egg-info/PKG-INFO
-lib/SQLAlchemy.egg-info/SOURCES.txt
-lib/SQLAlchemy.egg-info/dependency_links.txt
-lib/SQLAlchemy.egg-info/requires.txt
-lib/SQLAlchemy.egg-info/top_level.txt
-lib/sqlalchemy/__init__.py
-lib/sqlalchemy/events.py
-lib/sqlalchemy/exc.py
-lib/sqlalchemy/inspection.py
-lib/sqlalchemy/interfaces.py
-lib/sqlalchemy/log.py
-lib/sqlalchemy/pool.py
-lib/sqlalchemy/processors.py
-lib/sqlalchemy/schema.py
-lib/sqlalchemy/types.py
-lib/sqlalchemy/cextension/processors.c
-lib/sqlalchemy/cextension/resultproxy.c
-lib/sqlalchemy/cextension/utils.c
-lib/sqlalchemy/connectors/__init__.py
-lib/sqlalchemy/connectors/mxodbc.py
-lib/sqlalchemy/connectors/pyodbc.py
-lib/sqlalchemy/connectors/zxJDBC.py
-lib/sqlalchemy/databases/__init__.py
-lib/sqlalchemy/dialects/__init__.py
-lib/sqlalchemy/dialects/type_migration_guidelines.txt
-lib/sqlalchemy/dialects/firebird/__init__.py
-lib/sqlalchemy/dialects/firebird/base.py
-lib/sqlalchemy/dialects/firebird/fdb.py
-lib/sqlalchemy/dialects/firebird/kinterbasdb.py
-lib/sqlalchemy/dialects/mssql/__init__.py
-lib/sqlalchemy/dialects/mssql/adodbapi.py
-lib/sqlalchemy/dialects/mssql/base.py
-lib/sqlalchemy/dialects/mssql/information_schema.py
-lib/sqlalchemy/dialects/mssql/mxodbc.py
-lib/sqlalchemy/dialects/mssql/pymssql.py
-lib/sqlalchemy/dialects/mssql/pyodbc.py
-lib/sqlalchemy/dialects/mssql/zxjdbc.py
-lib/sqlalchemy/dialects/mysql/__init__.py
-lib/sqlalchemy/dialects/mysql/base.py
-lib/sqlalchemy/dialects/mysql/cymysql.py
-lib/sqlalchemy/dialects/mysql/dml.py
-lib/sqlalchemy/dialects/mysql/enumerated.py
-lib/sqlalchemy/dialects/mysql/gaerdbms.py
-lib/sqlalchemy/dialects/mysql/json.py
-lib/sqlalchemy/dialects/mysql/mysqlconnector.py
-lib/sqlalchemy/dialects/mysql/mysqldb.py
-lib/sqlalchemy/dialects/mysql/oursql.py
-lib/sqlalchemy/dialects/mysql/pymysql.py
-lib/sqlalchemy/dialects/mysql/pyodbc.py
-lib/sqlalchemy/dialects/mysql/reflection.py
-lib/sqlalchemy/dialects/mysql/types.py
-lib/sqlalchemy/dialects/mysql/zxjdbc.py
-lib/sqlalchemy/dialects/oracle/__init__.py
-lib/sqlalchemy/dialects/oracle/base.py
-lib/sqlalchemy/dialects/oracle/cx_oracle.py
-lib/sqlalchemy/dialects/oracle/zxjdbc.py
-lib/sqlalchemy/dialects/postgresql/__init__.py
-lib/sqlalchemy/dialects/postgresql/array.py
-lib/sqlalchemy/dialects/postgresql/base.py
-lib/sqlalchemy/dialects/postgresql/dml.py
-lib/sqlalchemy/dialects/postgresql/ext.py
-lib/sqlalchemy/dialects/postgresql/hstore.py
-lib/sqlalchemy/dialects/postgresql/json.py
-lib/sqlalchemy/dialects/postgresql/pg8000.py
-lib/sqlalchemy/dialects/postgresql/psycopg2.py
-lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
-lib/sqlalchemy/dialects/postgresql/pygresql.py
-lib/sqlalchemy/dialects/postgresql/pypostgresql.py
-lib/sqlalchemy/dialects/postgresql/ranges.py
-lib/sqlalchemy/dialects/postgresql/zxjdbc.py
-lib/sqlalchemy/dialects/sqlite/__init__.py
-lib/sqlalchemy/dialects/sqlite/base.py
-lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
-lib/sqlalchemy/dialects/sqlite/pysqlite.py
-lib/sqlalchemy/dialects/sybase/__init__.py
-lib/sqlalchemy/dialects/sybase/base.py
-lib/sqlalchemy/dialects/sybase/mxodbc.py
-lib/sqlalchemy/dialects/sybase/pyodbc.py
-lib/sqlalchemy/dialects/sybase/pysybase.py
-lib/sqlalchemy/engine/__init__.py
-lib/sqlalchemy/engine/base.py
-lib/sqlalchemy/engine/default.py
-lib/sqlalchemy/engine/interfaces.py
-lib/sqlalchemy/engine/reflection.py
-lib/sqlalchemy/engine/result.py
-lib/sqlalchemy/engine/strategies.py
-lib/sqlalchemy/engine/threadlocal.py
-lib/sqlalchemy/engine/url.py
-lib/sqlalchemy/engine/util.py
-lib/sqlalchemy/event/__init__.py
-lib/sqlalchemy/event/api.py
-lib/sqlalchemy/event/attr.py
-lib/sqlalchemy/event/base.py
-lib/sqlalchemy/event/legacy.py
-lib/sqlalchemy/event/registry.py
-lib/sqlalchemy/ext/__init__.py
-lib/sqlalchemy/ext/associationproxy.py
-lib/sqlalchemy/ext/automap.py
-lib/sqlalchemy/ext/baked.py
-lib/sqlalchemy/ext/compiler.py
-lib/sqlalchemy/ext/horizontal_shard.py
-lib/sqlalchemy/ext/hybrid.py
-lib/sqlalchemy/ext/indexable.py
-lib/sqlalchemy/ext/instrumentation.py
-lib/sqlalchemy/ext/mutable.py
-lib/sqlalchemy/ext/orderinglist.py
-lib/sqlalchemy/ext/serializer.py
-lib/sqlalchemy/ext/declarative/__init__.py
-lib/sqlalchemy/ext/declarative/api.py
-lib/sqlalchemy/ext/declarative/base.py
-lib/sqlalchemy/ext/declarative/clsregistry.py
-lib/sqlalchemy/orm/__init__.py
-lib/sqlalchemy/orm/attributes.py
-lib/sqlalchemy/orm/base.py
-lib/sqlalchemy/orm/collections.py
-lib/sqlalchemy/orm/dependency.py
-lib/sqlalchemy/orm/deprecated_interfaces.py
-lib/sqlalchemy/orm/descriptor_props.py
-lib/sqlalchemy/orm/dynamic.py
-lib/sqlalchemy/orm/evaluator.py
-lib/sqlalchemy/orm/events.py
-lib/sqlalchemy/orm/exc.py
-lib/sqlalchemy/orm/identity.py
-lib/sqlalchemy/orm/instrumentation.py
-lib/sqlalchemy/orm/interfaces.py
-lib/sqlalchemy/orm/loading.py
-lib/sqlalchemy/orm/mapper.py
-lib/sqlalchemy/orm/path_registry.py
-lib/sqlalchemy/orm/persistence.py
-lib/sqlalchemy/orm/properties.py
-lib/sqlalchemy/orm/query.py
-lib/sqlalchemy/orm/relationships.py
-lib/sqlalchemy/orm/scoping.py
-lib/sqlalchemy/orm/session.py
-lib/sqlalchemy/orm/state.py
-lib/sqlalchemy/orm/strategies.py
-lib/sqlalchemy/orm/strategy_options.py
-lib/sqlalchemy/orm/sync.py
-lib/sqlalchemy/orm/unitofwork.py
-lib/sqlalchemy/orm/util.py
-lib/sqlalchemy/sql/__init__.py
-lib/sqlalchemy/sql/annotation.py
-lib/sqlalchemy/sql/base.py
-lib/sqlalchemy/sql/compiler.py
-lib/sqlalchemy/sql/crud.py
-lib/sqlalchemy/sql/ddl.py
-lib/sqlalchemy/sql/default_comparator.py
-lib/sqlalchemy/sql/dml.py
-lib/sqlalchemy/sql/elements.py
-lib/sqlalchemy/sql/expression.py
-lib/sqlalchemy/sql/functions.py
-lib/sqlalchemy/sql/naming.py
-lib/sqlalchemy/sql/operators.py
-lib/sqlalchemy/sql/schema.py
-lib/sqlalchemy/sql/selectable.py
-lib/sqlalchemy/sql/sqltypes.py
-lib/sqlalchemy/sql/type_api.py
-lib/sqlalchemy/sql/util.py
-lib/sqlalchemy/sql/visitors.py
-lib/sqlalchemy/testing/__init__.py
-lib/sqlalchemy/testing/assertions.py
-lib/sqlalchemy/testing/assertsql.py
-lib/sqlalchemy/testing/config.py
-lib/sqlalchemy/testing/engines.py
-lib/sqlalchemy/testing/entities.py
-lib/sqlalchemy/testing/exclusions.py
-lib/sqlalchemy/testing/fixtures.py
-lib/sqlalchemy/testing/mock.py
-lib/sqlalchemy/testing/pickleable.py
-lib/sqlalchemy/testing/profiling.py
-lib/sqlalchemy/testing/provision.py
-lib/sqlalchemy/testing/replay_fixture.py
-lib/sqlalchemy/testing/requirements.py
-lib/sqlalchemy/testing/runner.py
-lib/sqlalchemy/testing/schema.py
-lib/sqlalchemy/testing/util.py
-lib/sqlalchemy/testing/warnings.py
-lib/sqlalchemy/testing/plugin/__init__.py
-lib/sqlalchemy/testing/plugin/bootstrap.py
-lib/sqlalchemy/testing/plugin/noseplugin.py
-lib/sqlalchemy/testing/plugin/plugin_base.py
-lib/sqlalchemy/testing/plugin/pytestplugin.py
-lib/sqlalchemy/testing/suite/__init__.py
-lib/sqlalchemy/testing/suite/test_ddl.py
-lib/sqlalchemy/testing/suite/test_dialect.py
-lib/sqlalchemy/testing/suite/test_insert.py
-lib/sqlalchemy/testing/suite/test_reflection.py
-lib/sqlalchemy/testing/suite/test_results.py
-lib/sqlalchemy/testing/suite/test_select.py
-lib/sqlalchemy/testing/suite/test_sequence.py
-lib/sqlalchemy/testing/suite/test_types.py
-lib/sqlalchemy/testing/suite/test_update_delete.py
-lib/sqlalchemy/util/__init__.py
-lib/sqlalchemy/util/_collections.py
-lib/sqlalchemy/util/compat.py
-lib/sqlalchemy/util/deprecations.py
-lib/sqlalchemy/util/langhelpers.py
-lib/sqlalchemy/util/queue.py
-lib/sqlalchemy/util/topological.py
-test/__init__.py
-test/binary_data_one.dat
-test/binary_data_two.dat
-test/conftest.py
-test/requirements.py
-test/aaa_profiling/__init__.py
-test/aaa_profiling/test_compiler.py
-test/aaa_profiling/test_memusage.py
-test/aaa_profiling/test_orm.py
-test/aaa_profiling/test_pool.py
-test/aaa_profiling/test_resultset.py
-test/aaa_profiling/test_zoomark.py
-test/aaa_profiling/test_zoomark_orm.py
-test/base/__init__.py
-test/base/test_dependency.py
-test/base/test_events.py
-test/base/test_except.py
-test/base/test_inspect.py
-test/base/test_tutorials.py
-test/base/test_utils.py
-test/dialect/__init__.py
-test/dialect/test_all.py
-test/dialect/test_firebird.py
-test/dialect/test_mxodbc.py
-test/dialect/test_pyodbc.py
-test/dialect/test_sqlite.py
-test/dialect/test_suite.py
-test/dialect/test_sybase.py
-test/dialect/mssql/__init__.py
-test/dialect/mssql/test_compiler.py
-test/dialect/mssql/test_engine.py
-test/dialect/mssql/test_query.py
-test/dialect/mssql/test_reflection.py
-test/dialect/mssql/test_types.py
-test/dialect/mysql/__init__.py
-test/dialect/mysql/test_compiler.py
-test/dialect/mysql/test_dialect.py
-test/dialect/mysql/test_on_duplicate.py
-test/dialect/mysql/test_query.py
-test/dialect/mysql/test_reflection.py
-test/dialect/mysql/test_types.py
-test/dialect/oracle/__init__.py
-test/dialect/oracle/test_compiler.py
-test/dialect/oracle/test_dialect.py
-test/dialect/oracle/test_reflection.py
-test/dialect/oracle/test_types.py
-test/dialect/postgresql/__init__.py
-test/dialect/postgresql/test_compiler.py
-test/dialect/postgresql/test_dialect.py
-test/dialect/postgresql/test_on_conflict.py
-test/dialect/postgresql/test_query.py
-test/dialect/postgresql/test_reflection.py
-test/dialect/postgresql/test_types.py
-test/engine/__init__.py
-test/engine/test_bind.py
-test/engine/test_ddlevents.py
-test/engine/test_execute.py
-test/engine/test_logging.py
-test/engine/test_parseconnect.py
-test/engine/test_pool.py
-test/engine/test_processors.py
-test/engine/test_reconnect.py
-test/engine/test_reflection.py
-test/engine/test_transaction.py
-test/ext/__init__.py
-test/ext/test_associationproxy.py
-test/ext/test_automap.py
-test/ext/test_baked.py
-test/ext/test_compiler.py
-test/ext/test_extendedattr.py
-test/ext/test_horizontal_shard.py
-test/ext/test_hybrid.py
-test/ext/test_indexable.py
-test/ext/test_mutable.py
-test/ext/test_orderinglist.py
-test/ext/test_serializer.py
-test/ext/declarative/__init__.py
-test/ext/declarative/test_basic.py
-test/ext/declarative/test_clsregistry.py
-test/ext/declarative/test_inheritance.py
-test/ext/declarative/test_mixin.py
-test/ext/declarative/test_reflection.py
-test/orm/__init__.py
-test/orm/_fixtures.py
-test/orm/test_association.py
-test/orm/test_assorted_eager.py
-test/orm/test_attributes.py
-test/orm/test_backref_mutations.py
-test/orm/test_bind.py
-test/orm/test_bulk.py
-test/orm/test_bundle.py
-test/orm/test_cascade.py
-test/orm/test_collection.py
-test/orm/test_compile.py
-test/orm/test_composites.py
-test/orm/test_cycles.py
-test/orm/test_default_strategies.py
-test/orm/test_defaults.py
-test/orm/test_deferred.py
-test/orm/test_deprecations.py
-test/orm/test_descriptor.py
-test/orm/test_dynamic.py
-test/orm/test_eager_relations.py
-test/orm/test_evaluator.py
-test/orm/test_events.py
-test/orm/test_expire.py
-test/orm/test_froms.py
-test/orm/test_generative.py
-test/orm/test_hasparent.py
-test/orm/test_immediate_load.py
-test/orm/test_inspect.py
-test/orm/test_instrumentation.py
-test/orm/test_joins.py
-test/orm/test_lazy_relations.py
-test/orm/test_load_on_fks.py
-test/orm/test_loading.py
-test/orm/test_lockmode.py
-test/orm/test_manytomany.py
-test/orm/test_mapper.py
-test/orm/test_merge.py
-test/orm/test_naturalpks.py
-test/orm/test_of_type.py
-test/orm/test_onetoone.py
-test/orm/test_options.py
-test/orm/test_pickled.py
-test/orm/test_query.py
-test/orm/test_rel_fn.py
-test/orm/test_relationships.py
-test/orm/test_scoping.py
-test/orm/test_selectable.py
-test/orm/test_selectin_relations.py
-test/orm/test_session.py
-test/orm/test_subquery_relations.py
-test/orm/test_sync.py
-test/orm/test_transaction.py
-test/orm/test_unitofwork.py
-test/orm/test_unitofworkv2.py
-test/orm/test_update_delete.py
-test/orm/test_utils.py
-test/orm/test_validators.py
-test/orm/test_versioning.py
-test/orm/inheritance/__init__.py
-test/orm/inheritance/_poly_fixtures.py
-test/orm/inheritance/test_abc_inheritance.py
-test/orm/inheritance/test_abc_polymorphic.py
-test/orm/inheritance/test_assorted_poly.py
-test/orm/inheritance/test_basic.py
-test/orm/inheritance/test_concrete.py
-test/orm/inheritance/test_magazine.py
-test/orm/inheritance/test_manytomany.py
-test/orm/inheritance/test_poly_linked_list.py
-test/orm/inheritance/test_poly_loading.py
-test/orm/inheritance/test_poly_persistence.py
-test/orm/inheritance/test_polymorphic_rel.py
-test/orm/inheritance/test_productspec.py
-test/orm/inheritance/test_relationship.py
-test/orm/inheritance/test_selects.py
-test/orm/inheritance/test_single.py
-test/orm/inheritance/test_with_poly.py
-test/perf/invalidate_stresstest.py
-test/perf/orm2010.py
-test/sql/__init__.py
-test/sql/test_case_statement.py
-test/sql/test_compiler.py
-test/sql/test_constraints.py
-test/sql/test_cte.py
-test/sql/test_ddlemit.py
-test/sql/test_defaults.py
-test/sql/test_delete.py
-test/sql/test_functions.py
-test/sql/test_generative.py
-test/sql/test_insert.py
-test/sql/test_insert_exec.py
-test/sql/test_inspect.py
-test/sql/test_join_rewriting.py
-test/sql/test_labels.py
-test/sql/test_lateral.py
-test/sql/test_metadata.py
-test/sql/test_operators.py
-test/sql/test_query.py
-test/sql/test_quote.py
-test/sql/test_resultset.py
-test/sql/test_returning.py
-test/sql/test_rowcount.py
-test/sql/test_selectable.py
-test/sql/test_tablesample.py
-test/sql/test_text.py
-test/sql/test_type_expressions.py
-test/sql/test_types.py
-test/sql/test_unicode.py
-test/sql/test_update.py
-test/sql/test_utils.py
\ No newline at end of file
diff --git a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/dependency_links.txt b/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/installed-files.txt b/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/installed-files.txt
deleted file mode 100644
index 0723748..0000000
--- a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/installed-files.txt
+++ /dev/null
@@ -1,385 +0,0 @@
-..\sqlalchemy\events.py
-..\sqlalchemy\exc.py
-..\sqlalchemy\inspection.py
-..\sqlalchemy\interfaces.py
-..\sqlalchemy\log.py
-..\sqlalchemy\pool.py
-..\sqlalchemy\processors.py
-..\sqlalchemy\schema.py
-..\sqlalchemy\types.py
-..\sqlalchemy\__init__.py
-..\sqlalchemy\connectors\mxodbc.py
-..\sqlalchemy\connectors\pyodbc.py
-..\sqlalchemy\connectors\zxJDBC.py
-..\sqlalchemy\connectors\__init__.py
-..\sqlalchemy\databases\__init__.py
-..\sqlalchemy\dialects\__init__.py
-..\sqlalchemy\engine\base.py
-..\sqlalchemy\engine\default.py
-..\sqlalchemy\engine\interfaces.py
-..\sqlalchemy\engine\reflection.py
-..\sqlalchemy\engine\result.py
-..\sqlalchemy\engine\strategies.py
-..\sqlalchemy\engine\threadlocal.py
-..\sqlalchemy\engine\url.py
-..\sqlalchemy\engine\util.py
-..\sqlalchemy\engine\__init__.py
-..\sqlalchemy\event\api.py
-..\sqlalchemy\event\attr.py
-..\sqlalchemy\event\base.py
-..\sqlalchemy\event\legacy.py
-..\sqlalchemy\event\registry.py
-..\sqlalchemy\event\__init__.py
-..\sqlalchemy\ext\associationproxy.py
-..\sqlalchemy\ext\automap.py
-..\sqlalchemy\ext\baked.py
-..\sqlalchemy\ext\compiler.py
-..\sqlalchemy\ext\horizontal_shard.py
-..\sqlalchemy\ext\hybrid.py
-..\sqlalchemy\ext\indexable.py
-..\sqlalchemy\ext\instrumentation.py
-..\sqlalchemy\ext\mutable.py
-..\sqlalchemy\ext\orderinglist.py
-..\sqlalchemy\ext\serializer.py
-..\sqlalchemy\ext\__init__.py
-..\sqlalchemy\orm\attributes.py
-..\sqlalchemy\orm\base.py
-..\sqlalchemy\orm\collections.py
-..\sqlalchemy\orm\dependency.py
-..\sqlalchemy\orm\deprecated_interfaces.py
-..\sqlalchemy\orm\descriptor_props.py
-..\sqlalchemy\orm\dynamic.py
-..\sqlalchemy\orm\evaluator.py
-..\sqlalchemy\orm\events.py
-..\sqlalchemy\orm\exc.py
-..\sqlalchemy\orm\identity.py
-..\sqlalchemy\orm\instrumentation.py
-..\sqlalchemy\orm\interfaces.py
-..\sqlalchemy\orm\loading.py
-..\sqlalchemy\orm\mapper.py
-..\sqlalchemy\orm\path_registry.py
-..\sqlalchemy\orm\persistence.py
-..\sqlalchemy\orm\properties.py
-..\sqlalchemy\orm\query.py
-..\sqlalchemy\orm\relationships.py
-..\sqlalchemy\orm\scoping.py
-..\sqlalchemy\orm\session.py
-..\sqlalchemy\orm\state.py
-..\sqlalchemy\orm\strategies.py
-..\sqlalchemy\orm\strategy_options.py
-..\sqlalchemy\orm\sync.py
-..\sqlalchemy\orm\unitofwork.py
-..\sqlalchemy\orm\util.py
-..\sqlalchemy\orm\__init__.py
-..\sqlalchemy\sql\annotation.py
-..\sqlalchemy\sql\base.py
-..\sqlalchemy\sql\compiler.py
-..\sqlalchemy\sql\crud.py
-..\sqlalchemy\sql\ddl.py
-..\sqlalchemy\sql\default_comparator.py
-..\sqlalchemy\sql\dml.py
-..\sqlalchemy\sql\elements.py
-..\sqlalchemy\sql\expression.py
-..\sqlalchemy\sql\functions.py
-..\sqlalchemy\sql\naming.py
-..\sqlalchemy\sql\operators.py
-..\sqlalchemy\sql\schema.py
-..\sqlalchemy\sql\selectable.py
-..\sqlalchemy\sql\sqltypes.py
-..\sqlalchemy\sql\type_api.py
-..\sqlalchemy\sql\util.py
-..\sqlalchemy\sql\visitors.py
-..\sqlalchemy\sql\__init__.py
-..\sqlalchemy\testing\assertions.py
-..\sqlalchemy\testing\assertsql.py
-..\sqlalchemy\testing\config.py
-..\sqlalchemy\testing\engines.py
-..\sqlalchemy\testing\entities.py
-..\sqlalchemy\testing\exclusions.py
-..\sqlalchemy\testing\fixtures.py
-..\sqlalchemy\testing\mock.py
-..\sqlalchemy\testing\pickleable.py
-..\sqlalchemy\testing\profiling.py
-..\sqlalchemy\testing\provision.py
-..\sqlalchemy\testing\replay_fixture.py
-..\sqlalchemy\testing\requirements.py
-..\sqlalchemy\testing\runner.py
-..\sqlalchemy\testing\schema.py
-..\sqlalchemy\testing\util.py
-..\sqlalchemy\testing\warnings.py
-..\sqlalchemy\testing\__init__.py
-..\sqlalchemy\util\compat.py
-..\sqlalchemy\util\deprecations.py
-..\sqlalchemy\util\langhelpers.py
-..\sqlalchemy\util\queue.py
-..\sqlalchemy\util\topological.py
-..\sqlalchemy\util\_collections.py
-..\sqlalchemy\util\__init__.py
-..\sqlalchemy\dialects\firebird\base.py
-..\sqlalchemy\dialects\firebird\fdb.py
-..\sqlalchemy\dialects\firebird\kinterbasdb.py
-..\sqlalchemy\dialects\firebird\__init__.py
-..\sqlalchemy\dialects\mssql\adodbapi.py
-..\sqlalchemy\dialects\mssql\base.py
-..\sqlalchemy\dialects\mssql\information_schema.py
-..\sqlalchemy\dialects\mssql\mxodbc.py
-..\sqlalchemy\dialects\mssql\pymssql.py
-..\sqlalchemy\dialects\mssql\pyodbc.py
-..\sqlalchemy\dialects\mssql\zxjdbc.py
-..\sqlalchemy\dialects\mssql\__init__.py
-..\sqlalchemy\dialects\mysql\base.py
-..\sqlalchemy\dialects\mysql\cymysql.py
-..\sqlalchemy\dialects\mysql\dml.py
-..\sqlalchemy\dialects\mysql\enumerated.py
-..\sqlalchemy\dialects\mysql\gaerdbms.py
-..\sqlalchemy\dialects\mysql\json.py
-..\sqlalchemy\dialects\mysql\mysqlconnector.py
-..\sqlalchemy\dialects\mysql\mysqldb.py
-..\sqlalchemy\dialects\mysql\oursql.py
-..\sqlalchemy\dialects\mysql\pymysql.py
-..\sqlalchemy\dialects\mysql\pyodbc.py
-..\sqlalchemy\dialects\mysql\reflection.py
-..\sqlalchemy\dialects\mysql\types.py
-..\sqlalchemy\dialects\mysql\zxjdbc.py
-..\sqlalchemy\dialects\mysql\__init__.py
-..\sqlalchemy\dialects\oracle\base.py
-..\sqlalchemy\dialects\oracle\cx_oracle.py
-..\sqlalchemy\dialects\oracle\zxjdbc.py
-..\sqlalchemy\dialects\oracle\__init__.py
-..\sqlalchemy\dialects\postgresql\array.py
-..\sqlalchemy\dialects\postgresql\base.py
-..\sqlalchemy\dialects\postgresql\dml.py
-..\sqlalchemy\dialects\postgresql\ext.py
-..\sqlalchemy\dialects\postgresql\hstore.py
-..\sqlalchemy\dialects\postgresql\json.py
-..\sqlalchemy\dialects\postgresql\pg8000.py
-..\sqlalchemy\dialects\postgresql\psycopg2.py
-..\sqlalchemy\dialects\postgresql\psycopg2cffi.py
-..\sqlalchemy\dialects\postgresql\pygresql.py
-..\sqlalchemy\dialects\postgresql\pypostgresql.py
-..\sqlalchemy\dialects\postgresql\ranges.py
-..\sqlalchemy\dialects\postgresql\zxjdbc.py
-..\sqlalchemy\dialects\postgresql\__init__.py
-..\sqlalchemy\dialects\sqlite\base.py
-..\sqlalchemy\dialects\sqlite\pysqlcipher.py
-..\sqlalchemy\dialects\sqlite\pysqlite.py
-..\sqlalchemy\dialects\sqlite\__init__.py
-..\sqlalchemy\dialects\sybase\base.py
-..\sqlalchemy\dialects\sybase\mxodbc.py
-..\sqlalchemy\dialects\sybase\pyodbc.py
-..\sqlalchemy\dialects\sybase\pysybase.py
-..\sqlalchemy\dialects\sybase\__init__.py
-..\sqlalchemy\ext\declarative\api.py
-..\sqlalchemy\ext\declarative\base.py
-..\sqlalchemy\ext\declarative\clsregistry.py
-..\sqlalchemy\ext\declarative\__init__.py
-..\sqlalchemy\testing\plugin\bootstrap.py
-..\sqlalchemy\testing\plugin\noseplugin.py
-..\sqlalchemy\testing\plugin\plugin_base.py
-..\sqlalchemy\testing\plugin\pytestplugin.py
-..\sqlalchemy\testing\plugin\__init__.py
-..\sqlalchemy\testing\suite\test_ddl.py
-..\sqlalchemy\testing\suite\test_dialect.py
-..\sqlalchemy\testing\suite\test_insert.py
-..\sqlalchemy\testing\suite\test_reflection.py
-..\sqlalchemy\testing\suite\test_results.py
-..\sqlalchemy\testing\suite\test_select.py
-..\sqlalchemy\testing\suite\test_sequence.py
-..\sqlalchemy\testing\suite\test_types.py
-..\sqlalchemy\testing\suite\test_update_delete.py
-..\sqlalchemy\testing\suite\__init__.py
-..\sqlalchemy\__pycache__\events.cpython-36.pyc
-..\sqlalchemy\__pycache__\exc.cpython-36.pyc
-..\sqlalchemy\__pycache__\inspection.cpython-36.pyc
-..\sqlalchemy\__pycache__\interfaces.cpython-36.pyc
-..\sqlalchemy\__pycache__\log.cpython-36.pyc
-..\sqlalchemy\__pycache__\pool.cpython-36.pyc
-..\sqlalchemy\__pycache__\processors.cpython-36.pyc
-..\sqlalchemy\__pycache__\schema.cpython-36.pyc
-..\sqlalchemy\__pycache__\types.cpython-36.pyc
-..\sqlalchemy\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\connectors\__pycache__\mxodbc.cpython-36.pyc
-..\sqlalchemy\connectors\__pycache__\pyodbc.cpython-36.pyc
-..\sqlalchemy\connectors\__pycache__\zxJDBC.cpython-36.pyc
-..\sqlalchemy\connectors\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\databases\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\dialects\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\default.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\interfaces.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\reflection.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\result.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\strategies.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\threadlocal.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\url.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\util.cpython-36.pyc
-..\sqlalchemy\engine\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\event\__pycache__\api.cpython-36.pyc
-..\sqlalchemy\event\__pycache__\attr.cpython-36.pyc
-..\sqlalchemy\event\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\event\__pycache__\legacy.cpython-36.pyc
-..\sqlalchemy\event\__pycache__\registry.cpython-36.pyc
-..\sqlalchemy\event\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\associationproxy.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\automap.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\baked.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\compiler.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\horizontal_shard.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\hybrid.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\indexable.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\instrumentation.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\mutable.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\orderinglist.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\serializer.cpython-36.pyc
-..\sqlalchemy\ext\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\attributes.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\collections.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\dependency.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\deprecated_interfaces.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\descriptor_props.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\dynamic.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\evaluator.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\events.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\exc.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\identity.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\instrumentation.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\interfaces.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\loading.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\mapper.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\path_registry.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\persistence.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\properties.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\query.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\relationships.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\scoping.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\session.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\state.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\strategies.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\strategy_options.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\sync.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\unitofwork.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\util.cpython-36.pyc
-..\sqlalchemy\orm\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\annotation.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\compiler.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\crud.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\ddl.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\default_comparator.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\dml.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\elements.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\expression.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\functions.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\naming.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\operators.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\schema.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\selectable.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\sqltypes.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\type_api.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\util.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\visitors.cpython-36.pyc
-..\sqlalchemy\sql\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\assertions.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\assertsql.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\config.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\engines.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\entities.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\exclusions.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\fixtures.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\mock.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\pickleable.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\profiling.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\provision.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\replay_fixture.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\requirements.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\runner.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\schema.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\util.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\warnings.cpython-36.pyc
-..\sqlalchemy\testing\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\util\__pycache__\compat.cpython-36.pyc
-..\sqlalchemy\util\__pycache__\deprecations.cpython-36.pyc
-..\sqlalchemy\util\__pycache__\langhelpers.cpython-36.pyc
-..\sqlalchemy\util\__pycache__\queue.cpython-36.pyc
-..\sqlalchemy\util\__pycache__\topological.cpython-36.pyc
-..\sqlalchemy\util\__pycache__\_collections.cpython-36.pyc
-..\sqlalchemy\util\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\dialects\firebird\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\dialects\firebird\__pycache__\fdb.cpython-36.pyc
-..\sqlalchemy\dialects\firebird\__pycache__\kinterbasdb.cpython-36.pyc
-..\sqlalchemy\dialects\firebird\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\dialects\mssql\__pycache__\adodbapi.cpython-36.pyc
-..\sqlalchemy\dialects\mssql\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\dialects\mssql\__pycache__\information_schema.cpython-36.pyc
-..\sqlalchemy\dialects\mssql\__pycache__\mxodbc.cpython-36.pyc
-..\sqlalchemy\dialects\mssql\__pycache__\pymssql.cpython-36.pyc
-..\sqlalchemy\dialects\mssql\__pycache__\pyodbc.cpython-36.pyc
-..\sqlalchemy\dialects\mssql\__pycache__\zxjdbc.cpython-36.pyc
-..\sqlalchemy\dialects\mssql\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\cymysql.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\dml.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\enumerated.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\gaerdbms.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\json.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\mysqlconnector.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\mysqldb.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\oursql.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\pymysql.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\pyodbc.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\reflection.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\types.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\zxjdbc.cpython-36.pyc
-..\sqlalchemy\dialects\mysql\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\dialects\oracle\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\dialects\oracle\__pycache__\cx_oracle.cpython-36.pyc
-..\sqlalchemy\dialects\oracle\__pycache__\zxjdbc.cpython-36.pyc
-..\sqlalchemy\dialects\oracle\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\array.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\dml.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\ext.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\hstore.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\json.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\pg8000.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\psycopg2.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\psycopg2cffi.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\pygresql.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\pypostgresql.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\ranges.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\zxjdbc.cpython-36.pyc
-..\sqlalchemy\dialects\postgresql\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\dialects\sqlite\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\dialects\sqlite\__pycache__\pysqlcipher.cpython-36.pyc
-..\sqlalchemy\dialects\sqlite\__pycache__\pysqlite.cpython-36.pyc
-..\sqlalchemy\dialects\sqlite\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\dialects\sybase\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\dialects\sybase\__pycache__\mxodbc.cpython-36.pyc
-..\sqlalchemy\dialects\sybase\__pycache__\pyodbc.cpython-36.pyc
-..\sqlalchemy\dialects\sybase\__pycache__\pysybase.cpython-36.pyc
-..\sqlalchemy\dialects\sybase\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\ext\declarative\__pycache__\api.cpython-36.pyc
-..\sqlalchemy\ext\declarative\__pycache__\base.cpython-36.pyc
-..\sqlalchemy\ext\declarative\__pycache__\clsregistry.cpython-36.pyc
-..\sqlalchemy\ext\declarative\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\testing\plugin\__pycache__\bootstrap.cpython-36.pyc
-..\sqlalchemy\testing\plugin\__pycache__\noseplugin.cpython-36.pyc
-..\sqlalchemy\testing\plugin\__pycache__\plugin_base.cpython-36.pyc
-..\sqlalchemy\testing\plugin\__pycache__\pytestplugin.cpython-36.pyc
-..\sqlalchemy\testing\plugin\__pycache__\__init__.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\test_ddl.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\test_dialect.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\test_insert.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\test_reflection.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\test_results.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\test_select.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\test_sequence.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\test_types.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\test_update_delete.cpython-36.pyc
-..\sqlalchemy\testing\suite\__pycache__\__init__.cpython-36.pyc
-dependency_links.txt
-PKG-INFO
-requires.txt
-SOURCES.txt
-top_level.txt
diff --git a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/requires.txt b/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/requires.txt
deleted file mode 100644
index f2a83ed..0000000
--- a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/requires.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-[mssql_pymssql]
-pymssql
-
-[mssql_pyodbc]
-pyodbc
-
-[mysql]
-mysqlclient
-
-[oracle]
-cx_oracle
-
-[postgresql]
-psycopg2
-
-[postgresql_pg8000]
-pg8000
-
-[postgresql_psycopg2cffi]
-psycopg2cffi
-
-[pymysql]
-pymysql
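The bracketed sections in the requires.txt just removed are setuptools *extras*: optional dependency groups that pip resolves only on request, e.g. `pip install "SQLAlchemy[postgresql]"` pulls in psycopg2. A minimal sketch of reading those extras back from an installed distribution, assuming SQLAlchemy is present locally and Python 3.8+ for the stdlib `importlib.metadata` module::

    from importlib.metadata import metadata, requires

    # Each optional section in requires.txt surfaces as a Provides-Extra
    # name in the distribution metadata, and its pins reappear in
    # requires() carrying an 'extra == "..."' environment marker.
    meta = metadata("SQLAlchemy")
    print(meta.get_all("Provides-Extra"))   # e.g. ['mysql', 'oracle', 'postgresql', ...]
    for req in requires("SQLAlchemy") or []:
        print(req)                          # e.g. 'psycopg2; extra == "postgresql"'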
diff --git a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/top_level.txt b/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/top_level.txt
deleted file mode 100644
index 39fb2be..0000000
--- a/venv/Lib/site-packages/SQLAlchemy-1.2.3-py3.6.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-sqlalchemy
diff --git a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/PKG-INFO b/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/PKG-INFO
deleted file mode 100644
index c5694fc..0000000
--- a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/PKG-INFO
+++ /dev/null
@@ -1,22 +0,0 @@
-Metadata-Version: 1.1
-Name: WTForms
-Version: 2.1
-Summary: A flexible forms validation and rendering library for python web development.
-Home-page: http://wtforms.simplecodes.com/
-Author: Thomas Johansson, James Crasta
-Author-email: wtforms@simplecodes.com
-License: BSD
-Description: UNKNOWN
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Web Environment
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/SOURCES.txt b/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/SOURCES.txt
deleted file mode 100644
index a864290..0000000
--- a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,206 +0,0 @@
-AUTHORS.txt
-CHANGES.rst
-LICENSE.txt
-MANIFEST.in
-README.md
-setup.cfg
-setup.py
-WTForms.egg-info/PKG-INFO
-WTForms.egg-info/SOURCES.txt
-WTForms.egg-info/dependency_links.txt
-WTForms.egg-info/requires.txt
-WTForms.egg-info/top_level.txt
-docs/Makefile
-docs/changes.rst
-docs/conf.py
-docs/contributing.rst
-docs/crash_course.rst
-docs/csrf.rst
-docs/ext.rst
-docs/faq.rst
-docs/fields.rst
-docs/forms.rst
-docs/i18n.rst
-docs/index.rst
-docs/make.bat
-docs/meta.rst
-docs/specific_problems.rst
-docs/validators.rst
-docs/whats_new.rst
-docs/widgets.rst
-docs/_static/docstyles.css
-docs/_static/main.css
-docs/_static/wtforms.png
-docs/_templates/layout.html
-docs/html/.buildinfo
-docs/html/changes.html
-docs/html/contributing.html
-docs/html/crash_course.html
-docs/html/csrf.html
-docs/html/ext.html
-docs/html/faq.html
-docs/html/fields.html
-docs/html/forms.html
-docs/html/genindex.html
-docs/html/i18n.html
-docs/html/index.html
-docs/html/meta.html
-docs/html/objects.inv
-docs/html/py-modindex.html
-docs/html/search.html
-docs/html/searchindex.js
-docs/html/specific_problems.html
-docs/html/validators.html
-docs/html/whats_new.html
-docs/html/widgets.html
-docs/html/_sources/changes.txt
-docs/html/_sources/contributing.txt
-docs/html/_sources/crash_course.txt
-docs/html/_sources/csrf.txt
-docs/html/_sources/ext.txt
-docs/html/_sources/faq.txt
-docs/html/_sources/fields.txt
-docs/html/_sources/forms.txt
-docs/html/_sources/i18n.txt
-docs/html/_sources/index.txt
-docs/html/_sources/meta.txt
-docs/html/_sources/specific_problems.txt
-docs/html/_sources/validators.txt
-docs/html/_sources/whats_new.txt
-docs/html/_sources/widgets.txt
-docs/html/_static/ajax-loader.gif
-docs/html/_static/alabaster.css
-docs/html/_static/basic.css
-docs/html/_static/comment-bright.png
-docs/html/_static/comment-close.png
-docs/html/_static/comment.png
-docs/html/_static/docstyles.css
-docs/html/_static/doctools.js
-docs/html/_static/down-pressed.png
-docs/html/_static/down.png
-docs/html/_static/file.png
-docs/html/_static/jquery-1.11.1.js
-docs/html/_static/jquery.js
-docs/html/_static/main.css
-docs/html/_static/minus.png
-docs/html/_static/plus.png
-docs/html/_static/pygments.css
-docs/html/_static/searchtools.js
-docs/html/_static/underscore-1.3.1.js
-docs/html/_static/underscore.js
-docs/html/_static/up-pressed.png
-docs/html/_static/up.png
-docs/html/_static/websupport.js
-docs/html/_static/wtforms.png
-tests/__init__.py
-tests/common.py
-tests/csrf.py
-tests/ext_csrf.py
-tests/ext_dateutil.py
-tests/ext_sqlalchemy.py
-tests/fields.py
-tests/form.py
-tests/i18n.py
-tests/locale_babel.py
-tests/runtests.py
-tests/test_requirements.txt
-tests/validators.py
-tests/webob_wrapper.py
-tests/widgets.py
-tests/ext_appengine/__init__.py
-tests/ext_appengine/app.yaml
-tests/ext_appengine/gaetest_common.py
-tests/ext_appengine/test_ndb.py
-tests/ext_appengine/tests.py
-tests/ext_django/__init__.py
-tests/ext_django/models.py
-tests/ext_django/tests.py
-tests/ext_django/fixtures/ext_django.json
-wtforms/__init__.py
-wtforms/compat.py
-wtforms/form.py
-wtforms/i18n.py
-wtforms/meta.py
-wtforms/utils.py
-wtforms/validators.py
-wtforms/csrf/__init__.py
-wtforms/csrf/core.py
-wtforms/csrf/session.py
-wtforms/ext/__init__.py
-wtforms/ext/appengine/__init__.py
-wtforms/ext/appengine/db.py
-wtforms/ext/appengine/fields.py
-wtforms/ext/appengine/ndb.py
-wtforms/ext/csrf/__init__.py
-wtforms/ext/csrf/fields.py
-wtforms/ext/csrf/form.py
-wtforms/ext/csrf/session.py
-wtforms/ext/dateutil/__init__.py
-wtforms/ext/dateutil/fields.py
-wtforms/ext/django/__init__.py
-wtforms/ext/django/fields.py
-wtforms/ext/django/i18n.py
-wtforms/ext/django/orm.py
-wtforms/ext/django/templatetags/__init__.py
-wtforms/ext/django/templatetags/wtforms.py
-wtforms/ext/i18n/__init__.py
-wtforms/ext/i18n/form.py
-wtforms/ext/i18n/utils.py
-wtforms/ext/sqlalchemy/__init__.py
-wtforms/ext/sqlalchemy/fields.py
-wtforms/ext/sqlalchemy/orm.py
-wtforms/fields/__init__.py
-wtforms/fields/core.py
-wtforms/fields/html5.py
-wtforms/fields/simple.py
-wtforms/locale/README.md
-wtforms/locale/wtforms.pot
-wtforms/locale/ar/LC_MESSAGES/wtforms.mo
-wtforms/locale/ar/LC_MESSAGES/wtforms.po
-wtforms/locale/ca/LC_MESSAGES/wtforms.mo
-wtforms/locale/ca/LC_MESSAGES/wtforms.po
-wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.mo
-wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.po
-wtforms/locale/cy/LC_MESSAGES/wtforms.mo
-wtforms/locale/cy/LC_MESSAGES/wtforms.po
-wtforms/locale/de/LC_MESSAGES/wtforms.mo
-wtforms/locale/de/LC_MESSAGES/wtforms.po
-wtforms/locale/de_CH/LC_MESSAGES/wtforms.mo
-wtforms/locale/de_CH/LC_MESSAGES/wtforms.po
-wtforms/locale/el/LC_MESSAGES/wtforms.mo
-wtforms/locale/el/LC_MESSAGES/wtforms.po
-wtforms/locale/en/LC_MESSAGES/wtforms.mo
-wtforms/locale/en/LC_MESSAGES/wtforms.po
-wtforms/locale/es/LC_MESSAGES/wtforms.mo
-wtforms/locale/es/LC_MESSAGES/wtforms.po
-wtforms/locale/et/LC_MESSAGES/wtforms.mo
-wtforms/locale/et/LC_MESSAGES/wtforms.po
-wtforms/locale/fa/LC_MESSAGES/wtforms.mo
-wtforms/locale/fa/LC_MESSAGES/wtforms.po
-wtforms/locale/fr/LC_MESSAGES/wtforms.mo
-wtforms/locale/fr/LC_MESSAGES/wtforms.po
-wtforms/locale/it/LC_MESSAGES/wtforms.mo
-wtforms/locale/it/LC_MESSAGES/wtforms.po
-wtforms/locale/ja/LC_MESSAGES/wtforms.mo
-wtforms/locale/ja/LC_MESSAGES/wtforms.po
-wtforms/locale/ko/LC_MESSAGES/wtforms.mo
-wtforms/locale/ko/LC_MESSAGES/wtforms.po
-wtforms/locale/nb/LC_MESSAGES/wtforms.mo
-wtforms/locale/nb/LC_MESSAGES/wtforms.po
-wtforms/locale/nl/LC_MESSAGES/wtforms.mo
-wtforms/locale/nl/LC_MESSAGES/wtforms.po
-wtforms/locale/pl/LC_MESSAGES/wtforms.mo
-wtforms/locale/pl/LC_MESSAGES/wtforms.po
-wtforms/locale/pt/LC_MESSAGES/wtforms.mo
-wtforms/locale/pt/LC_MESSAGES/wtforms.po
-wtforms/locale/ru/LC_MESSAGES/wtforms.mo
-wtforms/locale/ru/LC_MESSAGES/wtforms.po
-wtforms/locale/uk/LC_MESSAGES/wtforms.mo
-wtforms/locale/uk/LC_MESSAGES/wtforms.po
-wtforms/locale/zh/LC_MESSAGES/wtforms.mo
-wtforms/locale/zh/LC_MESSAGES/wtforms.po
-wtforms/locale/zh_TW/LC_MESSAGES/wtforms.mo
-wtforms/locale/zh_TW/LC_MESSAGES/wtforms.po
-wtforms/widgets/__init__.py
-wtforms/widgets/core.py
-wtforms/widgets/html5.py
\ No newline at end of file
diff --git a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/dependency_links.txt b/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/installed-files.txt b/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/installed-files.txt
deleted file mode 100644
index 262d85b..0000000
--- a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/installed-files.txt
+++ /dev/null
@@ -1,132 +0,0 @@
-..\wtforms\compat.py
-..\wtforms\form.py
-..\wtforms\i18n.py
-..\wtforms\meta.py
-..\wtforms\utils.py
-..\wtforms\validators.py
-..\wtforms\__init__.py
-..\wtforms\csrf\core.py
-..\wtforms\csrf\session.py
-..\wtforms\csrf\__init__.py
-..\wtforms\fields\core.py
-..\wtforms\fields\html5.py
-..\wtforms\fields\simple.py
-..\wtforms\fields\__init__.py
-..\wtforms\widgets\core.py
-..\wtforms\widgets\html5.py
-..\wtforms\widgets\__init__.py
-..\wtforms\ext\__init__.py
-..\wtforms\ext\appengine\db.py
-..\wtforms\ext\appengine\fields.py
-..\wtforms\ext\appengine\ndb.py
-..\wtforms\ext\appengine\__init__.py
-..\wtforms\ext\csrf\fields.py
-..\wtforms\ext\csrf\form.py
-..\wtforms\ext\csrf\session.py
-..\wtforms\ext\csrf\__init__.py
-..\wtforms\ext\dateutil\fields.py
-..\wtforms\ext\dateutil\__init__.py
-..\wtforms\ext\django\fields.py
-..\wtforms\ext\django\i18n.py
-..\wtforms\ext\django\orm.py
-..\wtforms\ext\django\__init__.py
-..\wtforms\ext\django\templatetags\wtforms.py
-..\wtforms\ext\django\templatetags\__init__.py
-..\wtforms\ext\i18n\form.py
-..\wtforms\ext\i18n\utils.py
-..\wtforms\ext\i18n\__init__.py
-..\wtforms\ext\sqlalchemy\fields.py
-..\wtforms\ext\sqlalchemy\orm.py
-..\wtforms\ext\sqlalchemy\__init__.py
-..\wtforms\locale\wtforms.pot
-..\wtforms\locale\ar\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\ar\LC_MESSAGES\wtforms.po
-..\wtforms\locale\ca\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\ca\LC_MESSAGES\wtforms.po
-..\wtforms\locale\cs_CZ\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\cs_CZ\LC_MESSAGES\wtforms.po
-..\wtforms\locale\cy\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\cy\LC_MESSAGES\wtforms.po
-..\wtforms\locale\de\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\de\LC_MESSAGES\wtforms.po
-..\wtforms\locale\de_CH\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\de_CH\LC_MESSAGES\wtforms.po
-..\wtforms\locale\el\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\el\LC_MESSAGES\wtforms.po
-..\wtforms\locale\en\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\en\LC_MESSAGES\wtforms.po
-..\wtforms\locale\es\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\es\LC_MESSAGES\wtforms.po
-..\wtforms\locale\et\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\et\LC_MESSAGES\wtforms.po
-..\wtforms\locale\fa\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\fa\LC_MESSAGES\wtforms.po
-..\wtforms\locale\fr\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\fr\LC_MESSAGES\wtforms.po
-..\wtforms\locale\it\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\it\LC_MESSAGES\wtforms.po
-..\wtforms\locale\ja\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\ja\LC_MESSAGES\wtforms.po
-..\wtforms\locale\ko\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\ko\LC_MESSAGES\wtforms.po
-..\wtforms\locale\nb\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\nb\LC_MESSAGES\wtforms.po
-..\wtforms\locale\nl\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\nl\LC_MESSAGES\wtforms.po
-..\wtforms\locale\pl\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\pl\LC_MESSAGES\wtforms.po
-..\wtforms\locale\pt\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\pt\LC_MESSAGES\wtforms.po
-..\wtforms\locale\ru\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\ru\LC_MESSAGES\wtforms.po
-..\wtforms\locale\uk\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\uk\LC_MESSAGES\wtforms.po
-..\wtforms\locale\zh\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\zh\LC_MESSAGES\wtforms.po
-..\wtforms\locale\zh_TW\LC_MESSAGES\wtforms.mo
-..\wtforms\locale\zh_TW\LC_MESSAGES\wtforms.po
-..\wtforms\__pycache__\compat.cpython-36.pyc
-..\wtforms\__pycache__\form.cpython-36.pyc
-..\wtforms\__pycache__\i18n.cpython-36.pyc
-..\wtforms\__pycache__\meta.cpython-36.pyc
-..\wtforms\__pycache__\utils.cpython-36.pyc
-..\wtforms\__pycache__\validators.cpython-36.pyc
-..\wtforms\__pycache__\__init__.cpython-36.pyc
-..\wtforms\csrf\__pycache__\core.cpython-36.pyc
-..\wtforms\csrf\__pycache__\session.cpython-36.pyc
-..\wtforms\csrf\__pycache__\__init__.cpython-36.pyc
-..\wtforms\fields\__pycache__\core.cpython-36.pyc
-..\wtforms\fields\__pycache__\html5.cpython-36.pyc
-..\wtforms\fields\__pycache__\simple.cpython-36.pyc
-..\wtforms\fields\__pycache__\__init__.cpython-36.pyc
-..\wtforms\widgets\__pycache__\core.cpython-36.pyc
-..\wtforms\widgets\__pycache__\html5.cpython-36.pyc
-..\wtforms\widgets\__pycache__\__init__.cpython-36.pyc
-..\wtforms\ext\__pycache__\__init__.cpython-36.pyc
-..\wtforms\ext\appengine\__pycache__\db.cpython-36.pyc
-..\wtforms\ext\appengine\__pycache__\fields.cpython-36.pyc
-..\wtforms\ext\appengine\__pycache__\ndb.cpython-36.pyc
-..\wtforms\ext\appengine\__pycache__\__init__.cpython-36.pyc
-..\wtforms\ext\csrf\__pycache__\fields.cpython-36.pyc
-..\wtforms\ext\csrf\__pycache__\form.cpython-36.pyc
-..\wtforms\ext\csrf\__pycache__\session.cpython-36.pyc
-..\wtforms\ext\csrf\__pycache__\__init__.cpython-36.pyc
-..\wtforms\ext\dateutil\__pycache__\fields.cpython-36.pyc
-..\wtforms\ext\dateutil\__pycache__\__init__.cpython-36.pyc
-..\wtforms\ext\django\__pycache__\fields.cpython-36.pyc
-..\wtforms\ext\django\__pycache__\i18n.cpython-36.pyc
-..\wtforms\ext\django\__pycache__\orm.cpython-36.pyc
-..\wtforms\ext\django\__pycache__\__init__.cpython-36.pyc
-..\wtforms\ext\django\templatetags\__pycache__\wtforms.cpython-36.pyc
-..\wtforms\ext\django\templatetags\__pycache__\__init__.cpython-36.pyc
-..\wtforms\ext\i18n\__pycache__\form.cpython-36.pyc
-..\wtforms\ext\i18n\__pycache__\utils.cpython-36.pyc
-..\wtforms\ext\i18n\__pycache__\__init__.cpython-36.pyc
-..\wtforms\ext\sqlalchemy\__pycache__\fields.cpython-36.pyc
-..\wtforms\ext\sqlalchemy\__pycache__\orm.cpython-36.pyc
-..\wtforms\ext\sqlalchemy\__pycache__\__init__.cpython-36.pyc
-dependency_links.txt
-PKG-INFO
-requires.txt
-SOURCES.txt
-top_level.txt
diff --git a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/requires.txt b/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/requires.txt
deleted file mode 100644
index b937ec2..0000000
--- a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/requires.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-
-[:python_version=="2.6"]
-ordereddict
-
-[Locale]
-Babel>=1.3
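Unlike the named `[Locale]` extra, the `[:python_version=="2.6"]` section above is a *conditional* dependency: the text after the colon is a PEP 508 environment marker that pip evaluates against the running interpreter, so `ordereddict` is installed only on Python 2.6. A sketch of how such a marker evaluates, assuming the third-party `packaging` library is available (`pip install packaging`)::

    from packaging.markers import Marker

    # The marker from the requires.txt section header, evaluated against
    # the current interpreter's environment.
    marker = Marker('python_version == "2.6"')
    print(marker.evaluate())  # False on any modern interpreter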
diff --git a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/top_level.txt b/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/top_level.txt
deleted file mode 100644
index 26d80fd..0000000
--- a/venv/Lib/site-packages/WTForms-2.1-py3.6.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-wtforms
diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/DESCRIPTION.rst
deleted file mode 100644
index 675f08d..0000000
--- a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,80 +0,0 @@
-Werkzeug
-========
-
-Werkzeug is a comprehensive `WSGI`_ web application library. It began as
-a simple collection of various utilities for WSGI applications and has
-become one of the most advanced WSGI utility libraries.
-
-It includes:
-
-* An interactive debugger that allows inspecting stack traces and source
- code in the browser with an interactive interpreter for any frame in
- the stack.
-* A full-featured request object with objects to interact with headers,
- query args, form data, files, and cookies.
-* A response object that can wrap other WSGI applications and handle
- streaming data.
-* A routing system for matching URLs to endpoints and generating URLs
- for endpoints, with an extensible system for capturing variables from
- URLs.
-* HTTP utilities to handle entity tags, cache control, dates, user
- agents, cookies, files, and more.
-* A threaded WSGI server for use while developing applications locally.
-* A test client for simulating HTTP requests during testing without
- requiring running a server.
-
-Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up
-to the developer to choose a template engine, database adapter, and even
-how to handle requests. It can be used to build all sorts of end user
-applications such as blogs, wikis, or bulletin boards.
-
-`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while
-providing more structure and patterns for defining powerful
-applications.
-
-
-Installing
-----------
-
-Install and update using `pip`_:
-
-.. code-block:: text
-
- pip install -U Werkzeug
-
-
-A Simple Example
-----------------
-
-.. code-block:: python
-
- from werkzeug.wrappers import Request, Response
-
- @Request.application
- def application(request):
- return Response('Hello, World!')
-
- if __name__ == '__main__':
- from werkzeug.serving import run_simple
- run_simple('localhost', 4000, application)
-
-
-Links
------
-
-* Website: https://www.palletsprojects.com/p/werkzeug/
-* Releases: https://pypi.org/project/Werkzeug/
-* Code: https://github.com/pallets/werkzeug
-* Issue tracker: https://github.com/pallets/werkzeug/issues
-* Test status:
-
- * Linux, Mac: https://travis-ci.org/pallets/werkzeug
- * Windows: https://ci.appveyor.com/project/davidism/werkzeug
-
-* Test coverage: https://codecov.io/gh/pallets/werkzeug
-
-.. _WSGI: https://wsgi.readthedocs.io/en/latest/
-.. _Flask: https://www.palletsprojects.com/p/flask/
-.. _pip: https://pip.pypa.io/en/stable/quickstart/
-
-
diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/INSTALLER b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/LICENSE.txt b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/LICENSE.txt
deleted file mode 100644
index 1cc75bb..0000000
--- a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Copyright © 2007 by the Pallets team.
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
-* Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
-USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/METADATA b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/METADATA
deleted file mode 100644
index bfc3c4e..0000000
--- a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/METADATA
+++ /dev/null
@@ -1,116 +0,0 @@
-Metadata-Version: 2.0
-Name: Werkzeug
-Version: 0.14.1
-Summary: The comprehensive WSGI web application library.
-Home-page: https://www.palletsprojects.org/p/werkzeug/
-Author: Armin Ronacher
-Author-email: armin.ronacher@active-4.com
-License: BSD
-Description-Content-Type: UNKNOWN
-Platform: any
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Web Environment
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Provides-Extra: dev
-Requires-Dist: coverage; extra == 'dev'
-Requires-Dist: pytest; extra == 'dev'
-Requires-Dist: sphinx; extra == 'dev'
-Requires-Dist: tox; extra == 'dev'
-Provides-Extra: termcolor
-Requires-Dist: termcolor; extra == 'termcolor'
-Provides-Extra: watchdog
-Requires-Dist: watchdog; extra == 'watchdog'
-
-Werkzeug
-========
-
-Werkzeug is a comprehensive `WSGI`_ web application library. It began as
-a simple collection of various utilities for WSGI applications and has
-become one of the most advanced WSGI utility libraries.
-
-It includes:
-
-* An interactive debugger that allows inspecting stack traces and source
- code in the browser with an interactive interpreter for any frame in
- the stack.
-* A full-featured request object with objects to interact with headers,
- query args, form data, files, and cookies.
-* A response object that can wrap other WSGI applications and handle
- streaming data.
-* A routing system for matching URLs to endpoints and generating URLs
- for endpoints, with an extensible system for capturing variables from
- URLs.
-* HTTP utilities to handle entity tags, cache control, dates, user
- agents, cookies, files, and more.
-* A threaded WSGI server for use while developing applications locally.
-* A test client for simulating HTTP requests during testing without
- requiring running a server.
-
-Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up
-to the developer to choose a template engine, database adapter, and even
-how to handle requests. It can be used to build all sorts of end user
-applications such as blogs, wikis, or bulletin boards.
-
-`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while
-providing more structure and patterns for defining powerful
-applications.
-
-
-Installing
-----------
-
-Install and update using `pip`_:
-
-.. code-block:: text
-
- pip install -U Werkzeug
-
-
-A Simple Example
-----------------
-
-.. code-block:: python
-
- from werkzeug.wrappers import Request, Response
-
- @Request.application
- def application(request):
- return Response('Hello, World!')
-
- if __name__ == '__main__':
- from werkzeug.serving import run_simple
- run_simple('localhost', 4000, application)
-
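-The test client mentioned in the feature list above can exercise this
-application without running a server. A minimal sketch, reusing the
-``application`` callable from the example:
-
-.. code-block:: python
-
-    from werkzeug.test import Client
-    from werkzeug.wrappers import BaseResponse
-
-    client = Client(application, BaseResponse)
-    response = client.get('/')
-    assert response.data == b'Hello, World!'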
-
-Links
------
-
-* Website: https://www.palletsprojects.com/p/werkzeug/
-* Releases: https://pypi.org/project/Werkzeug/
-* Code: https://github.com/pallets/werkzeug
-* Issue tracker: https://github.com/pallets/werkzeug/issues
-* Test status:
-
- * Linux, Mac: https://travis-ci.org/pallets/werkzeug
- * Windows: https://ci.appveyor.com/project/davidism/werkzeug
-
-* Test coverage: https://codecov.io/gh/pallets/werkzeug
-
-.. _WSGI: https://wsgi.readthedocs.io/en/latest/
-.. _Flask: https://www.palletsprojects.com/p/flask/
-.. _pip: https://pip.pypa.io/en/stable/quickstart/
-
-
diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/RECORD b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/RECORD
deleted file mode 100644
index 357d9b7..0000000
--- a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/RECORD
+++ /dev/null
@@ -1,97 +0,0 @@
-Werkzeug-0.14.1.dist-info/DESCRIPTION.rst,sha256=rOCN36jwsWtWsTpqPG96z7FMilB5qI1CIARSKRuUmz8,2452
-Werkzeug-0.14.1.dist-info/LICENSE.txt,sha256=xndz_dD4m269AF9l_Xbl5V3tM1N3C1LoZC2PEPxWO-8,1534
-Werkzeug-0.14.1.dist-info/METADATA,sha256=FbfadrPdJNUWAxMOKxGUtHe5R3IDSBKYYmAz3FvI3uY,3872
-Werkzeug-0.14.1.dist-info/RECORD,,
-Werkzeug-0.14.1.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110
-Werkzeug-0.14.1.dist-info/metadata.json,sha256=4489UTt6HBp2NQil95-pBkjU4Je93SMHvMxZ_rjOpqA,1452
-Werkzeug-0.14.1.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9
-werkzeug/__init__.py,sha256=NR0d4n_-U9BLVKlOISean3zUt2vBwhvK-AZE6M0sC0k,6842
-werkzeug/_compat.py,sha256=8c4U9o6A_TR9nKCcTbpZNxpqCXcXDVIbFawwKM2s92c,6311
-werkzeug/_internal.py,sha256=GhEyGMlsSz_tYjsDWO9TG35VN7304MM8gjKDrXLEdVc,13873
-werkzeug/_reloader.py,sha256=AyPphcOHPbu6qzW0UbrVvTDJdre5WgpxbhIJN_TqzUc,9264
-werkzeug/datastructures.py,sha256=3IgNKNqrz-ZjmAG7y3YgEYK-enDiMT_b652PsypWcYg,90080
-werkzeug/exceptions.py,sha256=3wp95Hqj9FqV8MdikV99JRcHse_fSMn27V8tgP5Hw2c,20505
-werkzeug/filesystem.py,sha256=hHWeWo_gqLMzTRfYt8-7n2wWcWUNTnDyudQDLOBEICE,2175
-werkzeug/formparser.py,sha256=mUuCwjzjb8_E4RzrAT2AioLuZSYpqR1KXTK6LScRYzA,21722
-werkzeug/http.py,sha256=RQg4MJuhRv2isNRiEh__Phh09ebpfT3Kuu_GfrZ54_c,40079
-werkzeug/local.py,sha256=QdQhWV5L8p1Y1CJ1CDStwxaUs24SuN5aebHwjVD08C8,14553
-werkzeug/posixemulation.py,sha256=xEF2Bxc-vUCPkiu4IbfWVd3LW7DROYAT-ExW6THqyzw,3519
-werkzeug/routing.py,sha256=2JVtdSgxKGeANy4Z_FP-dKESvKtkYGCZ1J2fARCLGCY,67214
-werkzeug/script.py,sha256=DwaVDcXdaOTffdNvlBdLitxWXjKaRVT32VbhDtljFPY,11365
-werkzeug/security.py,sha256=0m107exslz4QJLWQCpfQJ04z3re4eGHVggRvrQVAdWc,9193
-werkzeug/serving.py,sha256=A0flnIJHufdn2QJ9oeuHfrXwP3LzP8fn3rNW6hbxKUg,31926
-werkzeug/test.py,sha256=XmECSmnpASiYQTct4oMiWr0LT5jHWCtKqnpYKZd2ui8,36100
-werkzeug/testapp.py,sha256=3HQRW1sHZKXuAjCvFMet4KXtQG3loYTFnvn6LWt-4zI,9396
-werkzeug/urls.py,sha256=dUeLg2IeTm0WLmSvFeD4hBZWGdOs-uHudR5-t8n9zPo,36771
-werkzeug/useragents.py,sha256=BhYMf4cBTHyN4U0WsQedePIocmNlH_34C-UwqSThGCc,5865
-werkzeug/utils.py,sha256=BrY1j0DHQ8RTb0K1StIobKuMJhN9SQQkWEARbrh2qpk,22972
-werkzeug/websocket.py,sha256=PpSeDxXD_0UsPAa5hQhQNM6mxibeUgn8lA8eRqiS0vM,11344
-werkzeug/wrappers.py,sha256=kbyL_aFjxELwPgMwfNCYjKu-CR6kNkh-oO8wv3GXbk8,84511
-werkzeug/wsgi.py,sha256=1Nob-aeChWQf7MsiicO8RZt6J90iRzEcik44ev9Qu8s,49347
-werkzeug/contrib/__init__.py,sha256=f7PfttZhbrImqpr5Ezre8CXgwvcGUJK7zWNpO34WWrw,623
-werkzeug/contrib/atom.py,sha256=qqfJcfIn2RYY-3hO3Oz0aLq9YuNubcPQ_KZcNsDwVJo,15575
-werkzeug/contrib/cache.py,sha256=xBImHNj09BmX_7kC5NUCx8f_l4L8_O7zi0jCL21UZKE,32163
-werkzeug/contrib/fixers.py,sha256=gR06T-w71ur-tHQ_31kP_4jpOncPJ4Wc1dOqTvYusr8,10179
-werkzeug/contrib/iterio.py,sha256=RlqDvGhz0RneTpzE8dVc-yWCUv4nkPl1jEc_EDp2fH0,10814
-werkzeug/contrib/jsrouting.py,sha256=QTmgeDoKXvNK02KzXgx9lr3cAH6fAzpwF5bBdPNvJPs,8564
-werkzeug/contrib/limiter.py,sha256=iS8-ahPZ-JLRnmfIBzxpm7O_s3lPsiDMVWv7llAIDCI,1334
-werkzeug/contrib/lint.py,sha256=Mj9NeUN7s4zIUWeQOAVjrmtZIcl3Mm2yDe9BSIr9YGE,12558
-werkzeug/contrib/profiler.py,sha256=ISwCWvwVyGpDLRBRpLjo_qUWma6GXYBrTAco4PEQSHY,5151
-werkzeug/contrib/securecookie.py,sha256=uWMyHDHY3lkeBRiCSayGqWkAIy4a7xAbSE_Hln9ecqc,12196
-werkzeug/contrib/sessions.py,sha256=39LVNvLbm5JWpbxM79WC2l87MJFbqeISARjwYbkJatw,12577
-werkzeug/contrib/testtools.py,sha256=G9xN-qeihJlhExrIZMCahvQOIDxdL9NiX874jiiHFMs,2453
-werkzeug/contrib/wrappers.py,sha256=v7OYlz7wQtDlS9fey75UiRZ1IkUWqCpzbhsLy4k14Hw,10398
-werkzeug/debug/__init__.py,sha256=uSn9BqCZ5E3ySgpoZtundpROGsn-uYvZtSFiTfAX24M,17452
-werkzeug/debug/console.py,sha256=n3-dsKk1TsjnN-u4ZgmuWCU_HO0qw5IA7ttjhyyMM6I,5607
-werkzeug/debug/repr.py,sha256=bKqstDYGfECpeLerd48s_hxuqK4b6UWnjMu3d_DHO8I,9340
-werkzeug/debug/tbtools.py,sha256=rBudXCmkVdAKIcdhxANxgf09g6kQjJWW9_5bjSpr4OY,18451
-werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673
-werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507
-werkzeug/debug/shared/debugger.js,sha256=PKPVYuyO4SX1hkqLOwCLvmIEO5154WatFYaXE-zIfKI,6264
-werkzeug/debug/shared/jquery.js,sha256=7LkWEzqTdpEfELxcZZlS6wAx5Ff13zZ83lYO2_ujj7g,95957
-werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191
-werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200
-werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818
-werkzeug/debug/shared/style.css,sha256=IEO0PC2pWmh2aEyGCaN--txuWsRCliuhlbEhPDFwh0A,6270
-werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220
-Werkzeug-0.14.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-werkzeug/contrib/__pycache__/atom.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/cache.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/fixers.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/iterio.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/jsrouting.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/limiter.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/lint.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/profiler.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/securecookie.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/sessions.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/testtools.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/wrappers.cpython-36.pyc,,
-werkzeug/contrib/__pycache__/__init__.cpython-36.pyc,,
-werkzeug/debug/__pycache__/console.cpython-36.pyc,,
-werkzeug/debug/__pycache__/repr.cpython-36.pyc,,
-werkzeug/debug/__pycache__/tbtools.cpython-36.pyc,,
-werkzeug/debug/__pycache__/__init__.cpython-36.pyc,,
-werkzeug/__pycache__/datastructures.cpython-36.pyc,,
-werkzeug/__pycache__/exceptions.cpython-36.pyc,,
-werkzeug/__pycache__/filesystem.cpython-36.pyc,,
-werkzeug/__pycache__/formparser.cpython-36.pyc,,
-werkzeug/__pycache__/http.cpython-36.pyc,,
-werkzeug/__pycache__/local.cpython-36.pyc,,
-werkzeug/__pycache__/posixemulation.cpython-36.pyc,,
-werkzeug/__pycache__/routing.cpython-36.pyc,,
-werkzeug/__pycache__/script.cpython-36.pyc,,
-werkzeug/__pycache__/security.cpython-36.pyc,,
-werkzeug/__pycache__/serving.cpython-36.pyc,,
-werkzeug/__pycache__/test.cpython-36.pyc,,
-werkzeug/__pycache__/testapp.cpython-36.pyc,,
-werkzeug/__pycache__/urls.cpython-36.pyc,,
-werkzeug/__pycache__/useragents.cpython-36.pyc,,
-werkzeug/__pycache__/utils.cpython-36.pyc,,
-werkzeug/__pycache__/websocket.cpython-36.pyc,,
-werkzeug/__pycache__/wrappers.cpython-36.pyc,,
-werkzeug/__pycache__/wsgi.cpython-36.pyc,,
-werkzeug/__pycache__/_compat.cpython-36.pyc,,
-werkzeug/__pycache__/_internal.cpython-36.pyc,,
-werkzeug/__pycache__/_reloader.cpython-36.pyc,,
-werkzeug/__pycache__/__init__.cpython-36.pyc,,
diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/WHEEL b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/WHEEL
deleted file mode 100644
index 0de529b..0000000
--- a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.26.0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/metadata.json b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/metadata.json
deleted file mode 100644
index bca8d12..0000000
--- a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"generator": "bdist_wheel (0.26.0)", "summary": "The comprehensive WSGI web application library.", "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"project_urls": {"Home": "https://www.palletsprojects.org/p/werkzeug/"}, "contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}}}, "license": "BSD", "metadata_version": "2.0", "name": "Werkzeug", "platform": "any", "extras": ["dev", "termcolor", "watchdog"], "run_requires": [{"requires": ["coverage", "pytest", "sphinx", "tox"], "extra": "dev"}, {"requires": ["termcolor"], "extra": "termcolor"}, {"requires": ["watchdog"], "extra": "watchdog"}], "version": "0.14.1"}
\ No newline at end of file
diff --git a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/top_level.txt b/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/top_level.txt
deleted file mode 100644
index 6fe8da8..0000000
--- a/venv/Lib/site-packages/Werkzeug-0.14.1.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-werkzeug
diff --git a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/PKG-INFO b/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/PKG-INFO
deleted file mode 100644
index c83856b..0000000
--- a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/PKG-INFO
+++ /dev/null
@@ -1,97 +0,0 @@
-Metadata-Version: 1.1
-Name: alembic
-Version: 0.9.8
-Summary: A database migration tool for SQLAlchemy.
-Home-page: http://bitbucket.org/zzzeek/alembic
-Author: Mike Bayer
-Author-email: mike@zzzcomputing.com
-License: MIT
-Description: Alembic is a database migrations tool written by the author
- of `SQLAlchemy <http://www.sqlalchemy.org>`_. A migrations tool
- offers the following functionality:
-
- * Can emit ALTER statements to a database in order to change
- the structure of tables and other constructs
- * Provides a system whereby "migration scripts" may be constructed;
- each script indicates a particular series of steps that can "upgrade" a
- target database to a new version, and optionally a series of steps that can
- "downgrade" similarly, doing the same steps in reverse.
- * Allows the scripts to execute in some sequential manner.
-
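- A minimal sketch of such a migration script, as referenced above (the
- revision identifiers and the ``account`` table are hypothetical)::
-
-     from alembic import op
-     import sqlalchemy as sa
-
-     revision = 'a1b2c3d4e5f6'   # hypothetical revision id
-     down_revision = None
-
-     def upgrade():
-         op.add_column('account', sa.Column('last_seen', sa.DateTime()))
-
-     def downgrade():
-         op.drop_column('account', 'last_seen')
-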
- The goals of Alembic are:
-
- * Very open ended and transparent configuration and operation. A new
- Alembic environment is generated from a set of templates which is selected
- among a set of options when setup first occurs. The templates then deposit a
- series of scripts that define fully how database connectivity is established
- and how migration scripts are invoked; the migration scripts themselves are
- generated from a template within that series of scripts. The scripts can
- then be further customized to define exactly how databases will be
- interacted with and what structure new migration files should take.
- * Full support for transactional DDL. The default scripts ensure that all
- migrations occur within a transaction - for those databases which support
- this (Postgresql, Microsoft SQL Server), migrations can be tested with no
- need to manually undo changes upon failure.
- * Minimalist script construction. Basic operations like renaming
- tables/columns, adding/removing columns, changing column attributes can be
- performed through one line commands like alter_column(), rename_table(),
- add_constraint(). There is no need to recreate full SQLAlchemy Table
- structures for simple operations like these - the functions themselves
- generate minimalist schema structures behind the scenes to achieve the given
- DDL sequence.
- * "auto generation" of migrations. While real world migrations are far more
- complex than what can be automatically determined, Alembic can still
- eliminate the initial grunt work in generating new migration directives
- from an altered schema. The ``--autogenerate`` feature will inspect the
- current status of a database using SQLAlchemy's schema inspection
- capabilities, compare it to the current state of the database model as
- specified in Python, and generate a series of "candidate" migrations,
- rendering them into a new migration script as Python directives. The
- developer then edits the new file, adding additional directives and data
- migrations as needed, to produce a finished migration. Table and column
- level changes can be detected, with constraints and indexes to follow as
- well.
- * Full support for migrations generated as SQL scripts. Those of us who
- work in corporate environments know that direct access to DDL commands on a
- production database is a rare privilege, and DBAs want textual SQL scripts.
- Alembic's usage model and commands are oriented towards being able to run a
- series of migrations into a textual output file as easily as it runs them
- directly to a database. Care must be taken in this mode to not invoke other
- operations that rely upon in-memory SELECTs of rows - Alembic tries to
- provide helper constructs like bulk_insert() to help with data-oriented
- operations that are compatible with script-based DDL.
- * Non-linear, dependency-graph versioning. Scripts are given UUID
- identifiers similarly to a DVCS, and the linkage of one script to the next
- is achieved via human-editable markers within the scripts themselves.
- The structure of a set of migration files is considered as a
- directed-acyclic graph, meaning any migration file can be dependent
- on any other arbitrary set of migration files, or none at
- all. Through this open-ended system, migration files can be organized
- into branches, multiple roots, and mergepoints, without restriction.
- Commands are provided to produce new branches, roots, and merges of
- branches automatically.
- * Provide a library of ALTER constructs that can be used by any SQLAlchemy
- application. The DDL constructs build upon SQLAlchemy's own DDLElement base
- and can be used standalone by any application or script.
- * At long last, bring SQLite and its inability to ALTER things into the fold,
- but in such a way that SQLite's very special workflow needs are accommodated
- in an explicit way that makes the most of a bad situation, through the
- concept of a "batch" migration, where multiple changes to a table can
- be batched together to form a series of instructions for a single, subsequent
- "move-and-copy" workflow. You can even use "move-and-copy" workflow for
- other databases, if you want to recreate a table in the background
- on a busy system; a short sketch of the batch form follows this list.
-
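- A minimal sketch of that batch form (the ``account`` table is again
- hypothetical)::
-
-     with op.batch_alter_table("account") as batch_op:
-         batch_op.add_column(sa.Column("nickname", sa.String(50)))
-         batch_op.drop_column("last_seen")
-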
- Documentation and status of Alembic is at http://alembic.zzzcomputing.com/
-
-
-Keywords: SQLAlchemy migrations
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Environment :: Console
-Classifier: Intended Audience :: Developers
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Topic :: Database :: Front-Ends
diff --git a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/SOURCES.txt b/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/SOURCES.txt
deleted file mode 100644
index 421697f..0000000
--- a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,210 +0,0 @@
-CHANGES
-LICENSE
-MANIFEST.in
-README.rst
-README.unittests.rst
-run_tests.py
-setup.cfg
-setup.py
-tox.ini
-alembic/__init__.py
-alembic/command.py
-alembic/config.py
-alembic/context.py
-alembic/op.py
-alembic.egg-info/PKG-INFO
-alembic.egg-info/SOURCES.txt
-alembic.egg-info/dependency_links.txt
-alembic.egg-info/entry_points.txt
-alembic.egg-info/not-zip-safe
-alembic.egg-info/requires.txt
-alembic.egg-info/top_level.txt
-alembic/autogenerate/__init__.py
-alembic/autogenerate/api.py
-alembic/autogenerate/compare.py
-alembic/autogenerate/render.py
-alembic/autogenerate/rewriter.py
-alembic/ddl/__init__.py
-alembic/ddl/base.py
-alembic/ddl/impl.py
-alembic/ddl/mssql.py
-alembic/ddl/mysql.py
-alembic/ddl/oracle.py
-alembic/ddl/postgresql.py
-alembic/ddl/sqlite.py
-alembic/operations/__init__.py
-alembic/operations/base.py
-alembic/operations/batch.py
-alembic/operations/ops.py
-alembic/operations/schemaobj.py
-alembic/operations/toimpl.py
-alembic/runtime/__init__.py
-alembic/runtime/environment.py
-alembic/runtime/migration.py
-alembic/script/__init__.py
-alembic/script/base.py
-alembic/script/revision.py
-alembic/templates/generic/README
-alembic/templates/generic/alembic.ini.mako
-alembic/templates/generic/env.py
-alembic/templates/generic/script.py.mako
-alembic/templates/multidb/README
-alembic/templates/multidb/alembic.ini.mako
-alembic/templates/multidb/env.py
-alembic/templates/multidb/script.py.mako
-alembic/templates/pylons/README
-alembic/templates/pylons/alembic.ini.mako
-alembic/templates/pylons/env.py
-alembic/templates/pylons/script.py.mako
-alembic/testing/__init__.py
-alembic/testing/assertions.py
-alembic/testing/compat.py
-alembic/testing/config.py
-alembic/testing/engines.py
-alembic/testing/env.py
-alembic/testing/exclusions.py
-alembic/testing/fixtures.py
-alembic/testing/mock.py
-alembic/testing/provision.py
-alembic/testing/requirements.py
-alembic/testing/runner.py
-alembic/testing/util.py
-alembic/testing/warnings.py
-alembic/testing/plugin/__init__.py
-alembic/testing/plugin/bootstrap.py
-alembic/testing/plugin/noseplugin.py
-alembic/testing/plugin/plugin_base.py
-alembic/testing/plugin/pytestplugin.py
-alembic/util/__init__.py
-alembic/util/compat.py
-alembic/util/exc.py
-alembic/util/langhelpers.py
-alembic/util/messaging.py
-alembic/util/pyfiles.py
-alembic/util/sqla_compat.py
-docs/autogenerate.html
-docs/batch.html
-docs/branches.html
-docs/changelog.html
-docs/cookbook.html
-docs/front.html
-docs/genindex.html
-docs/index.html
-docs/naming.html
-docs/offline.html
-docs/ops.html
-docs/py-modindex.html
-docs/search.html
-docs/searchindex.js
-docs/tutorial.html
-docs/_images/api_overview.png
-docs/_sources/autogenerate.rst.txt
-docs/_sources/batch.rst.txt
-docs/_sources/branches.rst.txt
-docs/_sources/changelog.rst.txt
-docs/_sources/cookbook.rst.txt
-docs/_sources/front.rst.txt
-docs/_sources/index.rst.txt
-docs/_sources/naming.rst.txt
-docs/_sources/offline.rst.txt
-docs/_sources/ops.rst.txt
-docs/_sources/tutorial.rst.txt
-docs/_sources/api/autogenerate.rst.txt
-docs/_sources/api/commands.rst.txt
-docs/_sources/api/config.rst.txt
-docs/_sources/api/ddl.rst.txt
-docs/_sources/api/index.rst.txt
-docs/_sources/api/operations.rst.txt
-docs/_sources/api/overview.rst.txt
-docs/_sources/api/runtime.rst.txt
-docs/_sources/api/script.rst.txt
-docs/_static/basic.css
-docs/_static/changelog.css
-docs/_static/comment-bright.png
-docs/_static/comment-close.png
-docs/_static/comment.png
-docs/_static/doctools.js
-docs/_static/documentation_options.js
-docs/_static/down-pressed.png
-docs/_static/down.png
-docs/_static/file.png
-docs/_static/jquery-3.2.1.js
-docs/_static/jquery.js
-docs/_static/minus.png
-docs/_static/nature.css
-docs/_static/nature_override.css
-docs/_static/plus.png
-docs/_static/pygments.css
-docs/_static/searchtools.js
-docs/_static/site_custom_css.css
-docs/_static/sphinx_paramlinks.css
-docs/_static/underscore-1.3.1.js
-docs/_static/underscore.js
-docs/_static/up-pressed.png
-docs/_static/up.png
-docs/_static/websupport.js
-docs/api/autogenerate.html
-docs/api/commands.html
-docs/api/config.html
-docs/api/ddl.html
-docs/api/index.html
-docs/api/operations.html
-docs/api/overview.html
-docs/api/runtime.html
-docs/api/script.html
-docs/build/Makefile
-docs/build/autogenerate.rst
-docs/build/batch.rst
-docs/build/branches.rst
-docs/build/changelog.rst
-docs/build/conf.py
-docs/build/cookbook.rst
-docs/build/front.rst
-docs/build/index.rst
-docs/build/naming.rst
-docs/build/offline.rst
-docs/build/ops.rst
-docs/build/requirements.txt
-docs/build/tutorial.rst
-docs/build/_static/nature_override.css
-docs/build/_static/site_custom_css.css
-docs/build/_templates/site_custom_sidebars.html
-docs/build/api/api_overview.png
-docs/build/api/autogenerate.rst
-docs/build/api/commands.rst
-docs/build/api/config.rst
-docs/build/api/ddl.rst
-docs/build/api/index.rst
-docs/build/api/operations.rst
-docs/build/api/overview.rst
-docs/build/api/runtime.rst
-docs/build/api/script.rst
-docs/build/unreleased/README.txt
-tests/__init__.py
-tests/_autogen_fixtures.py
-tests/_large_map.py
-tests/conftest.py
-tests/requirements.py
-tests/test_autogen_composition.py
-tests/test_autogen_diffs.py
-tests/test_autogen_fks.py
-tests/test_autogen_indexes.py
-tests/test_autogen_render.py
-tests/test_batch.py
-tests/test_bulk_insert.py
-tests/test_command.py
-tests/test_config.py
-tests/test_environment.py
-tests/test_mssql.py
-tests/test_mysql.py
-tests/test_offline_environment.py
-tests/test_op.py
-tests/test_op_naming_convention.py
-tests/test_oracle.py
-tests/test_postgresql.py
-tests/test_revision.py
-tests/test_script_consumption.py
-tests/test_script_production.py
-tests/test_sqlite.py
-tests/test_version_table.py
-tests/test_version_traversal.py
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/dependency_links.txt b/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/entry_points.txt b/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/entry_points.txt
deleted file mode 100644
index 27ac374..0000000
--- a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/entry_points.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-[console_scripts]
-alembic = alembic.config:main
-
diff --git a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/installed-files.txt b/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/installed-files.txt
deleted file mode 100644
index 561255f..0000000
--- a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/installed-files.txt
+++ /dev/null
@@ -1,137 +0,0 @@
-..\alembic\command.py
-..\alembic\config.py
-..\alembic\context.py
-..\alembic\op.py
-..\alembic\__init__.py
-..\alembic\autogenerate\api.py
-..\alembic\autogenerate\compare.py
-..\alembic\autogenerate\render.py
-..\alembic\autogenerate\rewriter.py
-..\alembic\autogenerate\__init__.py
-..\alembic\ddl\base.py
-..\alembic\ddl\impl.py
-..\alembic\ddl\mssql.py
-..\alembic\ddl\mysql.py
-..\alembic\ddl\oracle.py
-..\alembic\ddl\postgresql.py
-..\alembic\ddl\sqlite.py
-..\alembic\ddl\__init__.py
-..\alembic\operations\base.py
-..\alembic\operations\batch.py
-..\alembic\operations\ops.py
-..\alembic\operations\schemaobj.py
-..\alembic\operations\toimpl.py
-..\alembic\operations\__init__.py
-..\alembic\runtime\environment.py
-..\alembic\runtime\migration.py
-..\alembic\runtime\__init__.py
-..\alembic\script\base.py
-..\alembic\script\revision.py
-..\alembic\script\__init__.py
-..\alembic\testing\assertions.py
-..\alembic\testing\compat.py
-..\alembic\testing\config.py
-..\alembic\testing\engines.py
-..\alembic\testing\env.py
-..\alembic\testing\exclusions.py
-..\alembic\testing\fixtures.py
-..\alembic\testing\mock.py
-..\alembic\testing\provision.py
-..\alembic\testing\requirements.py
-..\alembic\testing\runner.py
-..\alembic\testing\util.py
-..\alembic\testing\warnings.py
-..\alembic\testing\__init__.py
-..\alembic\util\compat.py
-..\alembic\util\exc.py
-..\alembic\util\langhelpers.py
-..\alembic\util\messaging.py
-..\alembic\util\pyfiles.py
-..\alembic\util\sqla_compat.py
-..\alembic\util\__init__.py
-..\alembic\testing\plugin\bootstrap.py
-..\alembic\testing\plugin\noseplugin.py
-..\alembic\testing\plugin\plugin_base.py
-..\alembic\testing\plugin\pytestplugin.py
-..\alembic\testing\plugin\__init__.py
-..\alembic\templates\generic\README
-..\alembic\templates\generic\alembic.ini.mako
-..\alembic\templates\generic\env.py
-..\alembic\templates\generic\script.py.mako
-..\alembic\templates\multidb\README
-..\alembic\templates\multidb\alembic.ini.mako
-..\alembic\templates\multidb\env.py
-..\alembic\templates\multidb\script.py.mako
-..\alembic\templates\pylons\README
-..\alembic\templates\pylons\alembic.ini.mako
-..\alembic\templates\pylons\env.py
-..\alembic\templates\pylons\script.py.mako
-..\alembic\__pycache__\command.cpython-36.pyc
-..\alembic\__pycache__\config.cpython-36.pyc
-..\alembic\__pycache__\context.cpython-36.pyc
-..\alembic\__pycache__\op.cpython-36.pyc
-..\alembic\__pycache__\__init__.cpython-36.pyc
-..\alembic\autogenerate\__pycache__\api.cpython-36.pyc
-..\alembic\autogenerate\__pycache__\compare.cpython-36.pyc
-..\alembic\autogenerate\__pycache__\render.cpython-36.pyc
-..\alembic\autogenerate\__pycache__\rewriter.cpython-36.pyc
-..\alembic\autogenerate\__pycache__\__init__.cpython-36.pyc
-..\alembic\ddl\__pycache__\base.cpython-36.pyc
-..\alembic\ddl\__pycache__\impl.cpython-36.pyc
-..\alembic\ddl\__pycache__\mssql.cpython-36.pyc
-..\alembic\ddl\__pycache__\mysql.cpython-36.pyc
-..\alembic\ddl\__pycache__\oracle.cpython-36.pyc
-..\alembic\ddl\__pycache__\postgresql.cpython-36.pyc
-..\alembic\ddl\__pycache__\sqlite.cpython-36.pyc
-..\alembic\ddl\__pycache__\__init__.cpython-36.pyc
-..\alembic\operations\__pycache__\base.cpython-36.pyc
-..\alembic\operations\__pycache__\batch.cpython-36.pyc
-..\alembic\operations\__pycache__\ops.cpython-36.pyc
-..\alembic\operations\__pycache__\schemaobj.cpython-36.pyc
-..\alembic\operations\__pycache__\toimpl.cpython-36.pyc
-..\alembic\operations\__pycache__\__init__.cpython-36.pyc
-..\alembic\runtime\__pycache__\environment.cpython-36.pyc
-..\alembic\runtime\__pycache__\migration.cpython-36.pyc
-..\alembic\runtime\__pycache__\__init__.cpython-36.pyc
-..\alembic\script\__pycache__\base.cpython-36.pyc
-..\alembic\script\__pycache__\revision.cpython-36.pyc
-..\alembic\script\__pycache__\__init__.cpython-36.pyc
-..\alembic\testing\__pycache__\assertions.cpython-36.pyc
-..\alembic\testing\__pycache__\compat.cpython-36.pyc
-..\alembic\testing\__pycache__\config.cpython-36.pyc
-..\alembic\testing\__pycache__\engines.cpython-36.pyc
-..\alembic\testing\__pycache__\env.cpython-36.pyc
-..\alembic\testing\__pycache__\exclusions.cpython-36.pyc
-..\alembic\testing\__pycache__\fixtures.cpython-36.pyc
-..\alembic\testing\__pycache__\mock.cpython-36.pyc
-..\alembic\testing\__pycache__\provision.cpython-36.pyc
-..\alembic\testing\__pycache__\requirements.cpython-36.pyc
-..\alembic\testing\__pycache__\runner.cpython-36.pyc
-..\alembic\testing\__pycache__\util.cpython-36.pyc
-..\alembic\testing\__pycache__\warnings.cpython-36.pyc
-..\alembic\testing\__pycache__\__init__.cpython-36.pyc
-..\alembic\util\__pycache__\compat.cpython-36.pyc
-..\alembic\util\__pycache__\exc.cpython-36.pyc
-..\alembic\util\__pycache__\langhelpers.cpython-36.pyc
-..\alembic\util\__pycache__\messaging.cpython-36.pyc
-..\alembic\util\__pycache__\pyfiles.cpython-36.pyc
-..\alembic\util\__pycache__\sqla_compat.cpython-36.pyc
-..\alembic\util\__pycache__\__init__.cpython-36.pyc
-..\alembic\testing\plugin\__pycache__\bootstrap.cpython-36.pyc
-..\alembic\testing\plugin\__pycache__\noseplugin.cpython-36.pyc
-..\alembic\testing\plugin\__pycache__\plugin_base.cpython-36.pyc
-..\alembic\testing\plugin\__pycache__\pytestplugin.cpython-36.pyc
-..\alembic\testing\plugin\__pycache__\__init__.cpython-36.pyc
-..\alembic\templates\generic\__pycache__\env.cpython-36.pyc
-..\alembic\templates\multidb\__pycache__\env.cpython-36.pyc
-..\alembic\templates\pylons\__pycache__\env.cpython-36.pyc
-dependency_links.txt
-entry_points.txt
-not-zip-safe
-PKG-INFO
-requires.txt
-SOURCES.txt
-top_level.txt
-..\..\..\Scripts\alembic-script.py
-..\..\..\Scripts\alembic.exe
-..\..\..\Scripts\alembic.exe.manifest
diff --git a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/not-zip-safe b/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/not-zip-safe
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/not-zip-safe
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/requires.txt b/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/requires.txt
deleted file mode 100644
index 87ed84b..0000000
--- a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/requires.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-SQLAlchemy>=0.7.6
-Mako
-python-editor>=0.3
-python-dateutil
diff --git a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/top_level.txt b/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/top_level.txt
deleted file mode 100644
index b5bd98d..0000000
--- a/venv/Lib/site-packages/alembic-0.9.8-py3.6.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-alembic
diff --git a/venv/Lib/site-packages/alembic/__init__.py b/venv/Lib/site-packages/alembic/__init__.py
deleted file mode 100644
index 35d58a8..0000000
--- a/venv/Lib/site-packages/alembic/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from os import path
-
-__version__ = '0.9.8'
-
-package_dir = path.abspath(path.dirname(__file__))
-
-
-from . import op # noqa
-from . import context # noqa
-
-import sys
-from .runtime import environment
-from .runtime import migration
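-# Alias the relocated runtime modules under their legacy import paths
-# (alembic.migration / alembic.environment) so older imports keep working.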
-sys.modules['alembic.migration'] = migration
-sys.modules['alembic.environment'] = environment
diff --git a/venv/Lib/site-packages/alembic/autogenerate/__init__.py b/venv/Lib/site-packages/alembic/autogenerate/__init__.py
deleted file mode 100644
index 142f55d..0000000
--- a/venv/Lib/site-packages/alembic/autogenerate/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .api import ( # noqa
- compare_metadata, _render_migration_diffs,
- produce_migrations, render_python_code,
- RevisionContext
- )
-from .compare import _produce_net_changes, comparators # noqa
-from .render import render_op_text, renderers # noqa
-from .rewriter import Rewriter # noqa
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic/autogenerate/api.py b/venv/Lib/site-packages/alembic/autogenerate/api.py
deleted file mode 100644
index b54e6aa..0000000
--- a/venv/Lib/site-packages/alembic/autogenerate/api.py
+++ /dev/null
@@ -1,480 +0,0 @@
-"""Provide the 'autogenerate' feature which can produce migration operations
-automatically."""
-
-from ..operations import ops
-from . import render
-from . import compare
-from .. import util
-from sqlalchemy.engine.reflection import Inspector
-import contextlib
-
-
-def compare_metadata(context, metadata):
- """Compare a database schema to that given in a
- :class:`~sqlalchemy.schema.MetaData` instance.
-
- The database connection is presented in the context
- of a :class:`.MigrationContext` object, which
- provides database connectivity as well as optional
- comparison functions to use for datatypes and
- server defaults - see the "autogenerate" arguments
- at :meth:`.EnvironmentContext.configure`
- for details on these.
-
- The return format is a list of "diff" directives,
- each representing individual differences::
-
- from alembic.migration import MigrationContext
- from alembic.autogenerate import compare_metadata
- from sqlalchemy.schema import SchemaItem
- from sqlalchemy.types import TypeEngine
- from sqlalchemy import (create_engine, MetaData, Column,
- Integer, String, Table)
- import pprint
-
- engine = create_engine("sqlite://")
-
- engine.execute('''
- create table foo (
- id integer not null primary key,
- old_data varchar,
- x integer
- )''')
-
- engine.execute('''
- create table bar (
- data varchar
- )''')
-
- metadata = MetaData()
- Table('foo', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', Integer),
- Column('x', Integer, nullable=False)
- )
- Table('bat', metadata,
- Column('info', String)
- )
-
- mc = MigrationContext.configure(engine.connect())
-
- diff = compare_metadata(mc, metadata)
- pprint.pprint(diff, indent=2, width=20)
-
- Output::
-
- [ ( 'add_table',
- Table('bat', MetaData(bind=None),
- Column('info', String(), table=<bat>), schema=None)),
- ( 'remove_table',
- Table(u'bar', MetaData(bind=None),
- Column(u'data', VARCHAR(), table=<bar>), schema=None)),
- ( 'add_column',
- None,
- 'foo',
- Column('data', Integer(), table=<foo>)),
- ( 'remove_column',
- None,
- 'foo',
- Column(u'old_data', VARCHAR(), table=None)),
- [ ( 'modify_nullable',
- None,
- 'foo',
- u'x',
- { 'existing_server_default': None,
- 'existing_type': INTEGER()},
- True,
- False)]]
-
-
- :param context: a :class:`.MigrationContext`
- instance.
- :param metadata: a :class:`~sqlalchemy.schema.MetaData`
- instance.
-
- .. seealso::
-
- :func:`.produce_migrations` - produces a :class:`.MigrationScript`
- structure based on metadata comparison.
-
- """
-
- migration_script = produce_migrations(context, metadata)
- return migration_script.upgrade_ops.as_diffs()
-
-
-def produce_migrations(context, metadata):
- """Produce a :class:`.MigrationScript` structure based on schema
- comparison.
-
- This function does essentially what :func:`.compare_metadata` does,
- but then runs the resulting list of diffs to produce the full
- :class:`.MigrationScript` object. For an example of what this looks like,
- see the example in :ref:`customizing_revision`.
-
- .. versionadded:: 0.8.0
-
- .. seealso::
-
- :func:`.compare_metadata` - returns more fundamental "diff"
- data from comparing a schema.
-
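-    E.g., continuing the :func:`.compare_metadata` example above, with
-    ``mc`` and ``metadata`` as defined there::
-
-        migration_script = produce_migrations(mc, metadata)
-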
- """
-
- autogen_context = AutogenContext(context, metadata=metadata)
-
- migration_script = ops.MigrationScript(
- rev_id=None,
- upgrade_ops=ops.UpgradeOps([]),
- downgrade_ops=ops.DowngradeOps([]),
- )
-
- compare._populate_migration_script(autogen_context, migration_script)
-
- return migration_script
-
-
-def render_python_code(
- up_or_down_op,
- sqlalchemy_module_prefix='sa.',
- alembic_module_prefix='op.',
- render_as_batch=False,
- imports=(),
- render_item=None,
-):
- """Render Python code given an :class:`.UpgradeOps` or
- :class:`.DowngradeOps` object.
-
- This is a convenience function that can be used to test the
- autogenerate output of a user-defined :class:`.MigrationScript` structure.
-
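-    A minimal sketch, assuming ``migration_script`` is a populated
-    :class:`.MigrationScript`::
-
-        print(render_python_code(migration_script.upgrade_ops))
-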
- """
- opts = {
- 'sqlalchemy_module_prefix': sqlalchemy_module_prefix,
- 'alembic_module_prefix': alembic_module_prefix,
- 'render_item': render_item,
- 'render_as_batch': render_as_batch,
- }
-
- autogen_context = AutogenContext(None, opts=opts)
- autogen_context.imports = set(imports)
- return render._indent(render._render_cmd_body(
- up_or_down_op, autogen_context))
-
-
-def _render_migration_diffs(context, template_args):
- """legacy, used by test_autogen_composition at the moment"""
-
- autogen_context = AutogenContext(context)
-
- upgrade_ops = ops.UpgradeOps([])
- compare._produce_net_changes(autogen_context, upgrade_ops)
-
- migration_script = ops.MigrationScript(
- rev_id=None,
- upgrade_ops=upgrade_ops,
- downgrade_ops=upgrade_ops.reverse(),
- )
-
- render._render_python_into_templatevars(
- autogen_context, migration_script, template_args
- )
-
-
-class AutogenContext(object):
- """Maintains configuration and state that's specific to an
- autogenerate operation."""
-
- metadata = None
- """The :class:`~sqlalchemy.schema.MetaData` object
- representing the destination.
-
- This object is the one that is passed within ``env.py``
- to the :paramref:`.EnvironmentContext.configure.target_metadata`
- parameter. It represents the structure of :class:`.Table` and other
- objects as stated in the current database model, and represents the
- destination structure for the database being examined.
-
- While the :class:`~sqlalchemy.schema.MetaData` object is primarily
- known as a collection of :class:`~sqlalchemy.schema.Table` objects,
- it also has an :attr:`~sqlalchemy.schema.MetaData.info` dictionary
- that may be used by end-user schemes to store additional schema-level
- objects that are to be compared in custom autogeneration schemes.
-
- """
-
- connection = None
- """The :class:`~sqlalchemy.engine.base.Connection` object currently
- connected to the database backend being compared.
-
- This is obtained from the :attr:`.MigrationContext.bind` and is
- ultimately set up in the ``env.py`` script.
-
- """
-
- dialect = None
- """The :class:`~sqlalchemy.engine.Dialect` object currently in use.
-
- This is normally obtained from the
- :attr:`~sqlalchemy.engine.base.Connection.dialect` attribute.
-
- """
-
- imports = None
- """A ``set()`` which contains string Python import directives.
-
- The directives are to be rendered into the ``${imports}`` section
- of a script template. The set is normally empty and can be modified
- within hooks such as the :paramref:`.EnvironmentContext.configure.render_item`
- hook.
-
- .. versionadded:: 0.8.3
-
- .. seealso::
-
- :ref:`autogen_render_types`
-
- """
-
- migration_context = None
- """The :class:`.MigrationContext` established by the ``env.py`` script."""
-
- def __init__(
- self, migration_context, metadata=None,
- opts=None, autogenerate=True):
-
- if autogenerate and \
- migration_context is not None and migration_context.as_sql:
- raise util.CommandError(
- "autogenerate can't use as_sql=True as it prevents querying "
- "the database for schema information")
-
- if opts is None:
- opts = migration_context.opts
-
- self.metadata = metadata = opts.get('target_metadata', None) \
- if metadata is None else metadata
-
- if autogenerate and metadata is None and \
- migration_context is not None and \
- migration_context.script is not None:
- raise util.CommandError(
- "Can't proceed with --autogenerate option; environment "
- "script %s does not provide "
- "a MetaData object or sequence of objects to the context." % (
- migration_context.script.env_py_location
- ))
-
- include_symbol = opts.get('include_symbol', None)
- include_object = opts.get('include_object', None)
-
- object_filters = []
- if include_symbol:
- def include_symbol_filter(
- object, name, type_, reflected, compare_to):
- if type_ == "table":
- return include_symbol(name, object.schema)
- else:
- return True
- object_filters.append(include_symbol_filter)
- if include_object:
- object_filters.append(include_object)
-
- self._object_filters = object_filters
-
- self.migration_context = migration_context
- if self.migration_context is not None:
- self.connection = self.migration_context.bind
- self.dialect = self.migration_context.dialect
-
- self.imports = set()
- self.opts = opts
- self._has_batch = False
-
- @util.memoized_property
- def inspector(self):
- return Inspector.from_engine(self.connection)
-
- @contextlib.contextmanager
- def _within_batch(self):
- self._has_batch = True
- yield
- self._has_batch = False
-
- def run_filters(self, object_, name, type_, reflected, compare_to):
- """Run the context's object filters and return True if the targets
- should be part of the autogenerate operation.
-
- This method should be run for every kind of object encountered within
- an autogenerate operation, giving the environment the chance
- to filter what objects should be included in the comparison.
- The filters here are produced directly via the
- :paramref:`.EnvironmentContext.configure.include_object`
- and :paramref:`.EnvironmentContext.configure.include_symbol`
- functions, if present.
-
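-        For example, an ``include_object`` hook with this signature might
-        skip tables via a hypothetical ``info``-dict flag::
-
-            def include_object(object_, name, type_, reflected, compare_to):
-                if type_ == "table" and object_.info.get("skip_autogen"):
-                    return False
-                return True
-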
- """
- for fn in self._object_filters:
- if not fn(object_, name, type_, reflected, compare_to):
- return False
- else:
- return True
-
- @util.memoized_property
- def sorted_tables(self):
- """Return an aggregate of the :attr:`.MetaData.sorted_tables` collection(s).
-
- For a sequence of :class:`.MetaData` objects, this
- concatenates the :attr:`.MetaData.sorted_tables` collection
- for each individual :class:`.MetaData` in the order of the
- sequence. It does **not** collate the sorted tables collections.
-
- .. versionadded:: 0.9.0
-
- """
- result = []
- for m in util.to_list(self.metadata):
- result.extend(m.sorted_tables)
- return result
-
- @util.memoized_property
- def table_key_to_table(self):
- """Return an aggregate of the :attr:`.MetaData.tables` dictionaries.
-
- The :attr:`.MetaData.tables` collection is a dictionary of table key
- to :class:`.Table`; this method aggregates the dictionary across
- multiple :class:`.MetaData` objects into one dictionary.
-
- Duplicate table keys are **not** supported; if two :class:`.MetaData`
- objects contain the same table key, an exception is raised.
-
- .. versionadded:: 0.9.0
-
- """
- result = {}
- for m in util.to_list(self.metadata):
- intersect = set(result).intersection(set(m.tables))
- if intersect:
- raise ValueError(
- "Duplicate table keys across multiple "
- "MetaData objects: %s" %
- (", ".join('"%s"' % key for key in sorted(intersect)))
- )
-
- result.update(m.tables)
- return result
-
-
-class RevisionContext(object):
- """Maintains configuration and state that's specific to a revision
- file generation operation."""
-
- def __init__(self, config, script_directory, command_args,
- process_revision_directives=None):
- self.config = config
- self.script_directory = script_directory
- self.command_args = command_args
- self.process_revision_directives = process_revision_directives
- self.template_args = {
- 'config': config # Let templates use config for
- # e.g. multiple databases
- }
- self.generated_revisions = [
- self._default_revision()
- ]
-
- def _to_script(self, migration_script):
- template_args = {}
- for k, v in self.template_args.items():
- template_args.setdefault(k, v)
-
- if getattr(migration_script, '_needs_render', False):
- autogen_context = self._last_autogen_context
-
- # clear out existing imports if we are doing multiple
- # renders
- autogen_context.imports = set()
- if migration_script.imports:
- autogen_context.imports.update(migration_script.imports)
- render._render_python_into_templatevars(
- autogen_context, migration_script, template_args
- )
-
- return self.script_directory.generate_revision(
- migration_script.rev_id,
- migration_script.message,
- refresh=True,
- head=migration_script.head,
- splice=migration_script.splice,
- branch_labels=migration_script.branch_label,
- version_path=migration_script.version_path,
- depends_on=migration_script.depends_on,
- **template_args)
-
- def run_autogenerate(self, rev, migration_context):
- self._run_environment(rev, migration_context, True)
-
- def run_no_autogenerate(self, rev, migration_context):
- self._run_environment(rev, migration_context, False)
-
- def _run_environment(self, rev, migration_context, autogenerate):
- if autogenerate:
- if self.command_args['sql']:
- raise util.CommandError(
- "Using --sql with --autogenerate does not make any sense")
- if set(self.script_directory.get_revisions(rev)) != \
- set(self.script_directory.get_revisions("heads")):
- raise util.CommandError("Target database is not up to date.")
-
- upgrade_token = migration_context.opts['upgrade_token']
- downgrade_token = migration_context.opts['downgrade_token']
-
- migration_script = self.generated_revisions[-1]
- if not getattr(migration_script, '_needs_render', False):
- migration_script.upgrade_ops_list[-1].upgrade_token = upgrade_token
- migration_script.downgrade_ops_list[-1].downgrade_token = \
- downgrade_token
- migration_script._needs_render = True
- else:
- migration_script._upgrade_ops.append(
- ops.UpgradeOps([], upgrade_token=upgrade_token)
- )
- migration_script._downgrade_ops.append(
- ops.DowngradeOps([], downgrade_token=downgrade_token)
- )
-
- self._last_autogen_context = autogen_context = \
- AutogenContext(migration_context, autogenerate=autogenerate)
-
- if autogenerate:
- compare._populate_migration_script(
- autogen_context, migration_script)
-
- if self.process_revision_directives:
- self.process_revision_directives(
- migration_context, rev, self.generated_revisions)
-
- hook = migration_context.opts['process_revision_directives']
- if hook:
- hook(migration_context, rev, self.generated_revisions)
-
- for migration_script in self.generated_revisions:
- migration_script._needs_render = True
-
- def _default_revision(self):
- op = ops.MigrationScript(
- rev_id=self.command_args['rev_id'] or util.rev_id(),
- message=self.command_args['message'],
- upgrade_ops=ops.UpgradeOps([]),
- downgrade_ops=ops.DowngradeOps([]),
- head=self.command_args['head'],
- splice=self.command_args['splice'],
- branch_label=self.command_args['branch_label'],
- version_path=self.command_args['version_path'],
- depends_on=self.command_args['depends_on']
- )
- return op
-
- def generate_scripts(self):
- for generated_revision in self.generated_revisions:
- yield self._to_script(generated_revision)
diff --git a/venv/Lib/site-packages/alembic/autogenerate/compare.py b/venv/Lib/site-packages/alembic/autogenerate/compare.py
deleted file mode 100644
index 1b66061..0000000
--- a/venv/Lib/site-packages/alembic/autogenerate/compare.py
+++ /dev/null
@@ -1,892 +0,0 @@
-from sqlalchemy import schema as sa_schema, types as sqltypes
-from sqlalchemy.engine.reflection import Inspector
-from sqlalchemy import event
-from ..operations import ops
-import logging
-from .. import util
-from ..util import compat
-from ..util import sqla_compat
-from sqlalchemy.util import OrderedSet
-import re
-from .render import _user_defined_render
-import contextlib
-from alembic.ddl.base import _fk_spec
-
-log = logging.getLogger(__name__)
-
-
-def _populate_migration_script(autogen_context, migration_script):
- upgrade_ops = migration_script.upgrade_ops_list[-1]
- downgrade_ops = migration_script.downgrade_ops_list[-1]
-
- _produce_net_changes(autogen_context, upgrade_ops)
- upgrade_ops.reverse_into(downgrade_ops)
-
-
-comparators = util.Dispatcher(uselist=True)
-
-
-def _produce_net_changes(autogen_context, upgrade_ops):
-
- connection = autogen_context.connection
- include_schemas = autogen_context.opts.get('include_schemas', False)
-
- inspector = Inspector.from_engine(connection)
-
- default_schema = connection.dialect.default_schema_name
- if include_schemas:
- schemas = set(inspector.get_schema_names())
- # drop the internal "information_schema" schema
- schemas.discard("information_schema")
- # replace the "default" schema with None
- schemas.discard(default_schema)
- schemas.add(None)
- else:
- schemas = [None]
-
- comparators.dispatch("schema", autogen_context.dialect.name)(
- autogen_context, upgrade_ops, schemas
- )
-
-
-@comparators.dispatch_for("schema")
-def _autogen_for_tables(autogen_context, upgrade_ops, schemas):
- inspector = autogen_context.inspector
-
- conn_table_names = set()
-
- version_table_schema = \
- autogen_context.migration_context.version_table_schema
- version_table = autogen_context.migration_context.version_table
-
- for s in schemas:
- tables = set(inspector.get_table_names(schema=s))
- if s == version_table_schema:
- tables = tables.difference(
- [autogen_context.migration_context.version_table]
- )
- conn_table_names.update(zip([s] * len(tables), tables))
-
- metadata_table_names = OrderedSet(
- [(table.schema, table.name) for table in autogen_context.sorted_tables]
- ).difference([(version_table_schema, version_table)])
-
- _compare_tables(conn_table_names, metadata_table_names,
- inspector, upgrade_ops, autogen_context)
-
-
-def _compare_tables(conn_table_names, metadata_table_names,
- inspector, upgrade_ops, autogen_context):
-
- default_schema = inspector.bind.dialect.default_schema_name
-
- # tables coming from the connection will not have "schema"
- # set if it matches default_schema_name; so we need a list
- # of table names from local metadata that also have "None" if schema
- # == default_schema_name. Most setups will be like this anyway but
- # some are not (see #170)
- metadata_table_names_no_dflt_schema = OrderedSet([
- (schema if schema != default_schema else None, tname)
- for schema, tname in metadata_table_names
- ])
-
- # to adjust for the MetaData collection storing the tables either
- # as "schemaname.tablename" or just "tablename", create a new lookup
- # which will match the "non-default-schema" keys to the Table object.
- tname_to_table = dict(
- (
- no_dflt_schema,
- autogen_context.table_key_to_table[
- sa_schema._get_table_key(tname, schema)]
- )
- for no_dflt_schema, (schema, tname) in zip(
- metadata_table_names_no_dflt_schema,
- metadata_table_names)
- )
- metadata_table_names = metadata_table_names_no_dflt_schema
-
- for s, tname in metadata_table_names.difference(conn_table_names):
- name = '%s.%s' % (s, tname) if s else tname
- metadata_table = tname_to_table[(s, tname)]
- if autogen_context.run_filters(
- metadata_table, tname, "table", False, None):
- upgrade_ops.ops.append(
- ops.CreateTableOp.from_table(metadata_table))
- log.info("Detected added table %r", name)
- modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
-
- comparators.dispatch("table")(
- autogen_context, modify_table_ops,
- s, tname, None, metadata_table
- )
- if not modify_table_ops.is_empty():
- upgrade_ops.ops.append(modify_table_ops)
-
- removal_metadata = sa_schema.MetaData()
- for s, tname in conn_table_names.difference(metadata_table_names):
- name = sa_schema._get_table_key(tname, s)
- exists = name in removal_metadata.tables
- t = sa_schema.Table(tname, removal_metadata, schema=s)
-
- if not exists:
- event.listen(
- t,
- "column_reflect",
- autogen_context.migration_context.impl.
- _compat_autogen_column_reflect(inspector))
- inspector.reflecttable(t, None)
- if autogen_context.run_filters(t, tname, "table", True, None):
-
- modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
-
- comparators.dispatch("table")(
- autogen_context, modify_table_ops,
- s, tname, t, None
- )
- if not modify_table_ops.is_empty():
- upgrade_ops.ops.append(modify_table_ops)
-
- upgrade_ops.ops.append(
- ops.DropTableOp.from_table(t)
- )
- log.info("Detected removed table %r", name)
-
- existing_tables = conn_table_names.intersection(metadata_table_names)
-
- existing_metadata = sa_schema.MetaData()
- conn_column_info = {}
- for s, tname in existing_tables:
- name = sa_schema._get_table_key(tname, s)
- exists = name in existing_metadata.tables
- t = sa_schema.Table(tname, existing_metadata, schema=s)
- if not exists:
- event.listen(
- t,
- "column_reflect",
- autogen_context.migration_context.impl.
- _compat_autogen_column_reflect(inspector))
- inspector.reflecttable(t, None)
- conn_column_info[(s, tname)] = t
-
- for s, tname in sorted(existing_tables, key=lambda x: (x[0] or '', x[1])):
- s = s or None
- name = '%s.%s' % (s, tname) if s else tname
- metadata_table = tname_to_table[(s, tname)]
- conn_table = existing_metadata.tables[name]
-
- if autogen_context.run_filters(
- metadata_table, tname, "table", False,
- conn_table):
-
- modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
- with _compare_columns(
- s, tname,
- conn_table,
- metadata_table,
- modify_table_ops, autogen_context, inspector):
-
- comparators.dispatch("table")(
- autogen_context, modify_table_ops,
- s, tname, conn_table, metadata_table
- )
-
- if not modify_table_ops.is_empty():
- upgrade_ops.ops.append(modify_table_ops)
-
-
-def _make_index(params, conn_table):
- ix = sa_schema.Index(
- params['name'],
- *[conn_table.c[cname] for cname in params['column_names']],
- unique=params['unique']
- )
- if 'duplicates_constraint' in params:
- ix.info['duplicates_constraint'] = params['duplicates_constraint']
- return ix
-
-
-def _make_unique_constraint(params, conn_table):
- uq = sa_schema.UniqueConstraint(
- *[conn_table.c[cname] for cname in params['column_names']],
- name=params['name']
- )
- if 'duplicates_index' in params:
- uq.info['duplicates_index'] = params['duplicates_index']
-
- return uq
-
-
-def _make_foreign_key(params, conn_table):
- tname = params['referred_table']
- if params['referred_schema']:
- tname = "%s.%s" % (params['referred_schema'], tname)
-
- options = params.get('options', {})
-
- const = sa_schema.ForeignKeyConstraint(
- [conn_table.c[cname] for cname in params['constrained_columns']],
- ["%s.%s" % (tname, n) for n in params['referred_columns']],
- onupdate=options.get('onupdate'),
- ondelete=options.get('ondelete'),
- deferrable=options.get('deferrable'),
- initially=options.get('initially'),
- name=params['name']
- )
- # needed by 0.7
- conn_table.append_constraint(const)
- return const
-
-
-@contextlib.contextmanager
-def _compare_columns(schema, tname, conn_table, metadata_table,
- modify_table_ops, autogen_context, inspector):
- name = '%s.%s' % (schema, tname) if schema else tname
- metadata_cols_by_name = dict((c.name, c) for c in metadata_table.c)
- conn_col_names = dict((c.name, c) for c in conn_table.c)
- metadata_col_names = OrderedSet(sorted(metadata_cols_by_name))
-
- for cname in metadata_col_names.difference(conn_col_names):
- if autogen_context.run_filters(
- metadata_cols_by_name[cname], cname,
- "column", False, None):
- modify_table_ops.ops.append(
- ops.AddColumnOp.from_column_and_tablename(
- schema, tname, metadata_cols_by_name[cname])
- )
- log.info("Detected added column '%s.%s'", name, cname)
-
- for colname in metadata_col_names.intersection(conn_col_names):
- metadata_col = metadata_cols_by_name[colname]
- conn_col = conn_table.c[colname]
- if not autogen_context.run_filters(
- metadata_col, colname, "column", False,
- conn_col):
- continue
- alter_column_op = ops.AlterColumnOp(
- tname, colname, schema=schema)
-
- comparators.dispatch("column")(
- autogen_context, alter_column_op,
- schema, tname, colname, conn_col, metadata_col
- )
-
- if alter_column_op.has_changes():
- modify_table_ops.ops.append(alter_column_op)
-
- yield
-
- for cname in set(conn_col_names).difference(metadata_col_names):
- if autogen_context.run_filters(
- conn_table.c[cname], cname,
- "column", True, None):
- modify_table_ops.ops.append(
- ops.DropColumnOp.from_column_and_tablename(
- schema, tname, conn_table.c[cname]
- )
- )
- log.info("Detected removed column '%s.%s'", name, cname)
-
-
-class _constraint_sig(object):
-
- def md_name_to_sql_name(self, context):
- return self.name
-
- def __eq__(self, other):
- return self.const == other.const
-
- def __ne__(self, other):
- return self.const != other.const
-
- def __hash__(self):
- return hash(self.const)
-
-
-class _uq_constraint_sig(_constraint_sig):
- is_index = False
- is_unique = True
-
- def __init__(self, const):
- self.const = const
- self.name = const.name
- self.sig = tuple(sorted([col.name for col in const.columns]))
-
- @property
- def column_names(self):
- return [col.name for col in self.const.columns]
-
-
-class _ix_constraint_sig(_constraint_sig):
- is_index = True
-
- def __init__(self, const):
- self.const = const
- self.name = const.name
- self.sig = tuple(sorted([col.name for col in const.columns]))
- self.is_unique = bool(const.unique)
-
- def md_name_to_sql_name(self, context):
- return sqla_compat._get_index_final_name(context.dialect, self.const)
-
- @property
- def column_names(self):
- return sqla_compat._get_index_column_names(self.const)
-
-
-class _fk_constraint_sig(_constraint_sig):
- def __init__(self, const, include_options=False):
- self.const = const
- self.name = const.name
-
- (
- self.source_schema, self.source_table,
- self.source_columns, self.target_schema, self.target_table,
- self.target_columns,
- onupdate, ondelete,
- deferrable, initially) = _fk_spec(const)
-
- self.sig = (
- self.source_schema, self.source_table, tuple(self.source_columns),
- self.target_schema, self.target_table, tuple(self.target_columns)
- )
- if include_options:
- self.sig += (
- (None if onupdate.lower() == 'no action'
- else onupdate.lower())
- if onupdate else None,
- (None if ondelete.lower() == 'no action'
- else ondelete.lower())
- if ondelete else None,
- # convert initially + deferrable into one three-state value
- "initially_deferrable"
- if initially and initially.lower() == "deferred"
- else "deferrable" if deferrable
- else "not deferrable"
- )
-
-
-@comparators.dispatch_for("table")
-def _compare_indexes_and_uniques(
- autogen_context, modify_ops, schema, tname, conn_table,
- metadata_table):
-
- inspector = autogen_context.inspector
- is_create_table = conn_table is None
- is_drop_table = metadata_table is None
-
- # 1a. get raw indexes and unique constraints from metadata ...
- if metadata_table is not None:
- metadata_unique_constraints = set(
- uq for uq in metadata_table.constraints
- if isinstance(uq, sa_schema.UniqueConstraint)
- )
- metadata_indexes = set(metadata_table.indexes)
- else:
- metadata_unique_constraints = set()
- metadata_indexes = set()
-
- conn_uniques = conn_indexes = frozenset()
-
- supports_unique_constraints = False
-
- unique_constraints_duplicate_unique_indexes = False
-
- if conn_table is not None:
- # 1b. ... and from connection, if the table exists
- if hasattr(inspector, "get_unique_constraints"):
- try:
- conn_uniques = inspector.get_unique_constraints(
- tname, schema=schema)
- supports_unique_constraints = True
- except NotImplementedError:
- pass
- except TypeError:
- # number of arguments is off for the base
- # method in SQLAlchemy due to the cache decorator
- # not being present
- pass
- else:
- for uq in conn_uniques:
- if uq.get('duplicates_index'):
- unique_constraints_duplicate_unique_indexes = True
- try:
- conn_indexes = inspector.get_indexes(tname, schema=schema)
- except NotImplementedError:
- pass
-
- # 2. convert conn-level objects from raw inspector records
- # into schema objects
- if is_drop_table:
- # for DROP TABLE uniques are inline, don't need them
- conn_uniques = set()
- else:
- conn_uniques = set(_make_unique_constraint(uq_def, conn_table)
- for uq_def in conn_uniques)
-
- conn_indexes = set(_make_index(ix, conn_table) for ix in conn_indexes)
-
- # 2a. if the dialect dupes unique indexes as unique constraints
- # (mysql and oracle), correct for that
-
- if unique_constraints_duplicate_unique_indexes:
- _correct_for_uq_duplicates_uix(
- conn_uniques, conn_indexes,
- metadata_unique_constraints,
- metadata_indexes
- )
-
- # 3. give the dialect a chance to omit indexes and constraints that
- # we know are either added implicitly by the DB or that the DB
- # can't accurately report on
- autogen_context.migration_context.impl.\
- correct_for_autogen_constraints(
- conn_uniques, conn_indexes,
- metadata_unique_constraints,
- metadata_indexes)
-
- # 4. organize the constraints into "signature" collections, the
- # _constraint_sig() objects provide a consistent facade over both
- # Index and UniqueConstraint so we can easily work with them
- # interchangeably
- metadata_unique_constraints = set(_uq_constraint_sig(uq)
- for uq in metadata_unique_constraints
- )
-
- metadata_indexes = set(_ix_constraint_sig(ix) for ix in metadata_indexes)
-
- conn_unique_constraints = set(
- _uq_constraint_sig(uq) for uq in conn_uniques)
-
- conn_indexes = set(_ix_constraint_sig(ix) for ix in conn_indexes)
-
- # 5. index things by name, for those objects that have names
- metadata_names = dict(
- (c.md_name_to_sql_name(autogen_context), c) for c in
- metadata_unique_constraints.union(metadata_indexes)
- if c.name is not None)
-
- conn_uniques_by_name = dict((c.name, c) for c in conn_unique_constraints)
- conn_indexes_by_name = dict((c.name, c) for c in conn_indexes)
-
- conn_names = dict((c.name, c) for c in
- conn_unique_constraints.union(conn_indexes)
- if c.name is not None)
-
- doubled_constraints = dict(
- (name, (conn_uniques_by_name[name], conn_indexes_by_name[name]))
- for name in set(
- conn_uniques_by_name).intersection(conn_indexes_by_name)
- )
-
- # 6. index things by "column signature", to help with unnamed unique
- # constraints.
- conn_uniques_by_sig = dict((uq.sig, uq) for uq in conn_unique_constraints)
- metadata_uniques_by_sig = dict(
- (uq.sig, uq) for uq in metadata_unique_constraints)
- metadata_indexes_by_sig = dict(
- (ix.sig, ix) for ix in metadata_indexes)
- unnamed_metadata_uniques = dict(
- (uq.sig, uq) for uq in
- metadata_unique_constraints if uq.name is None)
-
- # assumptions:
- # 1. a unique constraint or an index from the connection *always*
- # has a name.
- # 2. an index on the metadata side *always* has a name.
- # 3. a unique constraint on the metadata side *might* have a name.
- # 4. The backend may double up indexes as unique constraints and
- # vice versa (e.g. MySQL, Postgresql)
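-    #
-    # Illustrative sketch of assumption 4 (not part of the original source):
-    # on MySQL, a UniqueConstraint('email') reflects back both as a unique
-    # index and as a unique constraint flagged 'duplicates_index', so
-    # doubled_constraints above pairs the two records under one name and a
-    # change is emitted only once.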
-
- def obj_added(obj):
- if obj.is_index:
- if autogen_context.run_filters(
- obj.const, obj.name, "index", False, None):
- modify_ops.ops.append(
- ops.CreateIndexOp.from_index(obj.const)
- )
- log.info("Detected added index '%s' on %s",
- obj.name, ', '.join([
- "'%s'" % obj.column_names
- ]))
- else:
- if not supports_unique_constraints:
- # can't report unique indexes as added if we don't
- # detect them
- return
- if is_create_table or is_drop_table:
- # unique constraints are created inline with table defs
- return
- if autogen_context.run_filters(
- obj.const, obj.name,
- "unique_constraint", False, None):
- modify_ops.ops.append(
- ops.AddConstraintOp.from_constraint(obj.const)
- )
- log.info("Detected added unique constraint '%s' on %s",
- obj.name, ', '.join([
- "'%s'" % obj.column_names
- ]))
-
- def obj_removed(obj):
- if obj.is_index:
- if obj.is_unique and not supports_unique_constraints:
- # many databases double up unique constraints
- # as unique indexes. without that list we can't
- # be sure what we're doing here
- return
-
- if autogen_context.run_filters(
- obj.const, obj.name, "index", True, None):
- modify_ops.ops.append(
- ops.DropIndexOp.from_index(obj.const)
- )
- log.info(
- "Detected removed index '%s' on '%s'", obj.name, tname)
- else:
- if is_create_table or is_drop_table:
- # if the whole table is being dropped, we don't need to
- # consider unique constraint separately
- return
- if autogen_context.run_filters(
- obj.const, obj.name,
- "unique_constraint", True, None):
- modify_ops.ops.append(
- ops.DropConstraintOp.from_constraint(obj.const)
- )
- log.info("Detected removed unique constraint '%s' on '%s'",
- obj.name, tname
- )
-
- def obj_changed(old, new, msg):
- if old.is_index:
- if autogen_context.run_filters(
- new.const, new.name, "index",
- False, old.const):
- log.info("Detected changed index '%s' on '%s':%s",
- old.name, tname, ', '.join(msg)
- )
- modify_ops.ops.append(
- ops.DropIndexOp.from_index(old.const)
- )
- modify_ops.ops.append(
- ops.CreateIndexOp.from_index(new.const)
- )
- else:
- if autogen_context.run_filters(
- new.const, new.name,
- "unique_constraint", False, old.const):
- log.info("Detected changed unique constraint '%s' on '%s':%s",
- old.name, tname, ', '.join(msg)
- )
- modify_ops.ops.append(
- ops.DropConstraintOp.from_constraint(old.const)
- )
- modify_ops.ops.append(
- ops.AddConstraintOp.from_constraint(new.const)
- )
-
- for added_name in sorted(set(metadata_names).difference(conn_names)):
- obj = metadata_names[added_name]
- obj_added(obj)
-
- for existing_name in sorted(set(metadata_names).intersection(conn_names)):
- metadata_obj = metadata_names[existing_name]
-
- if existing_name in doubled_constraints:
- conn_uq, conn_idx = doubled_constraints[existing_name]
- if metadata_obj.is_index:
- conn_obj = conn_idx
- else:
- conn_obj = conn_uq
- else:
- conn_obj = conn_names[existing_name]
-
- if conn_obj.is_index != metadata_obj.is_index:
- obj_removed(conn_obj)
- obj_added(metadata_obj)
- else:
- msg = []
- if conn_obj.is_unique != metadata_obj.is_unique:
- msg.append(' unique=%r to unique=%r' % (
- conn_obj.is_unique, metadata_obj.is_unique
- ))
- if conn_obj.sig != metadata_obj.sig:
- msg.append(' columns %r to %r' % (
- conn_obj.sig, metadata_obj.sig
- ))
-
- if msg:
- obj_changed(conn_obj, metadata_obj, msg)
-
- for removed_name in sorted(set(conn_names).difference(metadata_names)):
- conn_obj = conn_names[removed_name]
- if not conn_obj.is_index and conn_obj.sig in unnamed_metadata_uniques:
- continue
- elif removed_name in doubled_constraints:
- if conn_obj.sig not in metadata_indexes_by_sig and \
- conn_obj.sig not in metadata_uniques_by_sig:
- conn_uq, conn_idx = doubled_constraints[removed_name]
- obj_removed(conn_uq)
- obj_removed(conn_idx)
- else:
- obj_removed(conn_obj)
-
- for uq_sig in unnamed_metadata_uniques:
- if uq_sig not in conn_uniques_by_sig:
- obj_added(unnamed_metadata_uniques[uq_sig])
-
-
-def _correct_for_uq_duplicates_uix(
- conn_unique_constraints,
- conn_indexes,
- metadata_unique_constraints,
- metadata_indexes):
- # dedupe unique indexes vs. constraints, since MySQL / Oracle
-    # don't really have unique constraints as a separate construct.
-    # But look in the metadata and try to maintain constructs
-    # that already seem to be defined one way or the other
-    # on that side. This logic was formerly local to the MySQL dialect
-    # and was generalized to Oracle and others. See #276
- metadata_uq_names = set([
- cons.name for cons in metadata_unique_constraints
- if cons.name is not None])
-
- unnamed_metadata_uqs = set([
- _uq_constraint_sig(cons).sig
- for cons in metadata_unique_constraints
- if cons.name is None
- ])
-
- metadata_ix_names = set([
- cons.name for cons in metadata_indexes if cons.unique])
- conn_ix_names = dict(
- (cons.name, cons) for cons in conn_indexes if cons.unique
- )
-
- uqs_dupe_indexes = dict(
- (cons.name, cons) for cons in conn_unique_constraints
- if cons.info['duplicates_index']
- )
-
- for overlap in uqs_dupe_indexes:
- if overlap not in metadata_uq_names:
- if _uq_constraint_sig(uqs_dupe_indexes[overlap]).sig \
- not in unnamed_metadata_uqs:
-
- conn_unique_constraints.discard(uqs_dupe_indexes[overlap])
- elif overlap not in metadata_ix_names:
- conn_indexes.discard(conn_ix_names[overlap])
-
-
-@comparators.dispatch_for("column")
-def _compare_nullable(
- autogen_context, alter_column_op, schema, tname, cname, conn_col,
- metadata_col):
-
- # work around SQLAlchemy issue #3023
- if metadata_col.primary_key:
- return
-
- metadata_col_nullable = metadata_col.nullable
- conn_col_nullable = conn_col.nullable
- alter_column_op.existing_nullable = conn_col_nullable
-
- if conn_col_nullable is not metadata_col_nullable:
- alter_column_op.modify_nullable = metadata_col_nullable
- log.info("Detected %s on column '%s.%s'",
- "NULL" if metadata_col_nullable else "NOT NULL",
- tname,
- cname
- )
-
-
-@comparators.dispatch_for("column")
-def _setup_autoincrement(
- autogen_context, alter_column_op, schema, tname, cname, conn_col,
- metadata_col):
-
- if metadata_col.table._autoincrement_column is metadata_col:
- alter_column_op.kw['autoincrement'] = True
- elif util.sqla_110 and metadata_col.autoincrement is True:
- alter_column_op.kw['autoincrement'] = True
- elif metadata_col.autoincrement is False:
- alter_column_op.kw['autoincrement'] = False
-
-
-@comparators.dispatch_for("column")
-def _compare_type(
- autogen_context, alter_column_op, schema, tname, cname, conn_col,
- metadata_col):
-
- conn_type = conn_col.type
- alter_column_op.existing_type = conn_type
- metadata_type = metadata_col.type
- if conn_type._type_affinity is sqltypes.NullType:
- log.info("Couldn't determine database type "
- "for column '%s.%s'", tname, cname)
- return
- if metadata_type._type_affinity is sqltypes.NullType:
- log.info("Column '%s.%s' has no type within "
- "the model; can't compare", tname, cname)
- return
-
- isdiff = autogen_context.migration_context._compare_type(
- conn_col, metadata_col)
-
- if isdiff:
- alter_column_op.modify_type = metadata_type
- log.info("Detected type change from %r to %r on '%s.%s'",
- conn_type, metadata_type, tname, cname
- )
-
-
-def _render_server_default_for_compare(metadata_default,
- metadata_col, autogen_context):
- rendered = _user_defined_render(
- "server_default", metadata_default, autogen_context)
- if rendered is not False:
- return rendered
-
- if isinstance(metadata_default, sa_schema.DefaultClause):
- if isinstance(metadata_default.arg, compat.string_types):
- metadata_default = metadata_default.arg
- else:
- metadata_default = str(metadata_default.arg.compile(
- dialect=autogen_context.dialect))
- if isinstance(metadata_default, compat.string_types):
- if metadata_col.type._type_affinity is sqltypes.String:
- metadata_default = re.sub(r"^'|'$", "", metadata_default)
- return repr(metadata_default)
- else:
- return metadata_default
- else:
- return None
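-
-# Worked sketch (not part of the original source): for
-# Column('status', String(10), server_default='active'), the DefaultClause
-# arg is the plain string 'active'; boundary quotes, if present, are
-# stripped and the value re-repr'd, yielding "'active'" so metadata-side
-# and reflected textual defaults compare in one consistent form.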
-
-
-@comparators.dispatch_for("column")
-def _compare_server_default(
- autogen_context, alter_column_op, schema, tname, cname,
- conn_col, metadata_col):
-
- metadata_default = metadata_col.server_default
- conn_col_default = conn_col.server_default
- if conn_col_default is None and metadata_default is None:
- return False
- rendered_metadata_default = _render_server_default_for_compare(
- metadata_default, metadata_col, autogen_context)
-
- rendered_conn_default = conn_col.server_default.arg.text \
- if conn_col.server_default else None
-
- alter_column_op.existing_server_default = conn_col_default
-
- isdiff = autogen_context.migration_context._compare_server_default(
- conn_col, metadata_col,
- rendered_metadata_default,
- rendered_conn_default
- )
- if isdiff:
- alter_column_op.modify_server_default = metadata_default
- log.info(
- "Detected server default on column '%s.%s'",
- tname, cname)
-
-
-@comparators.dispatch_for("table")
-def _compare_foreign_keys(
- autogen_context, modify_table_ops, schema, tname, conn_table,
- metadata_table):
-
- # if we're doing CREATE TABLE, all FKs are created
- # inline within the table def
- if conn_table is None or metadata_table is None:
- return
-
- inspector = autogen_context.inspector
- metadata_fks = set(
- fk for fk in metadata_table.constraints
- if isinstance(fk, sa_schema.ForeignKeyConstraint)
- )
-
- conn_fks = inspector.get_foreign_keys(tname, schema=schema)
-
- backend_reflects_fk_options = conn_fks and 'options' in conn_fks[0]
-
- conn_fks = set(_make_foreign_key(const, conn_table) for const in conn_fks)
-
- # give the dialect a chance to correct the FKs to match more
- # closely
- autogen_context.migration_context.impl.\
- correct_for_autogen_foreignkeys(
- conn_fks, metadata_fks,
- )
-
- metadata_fks = set(
- _fk_constraint_sig(fk, include_options=backend_reflects_fk_options)
- for fk in metadata_fks
- )
-
- conn_fks = set(
- _fk_constraint_sig(fk, include_options=backend_reflects_fk_options)
- for fk in conn_fks
- )
-
- conn_fks_by_sig = dict(
- (c.sig, c) for c in conn_fks
- )
- metadata_fks_by_sig = dict(
- (c.sig, c) for c in metadata_fks
- )
-
- metadata_fks_by_name = dict(
- (c.name, c) for c in metadata_fks if c.name is not None
- )
- conn_fks_by_name = dict(
- (c.name, c) for c in conn_fks if c.name is not None
- )
-
- def _add_fk(obj, compare_to):
- if autogen_context.run_filters(
- obj.const, obj.name, "foreign_key_constraint", False,
- compare_to):
- modify_table_ops.ops.append(
-            ops.CreateForeignKeyOp.from_constraint(obj.const)
- )
-
- log.info(
- "Detected added foreign key (%s)(%s) on table %s%s",
- ", ".join(obj.source_columns),
- ", ".join(obj.target_columns),
- "%s." % obj.source_schema if obj.source_schema else "",
- obj.source_table)
-
- def _remove_fk(obj, compare_to):
- if autogen_context.run_filters(
- obj.const, obj.name, "foreign_key_constraint", True,
- compare_to):
- modify_table_ops.ops.append(
- ops.DropConstraintOp.from_constraint(obj.const)
- )
- log.info(
- "Detected removed foreign key (%s)(%s) on table %s%s",
- ", ".join(obj.source_columns),
- ", ".join(obj.target_columns),
- "%s." % obj.source_schema if obj.source_schema else "",
- obj.source_table)
-
- # so far it appears we don't need to do this by name at all.
- # SQLite doesn't preserve constraint names anyway
-
- for removed_sig in set(conn_fks_by_sig).difference(metadata_fks_by_sig):
- const = conn_fks_by_sig[removed_sig]
- if removed_sig not in metadata_fks_by_sig:
- compare_to = metadata_fks_by_name[const.name].const \
- if const.name in metadata_fks_by_name else None
- _remove_fk(const, compare_to)
-
- for added_sig in set(metadata_fks_by_sig).difference(conn_fks_by_sig):
- const = metadata_fks_by_sig[added_sig]
- if added_sig not in conn_fks_by_sig:
- compare_to = conn_fks_by_name[const.name].const \
- if const.name in conn_fks_by_name else None
- _add_fk(const, compare_to)
diff --git a/venv/Lib/site-packages/alembic/autogenerate/render.py b/venv/Lib/site-packages/alembic/autogenerate/render.py
deleted file mode 100644
index 0ba12da..0000000
--- a/venv/Lib/site-packages/alembic/autogenerate/render.py
+++ /dev/null
@@ -1,790 +0,0 @@
-from sqlalchemy import schema as sa_schema, types as sqltypes, sql
-from ..operations import ops
-from ..util import compat
-import re
-from ..util.compat import string_types
-from .. import util
-from mako.pygen import PythonPrinter
-from ..util.compat import StringIO
-
-
-MAX_PYTHON_ARGS = 255
-
-try:
- from sqlalchemy.sql.naming import conv
-
- def _render_gen_name(autogen_context, name):
- if isinstance(name, conv):
- return _f_name(_alembic_autogenerate_prefix(autogen_context), name)
- else:
- return name
-except ImportError:
- def _render_gen_name(autogen_context, name):
- return name
-
-
-def _indent(text):
- text = re.compile(r'^', re.M).sub(" ", text).strip()
- text = re.compile(r' +$', re.M).sub("", text)
- return text
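-
-# e.g. (illustrative) _indent("op.drop_table('x')\npass") returns
-# "op.drop_table('x')\n    pass": the indent prepended to the first line is
-# trimmed again by strip(), since the Mako migration template supplies it.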
-
-
-def _render_python_into_templatevars(
- autogen_context, migration_script, template_args):
- imports = autogen_context.imports
-
- for upgrade_ops, downgrade_ops in zip(
- migration_script.upgrade_ops_list,
- migration_script.downgrade_ops_list):
- template_args[upgrade_ops.upgrade_token] = _indent(
- _render_cmd_body(upgrade_ops, autogen_context))
- template_args[downgrade_ops.downgrade_token] = _indent(
- _render_cmd_body(downgrade_ops, autogen_context))
- template_args['imports'] = "\n".join(sorted(imports))
-
-
-default_renderers = renderers = util.Dispatcher()
-
-
-def _render_cmd_body(op_container, autogen_context):
-
- buf = StringIO()
- printer = PythonPrinter(buf)
-
- printer.writeline(
- "# ### commands auto generated by Alembic - please adjust! ###"
- )
-
- if not op_container.ops:
- printer.writeline("pass")
- else:
- for op in op_container.ops:
- lines = render_op(autogen_context, op)
-
- for line in lines:
- printer.writeline(line)
-
- printer.writeline("# ### end Alembic commands ###")
-
- return buf.getvalue()
-
-
-def render_op(autogen_context, op):
- renderer = renderers.dispatch(op)
- lines = util.to_list(renderer(autogen_context, op))
- return lines
-
-
-def render_op_text(autogen_context, op):
- return "\n".join(render_op(autogen_context, op))
-
-
-@renderers.dispatch_for(ops.ModifyTableOps)
-def _render_modify_table(autogen_context, op):
- opts = autogen_context.opts
- render_as_batch = opts.get('render_as_batch', False)
-
- if op.ops:
- lines = []
- if render_as_batch:
- with autogen_context._within_batch():
- lines.append(
- "with op.batch_alter_table(%r, schema=%r) as batch_op:"
- % (op.table_name, op.schema)
- )
- for t_op in op.ops:
- t_lines = render_op(autogen_context, t_op)
- lines.extend(t_lines)
- lines.append("")
- else:
- for t_op in op.ops:
- t_lines = render_op(autogen_context, t_op)
- lines.extend(t_lines)
-
- return lines
- else:
- return [
- "pass"
- ]
-
-
-@renderers.dispatch_for(ops.CreateTableOp)
-def _add_table(autogen_context, op):
- table = op.to_table()
-
- args = [col for col in
- [_render_column(col, autogen_context) for col in table.columns]
- if col] + \
- sorted([rcons for rcons in
- [_render_constraint(cons, autogen_context) for cons in
- table.constraints]
- if rcons is not None
- ])
-
- if len(args) > MAX_PYTHON_ARGS:
- args = '*[' + ',\n'.join(args) + ']'
- else:
- args = ',\n'.join(args)
-
- text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % {
- 'tablename': _ident(op.table_name),
- 'prefix': _alembic_autogenerate_prefix(autogen_context),
- 'args': args,
- }
- if op.schema:
- text += ",\nschema=%r" % _ident(op.schema)
- for k in sorted(op.kw):
- text += ",\n%s=%r" % (k.replace(" ", "_"), op.kw[k])
- text += "\n)"
- return text
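-
-# Illustrative output (a sketch; exact layout comes from the joins above,
-# with indentation applied later by _indent()):
-#     op.create_table('account',
-#     sa.Column('id', sa.Integer(), nullable=False),
-#     sa.PrimaryKeyConstraint('id')
-#     )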
-
-
-@renderers.dispatch_for(ops.DropTableOp)
-def _drop_table(autogen_context, op):
- text = "%(prefix)sdrop_table(%(tname)r" % {
- "prefix": _alembic_autogenerate_prefix(autogen_context),
- "tname": _ident(op.table_name)
- }
- if op.schema:
- text += ", schema=%r" % _ident(op.schema)
- text += ")"
- return text
-
-
-@renderers.dispatch_for(ops.CreateIndexOp)
-def _add_index(autogen_context, op):
- index = op.to_index()
-
- has_batch = autogen_context._has_batch
-
- if has_batch:
- tmpl = "%(prefix)screate_index(%(name)r, [%(columns)s], "\
- "unique=%(unique)r%(kwargs)s)"
- else:
- tmpl = "%(prefix)screate_index(%(name)r, %(table)r, [%(columns)s], "\
- "unique=%(unique)r%(schema)s%(kwargs)s)"
-
- text = tmpl % {
- 'prefix': _alembic_autogenerate_prefix(autogen_context),
- 'name': _render_gen_name(autogen_context, index.name),
- 'table': _ident(index.table.name),
- 'columns': ", ".join(
- _get_index_rendered_expressions(index, autogen_context)),
- 'unique': index.unique or False,
- 'schema': (", schema=%r" % _ident(index.table.schema))
- if index.table.schema else '',
- 'kwargs': (
- ', ' +
- ', '.join(
- ["%s=%s" %
- (key, _render_potential_expr(val, autogen_context))
- for key, val in index.kwargs.items()]))
- if len(index.kwargs) else ''
- }
- return text
-
-
-@renderers.dispatch_for(ops.DropIndexOp)
-def _drop_index(autogen_context, op):
- has_batch = autogen_context._has_batch
-
- if has_batch:
- tmpl = "%(prefix)sdrop_index(%(name)r)"
- else:
- tmpl = "%(prefix)sdrop_index(%(name)r, "\
- "table_name=%(table_name)r%(schema)s)"
-
- text = tmpl % {
- 'prefix': _alembic_autogenerate_prefix(autogen_context),
- 'name': _render_gen_name(autogen_context, op.index_name),
- 'table_name': _ident(op.table_name),
- 'schema': ((", schema=%r" % _ident(op.schema))
- if op.schema else '')
- }
- return text
-
-
-@renderers.dispatch_for(ops.CreateUniqueConstraintOp)
-def _add_unique_constraint(autogen_context, op):
- return [_uq_constraint(op.to_constraint(), autogen_context, True)]
-
-
-@renderers.dispatch_for(ops.CreateForeignKeyOp)
-def _add_fk_constraint(autogen_context, op):
-
- args = [
- repr(
- _render_gen_name(autogen_context, op.constraint_name)),
- ]
- if not autogen_context._has_batch:
- args.append(
- repr(_ident(op.source_table))
- )
-
- args.extend(
- [
- repr(_ident(op.referent_table)),
- repr([_ident(col) for col in op.local_cols]),
- repr([_ident(col) for col in op.remote_cols])
- ]
- )
-
- kwargs = [
- 'referent_schema',
- 'onupdate', 'ondelete', 'initially',
- 'deferrable', 'use_alter'
- ]
- if not autogen_context._has_batch:
- kwargs.insert(0, 'source_schema')
-
- for k in kwargs:
- if k in op.kw:
- value = op.kw[k]
- if value is not None:
- args.append("%s=%r" % (k, value))
-
- return "%(prefix)screate_foreign_key(%(args)s)" % {
- 'prefix': _alembic_autogenerate_prefix(autogen_context),
- 'args': ", ".join(args)
- }
-
-
-@renderers.dispatch_for(ops.CreatePrimaryKeyOp)
-def _add_pk_constraint(constraint, autogen_context):
- raise NotImplementedError()
-
-
-@renderers.dispatch_for(ops.CreateCheckConstraintOp)
-def _add_check_constraint(constraint, autogen_context):
- raise NotImplementedError()
-
-
-@renderers.dispatch_for(ops.DropConstraintOp)
-def _drop_constraint(autogen_context, op):
-
- if autogen_context._has_batch:
- template = "%(prefix)sdrop_constraint"\
- "(%(name)r, type_=%(type)r)"
- else:
- template = "%(prefix)sdrop_constraint"\
- "(%(name)r, '%(table_name)s'%(schema)s, type_=%(type)r)"
-
- text = template % {
- 'prefix': _alembic_autogenerate_prefix(autogen_context),
- 'name': _render_gen_name(
- autogen_context, op.constraint_name),
- 'table_name': _ident(op.table_name),
- 'type': op.constraint_type,
- 'schema': (", schema='%s'" % _ident(op.schema))
- if op.schema else '',
- }
- return text
-
-
-@renderers.dispatch_for(ops.AddColumnOp)
-def _add_column(autogen_context, op):
-
- schema, tname, column = op.schema, op.table_name, op.column
- if autogen_context._has_batch:
- template = "%(prefix)sadd_column(%(column)s)"
- else:
- template = "%(prefix)sadd_column(%(tname)r, %(column)s"
- if schema:
- template += ", schema=%(schema)r"
- template += ")"
- text = template % {
- "prefix": _alembic_autogenerate_prefix(autogen_context),
- "tname": tname,
- "column": _render_column(column, autogen_context),
- "schema": schema
- }
- return text
-
-
-@renderers.dispatch_for(ops.DropColumnOp)
-def _drop_column(autogen_context, op):
-
- schema, tname, column_name = op.schema, op.table_name, op.column_name
-
- if autogen_context._has_batch:
- template = "%(prefix)sdrop_column(%(cname)r)"
- else:
- template = "%(prefix)sdrop_column(%(tname)r, %(cname)r"
- if schema:
- template += ", schema=%(schema)r"
- template += ")"
-
- text = template % {
- "prefix": _alembic_autogenerate_prefix(autogen_context),
- "tname": _ident(tname),
- "cname": _ident(column_name),
- "schema": _ident(schema)
- }
- return text
-
-
-@renderers.dispatch_for(ops.AlterColumnOp)
-def _alter_column(autogen_context, op):
-
- tname = op.table_name
- cname = op.column_name
- server_default = op.modify_server_default
- type_ = op.modify_type
- nullable = op.modify_nullable
- autoincrement = op.kw.get('autoincrement', None)
- existing_type = op.existing_type
- existing_nullable = op.existing_nullable
- existing_server_default = op.existing_server_default
- schema = op.schema
-
- indent = " " * 11
-
- if autogen_context._has_batch:
- template = "%(prefix)salter_column(%(cname)r"
- else:
- template = "%(prefix)salter_column(%(tname)r, %(cname)r"
-
- text = template % {
- 'prefix': _alembic_autogenerate_prefix(
- autogen_context),
- 'tname': tname,
- 'cname': cname}
- if existing_type is not None:
- text += ",\n%sexisting_type=%s" % (
- indent,
- _repr_type(existing_type, autogen_context))
- if server_default is not False:
- rendered = _render_server_default(
- server_default, autogen_context)
- text += ",\n%sserver_default=%s" % (indent, rendered)
-
- if type_ is not None:
- text += ",\n%stype_=%s" % (indent,
- _repr_type(type_, autogen_context))
- if nullable is not None:
- text += ",\n%snullable=%r" % (
- indent, nullable,)
- if nullable is None and existing_nullable is not None:
- text += ",\n%sexisting_nullable=%r" % (
- indent, existing_nullable)
- if autoincrement is not None:
- text += ",\n%sautoincrement=%r" % (
- indent, autoincrement)
- if server_default is False and existing_server_default:
- rendered = _render_server_default(
- existing_server_default,
- autogen_context)
- text += ",\n%sexisting_server_default=%s" % (
- indent, rendered)
- if schema and not autogen_context._has_batch:
- text += ",\n%sschema=%r" % (indent, schema)
- text += ")"
- return text
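-
-# Illustrative output (sketch; the continuation lines get the 11-space
-# alignment defined above):
-#     op.alter_column('account', 'name',
-#                existing_type=sa.VARCHAR(length=30),
-#                nullable=False)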
-
-
-class _f_name(object):
-
- def __init__(self, prefix, name):
- self.prefix = prefix
- self.name = name
-
- def __repr__(self):
- return "%sf(%r)" % (self.prefix, _ident(self.name))
-
-
-def _ident(name):
- """produce a __repr__() object for a string identifier that may
- use quoted_name() in SQLAlchemy 0.9 and greater.
-
- The issue worked around here is that quoted_name() doesn't have
- very good repr() behavior by itself when unicode is involved.
-
- """
- if name is None:
- return name
- elif util.sqla_09 and isinstance(name, sql.elements.quoted_name):
- if compat.py2k:
- # the attempt to encode to ascii here isn't super ideal,
- # however we are trying to cut down on an explosion of
- # u'' literals only when py2k + SQLA 0.9, in particular
- # makes unit tests testing code generation very difficult
- try:
- return name.encode('ascii')
- except UnicodeError:
- return compat.text_type(name)
- else:
- return compat.text_type(name)
- elif isinstance(name, compat.string_types):
- return name
-
-
-def _render_potential_expr(value, autogen_context, wrap_in_text=True):
- if isinstance(value, sql.ClauseElement):
- if util.sqla_08:
- compile_kw = dict(compile_kwargs={
- 'literal_binds': True, "include_table": False})
- else:
- compile_kw = {}
-
- if wrap_in_text:
- template = "%(prefix)stext(%(sql)r)"
- else:
- template = "%(sql)r"
-
- return template % {
- "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
- "sql": compat.text_type(
- value.compile(dialect=autogen_context.dialect,
- **compile_kw)
- )
- }
-
- else:
- return repr(value)
-
-
-def _get_index_rendered_expressions(idx, autogen_context):
- if util.sqla_08:
- return [repr(_ident(getattr(exp, "name", None)))
- if isinstance(exp, sa_schema.Column)
- else _render_potential_expr(exp, autogen_context)
- for exp in idx.expressions]
- else:
- return [
- repr(_ident(getattr(col, "name", None))) for col in idx.columns]
-
-
-def _uq_constraint(constraint, autogen_context, alter):
- opts = []
-
- has_batch = autogen_context._has_batch
-
- if constraint.deferrable:
- opts.append(("deferrable", str(constraint.deferrable)))
- if constraint.initially:
- opts.append(("initially", str(constraint.initially)))
- if not has_batch and alter and constraint.table.schema:
- opts.append(("schema", _ident(constraint.table.schema)))
- if not alter and constraint.name:
- opts.append(
- ("name",
- _render_gen_name(autogen_context, constraint.name)))
-
- if alter:
- args = [
- repr(_render_gen_name(
- autogen_context, constraint.name))]
- if not has_batch:
- args += [repr(_ident(constraint.table.name))]
- args.append(repr([_ident(col.name) for col in constraint.columns]))
- args.extend(["%s=%r" % (k, v) for k, v in opts])
- return "%(prefix)screate_unique_constraint(%(args)s)" % {
- 'prefix': _alembic_autogenerate_prefix(autogen_context),
- 'args': ", ".join(args)
- }
- else:
- args = [repr(_ident(col.name)) for col in constraint.columns]
- args.extend(["%s=%r" % (k, v) for k, v in opts])
- return "%(prefix)sUniqueConstraint(%(args)s)" % {
- "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
- "args": ", ".join(args)
- }
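-
-# Illustrative outputs (sketch, assuming the default 'op.' / 'sa.' prefixes):
-#   alter=True  -> op.create_unique_constraint('uq_user_name', 'user', ['name'])
-#   alter=False -> sa.UniqueConstraint('name', name='uq_user_name')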
-
-
-def _user_autogenerate_prefix(autogen_context, target):
- prefix = autogen_context.opts['user_module_prefix']
- if prefix is None:
- return "%s." % target.__module__
- else:
- return prefix
-
-
-def _sqlalchemy_autogenerate_prefix(autogen_context):
- return autogen_context.opts['sqlalchemy_module_prefix'] or ''
-
-
-def _alembic_autogenerate_prefix(autogen_context):
- if autogen_context._has_batch:
- return 'batch_op.'
- else:
- return autogen_context.opts['alembic_module_prefix'] or ''
-
-
-def _user_defined_render(type_, object_, autogen_context):
- if 'render_item' in autogen_context.opts:
- render = autogen_context.opts['render_item']
- if render:
- rendered = render(type_, object_, autogen_context)
- if rendered is not False:
- return rendered
- return False
-
-
-def _render_column(column, autogen_context):
- rendered = _user_defined_render("column", column, autogen_context)
- if rendered is not False:
- return rendered
-
- opts = []
- if column.server_default:
- rendered = _render_server_default(
- column.server_default, autogen_context
- )
- if rendered:
- opts.append(("server_default", rendered))
-
- if not column.autoincrement:
- opts.append(("autoincrement", column.autoincrement))
-
- if column.nullable is not None:
- opts.append(("nullable", column.nullable))
-
- # TODO: for non-ascii colname, assign a "key"
- return "%(prefix)sColumn(%(name)r, %(type)s, %(kw)s)" % {
- 'prefix': _sqlalchemy_autogenerate_prefix(autogen_context),
- 'name': _ident(column.name),
- 'type': _repr_type(column.type, autogen_context),
- 'kw': ", ".join(["%s=%s" % (kwname, val) for kwname, val in opts])
- }
-
-
-def _render_server_default(default, autogen_context, repr_=True):
- rendered = _user_defined_render("server_default", default, autogen_context)
- if rendered is not False:
- return rendered
-
- if isinstance(default, sa_schema.DefaultClause):
- if isinstance(default.arg, compat.string_types):
- default = default.arg
- else:
- return _render_potential_expr(default.arg, autogen_context)
-
- if isinstance(default, string_types) and repr_:
- default = repr(re.sub(r"^'|'$", "", default))
-
- return default
-
-
-def _repr_type(type_, autogen_context):
- rendered = _user_defined_render("type", type_, autogen_context)
- if rendered is not False:
- return rendered
-
- if hasattr(autogen_context.migration_context, 'impl'):
- impl_rt = autogen_context.migration_context.impl.render_type(
- type_, autogen_context)
- else:
- impl_rt = None
-
- mod = type(type_).__module__
- imports = autogen_context.imports
- if mod.startswith("sqlalchemy.dialects"):
- dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1)
- if imports is not None:
- imports.add("from sqlalchemy.dialects import %s" % dname)
- if impl_rt:
- return impl_rt
- else:
- return "%s.%r" % (dname, type_)
- elif mod.startswith("sqlalchemy."):
- if '_render_%s_type' % type_.__visit_name__ in globals():
- fn = globals()['_render_%s_type' % type_.__visit_name__]
- return fn(type_, autogen_context)
- else:
- prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
- return "%s%r" % (prefix, type_)
- else:
- prefix = _user_autogenerate_prefix(autogen_context, type_)
- return "%s%r" % (prefix, type_)
-
-
-def _render_ARRAY_type(type_, autogen_context):
- return _render_type_w_subtype(
- type_, autogen_context, 'item_type', r'(.+?\()'
- )
-
-
-def _render_type_w_subtype(type_, autogen_context, attrname, regexp):
- outer_repr = repr(type_)
- inner_type = getattr(type_, attrname, None)
- if inner_type is None:
- return False
-
- inner_repr = repr(inner_type)
-
- inner_repr = re.sub(r'([\(\)])', r'\\\1', inner_repr)
- sub_type = _repr_type(getattr(type_, attrname), autogen_context)
- outer_type = re.sub(
- regexp + inner_repr,
- r"\1%s" % sub_type, outer_repr)
-
- mod = type(type_).__module__
- if mod.startswith("sqlalchemy.dialects"):
- dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1)
- return "%s.%s" % (dname, outer_type)
- elif mod.startswith("sqlalchemy"):
- prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
- return "%s%s" % (prefix, outer_type)
- else:
- return None
-
-_constraint_renderers = util.Dispatcher()
-
-
-def _render_constraint(constraint, autogen_context):
- try:
- renderer = _constraint_renderers.dispatch(constraint)
- except ValueError:
- util.warn("No renderer is established for object %r" % constraint)
- return "[Unknown Python object %r]" % constraint
- else:
- return renderer(constraint, autogen_context)
-
-
-@_constraint_renderers.dispatch_for(sa_schema.PrimaryKeyConstraint)
-def _render_primary_key(constraint, autogen_context):
- rendered = _user_defined_render("primary_key", constraint, autogen_context)
- if rendered is not False:
- return rendered
-
- if not constraint.columns:
- return None
-
- opts = []
- if constraint.name:
- opts.append(("name", repr(
- _render_gen_name(autogen_context, constraint.name))))
- return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % {
- "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
- "args": ", ".join(
- [repr(c.name) for c in constraint.columns] +
- ["%s=%s" % (kwname, val) for kwname, val in opts]
- ),
- }
-
-
-def _fk_colspec(fk, metadata_schema):
- """Implement a 'safe' version of ForeignKey._get_colspec() that
- won't fail if the remote table can't be resolved.
-
- """
- colspec = fk._get_colspec()
- tokens = colspec.split(".")
- tname, colname = tokens[-2:]
-
- if metadata_schema is not None and len(tokens) == 2:
- table_fullname = "%s.%s" % (metadata_schema, tname)
- else:
- table_fullname = ".".join(tokens[0:-1])
-
- if not fk.link_to_name and \
- fk.parent is not None and fk.parent.table is not None:
- # try to resolve the remote table in order to adjust for column.key.
- # the FK constraint needs to be rendered in terms of the column
- # name.
- parent_metadata = fk.parent.table.metadata
- if table_fullname in parent_metadata.tables:
- col = parent_metadata.tables[table_fullname].c.get(colname)
- if col is not None:
- colname = _ident(col.name)
-
- colspec = "%s.%s" % (table_fullname, colname)
-
- return colspec
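-
-# e.g. (sketch) a ForeignKey('user.id') within a MetaData(schema='app')
-# renders as 'app.user.id', while an explicitly qualified 'other.user.id'
-# passes through unchanged.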
-
-
-def _populate_render_fk_opts(constraint, opts):
-
- if constraint.onupdate:
- opts.append(("onupdate", repr(constraint.onupdate)))
- if constraint.ondelete:
- opts.append(("ondelete", repr(constraint.ondelete)))
- if constraint.initially:
- opts.append(("initially", repr(constraint.initially)))
- if constraint.deferrable:
- opts.append(("deferrable", repr(constraint.deferrable)))
- if constraint.use_alter:
- opts.append(("use_alter", repr(constraint.use_alter)))
-
-
-@_constraint_renderers.dispatch_for(sa_schema.ForeignKeyConstraint)
-def _render_foreign_key(constraint, autogen_context):
- rendered = _user_defined_render("foreign_key", constraint, autogen_context)
- if rendered is not False:
- return rendered
-
- opts = []
- if constraint.name:
- opts.append(("name", repr(
- _render_gen_name(autogen_context, constraint.name))))
-
- _populate_render_fk_opts(constraint, opts)
-
- apply_metadata_schema = constraint.parent.metadata.schema
- return "%(prefix)sForeignKeyConstraint([%(cols)s], "\
- "[%(refcols)s], %(args)s)" % {
- "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
- "cols": ", ".join(
- "%r" % _ident(f.parent.name) for f in constraint.elements),
- "refcols": ", ".join(repr(_fk_colspec(f, apply_metadata_schema))
- for f in constraint.elements),
- "args": ", ".join(
- ["%s=%s" % (kwname, val) for kwname, val in opts]
- ),
- }
-
-
-@_constraint_renderers.dispatch_for(sa_schema.UniqueConstraint)
-def _render_unique_constraint(constraint, autogen_context):
- rendered = _user_defined_render("unique", constraint, autogen_context)
- if rendered is not False:
- return rendered
-
- return _uq_constraint(constraint, autogen_context, False)
-
-
-@_constraint_renderers.dispatch_for(sa_schema.CheckConstraint)
-def _render_check_constraint(constraint, autogen_context):
- rendered = _user_defined_render("check", constraint, autogen_context)
- if rendered is not False:
- return rendered
-
- # detect the constraint being part of
- # a parent type which is probably in the Table already.
- # ideally SQLAlchemy would give us more of a first class
- # way to detect this.
- if constraint._create_rule and \
- hasattr(constraint._create_rule, 'target') and \
- isinstance(constraint._create_rule.target,
- sqltypes.TypeEngine):
- return None
- opts = []
- if constraint.name:
- opts.append(
- (
- "name",
- repr(
- _render_gen_name(
- autogen_context, constraint.name))
- )
- )
- return "%(prefix)sCheckConstraint(%(sqltext)s%(opts)s)" % {
- "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
- "opts": ", " + (", ".join("%s=%s" % (k, v)
- for k, v in opts)) if opts else "",
- "sqltext": _render_potential_expr(
- constraint.sqltext, autogen_context, wrap_in_text=False)
- }
-
-
-@renderers.dispatch_for(ops.ExecuteSQLOp)
-def _execute_sql(autogen_context, op):
- if not isinstance(op.sqltext, string_types):
- raise NotImplementedError(
- "Autogenerate rendering of SQL Expression language constructs "
- "not supported here; please use a plain SQL string"
- )
- return 'op.execute(%r)' % op.sqltext
-
-
-renderers = default_renderers.branch()
diff --git a/venv/Lib/site-packages/alembic/autogenerate/rewriter.py b/venv/Lib/site-packages/alembic/autogenerate/rewriter.py
deleted file mode 100644
index 941bd4b..0000000
--- a/venv/Lib/site-packages/alembic/autogenerate/rewriter.py
+++ /dev/null
@@ -1,150 +0,0 @@
-from alembic import util
-from alembic.operations import ops
-
-
-class Rewriter(object):
- """A helper object that allows easy 'rewriting' of ops streams.
-
- The :class:`.Rewriter` object is intended to be passed along
- to the
- :paramref:`.EnvironmentContext.configure.process_revision_directives`
- parameter in an ``env.py`` script. Once constructed, any number
- of "rewrites" functions can be associated with it, which will be given
- the opportunity to modify the structure without having to have explicit
- knowledge of the overall structure.
-
- The function is passed the :class:`.MigrationContext` object and
-    ``revision`` tuple that are passed to the
-    :paramref:`.EnvironmentContext.configure.process_revision_directives`
-    function normally,
- and the third argument is an individual directive of the type
- noted in the decorator. The function has the choice of returning
- a single op directive, which normally can be the directive that
- was actually passed, or a new directive to replace it, or a list
- of zero or more directives to replace it.
-
- .. seealso::
-
- :ref:`autogen_rewriter` - usage example
-
- .. versionadded:: 0.8
-
- """
-
- _traverse = util.Dispatcher()
-
- _chained = None
-
- def __init__(self):
- self.dispatch = util.Dispatcher()
-
- def chain(self, other):
- """Produce a "chain" of this :class:`.Rewriter` to another.
-
- This allows two rewriters to operate serially on a stream,
- e.g.::
-
- writer1 = autogenerate.Rewriter()
- writer2 = autogenerate.Rewriter()
-
- @writer1.rewrites(ops.AddColumnOp)
- def add_column_nullable(context, revision, op):
- op.column.nullable = True
- return op
-
- @writer2.rewrites(ops.AddColumnOp)
- def add_column_idx(context, revision, op):
- idx_op = ops.CreateIndexOp(
- 'ixc', op.table_name, [op.column.name])
- return [
- op,
- idx_op
- ]
-
- writer = writer1.chain(writer2)
-
- :param other: a :class:`.Rewriter` instance
- :return: a new :class:`.Rewriter` that will run the operations
- of this writer, then the "other" writer, in succession.
-
- """
- wr = self.__class__.__new__(self.__class__)
- wr.__dict__.update(self.__dict__)
- wr._chained = other
- return wr
-
- def rewrites(self, operator):
- """Register a function as rewriter for a given type.
-
- The function should receive three arguments, which are
- the :class:`.MigrationContext`, a ``revision`` tuple, and
- an op directive of the type indicated. E.g.::
-
- @writer1.rewrites(ops.AddColumnOp)
- def add_column_nullable(context, revision, op):
- op.column.nullable = True
- return op
-
- """
- return self.dispatch.dispatch_for(operator)
-
- def _rewrite(self, context, revision, directive):
- try:
- _rewriter = self.dispatch.dispatch(directive)
- except ValueError:
- _rewriter = None
- yield directive
- else:
- for r_directive in util.to_list(
- _rewriter(context, revision, directive)):
- yield r_directive
-
- def __call__(self, context, revision, directives):
- self.process_revision_directives(context, revision, directives)
- if self._chained:
- self._chained(context, revision, directives)
-
- @_traverse.dispatch_for(ops.MigrationScript)
- def _traverse_script(self, context, revision, directive):
- upgrade_ops_list = []
- for upgrade_ops in directive.upgrade_ops_list:
-            ret = self._traverse_for(context, revision, upgrade_ops)
- if len(ret) != 1:
- raise ValueError(
- "Can only return single object for UpgradeOps traverse")
- upgrade_ops_list.append(ret[0])
- directive.upgrade_ops = upgrade_ops_list
-
- downgrade_ops_list = []
- for downgrade_ops in directive.downgrade_ops_list:
-            ret = self._traverse_for(
-                context, revision, downgrade_ops)
- if len(ret) != 1:
- raise ValueError(
- "Can only return single object for DowngradeOps traverse")
- downgrade_ops_list.append(ret[0])
- directive.downgrade_ops = downgrade_ops_list
-
- @_traverse.dispatch_for(ops.OpContainer)
- def _traverse_op_container(self, context, revision, directive):
- self._traverse_list(context, revision, directive.ops)
-
- @_traverse.dispatch_for(ops.MigrateOperation)
- def _traverse_any_directive(self, context, revision, directive):
- pass
-
- def _traverse_for(self, context, revision, directive):
- directives = list(self._rewrite(context, revision, directive))
- for directive in directives:
- traverser = self._traverse.dispatch(directive)
- traverser(self, context, revision, directive)
- return directives
-
- def _traverse_list(self, context, revision, directives):
- dest = []
- for directive in directives:
- dest.extend(self._traverse_for(context, revision, directive))
-
- directives[:] = dest
-
- def process_revision_directives(self, context, revision, directives):
- self._traverse_list(context, revision, directives)
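-
-
-# Typical wiring sketch for an env.py (illustrative; the ops import and the
-# context.configure() arguments are assumptions, not part of this module):
-#
-#     writer = Rewriter()
-#
-#     @writer.rewrites(ops.AddColumnOp)
-#     def force_nullable(context, revision, op):
-#         op.column.nullable = True
-#         return op
-#
-#     context.configure(..., process_revision_directives=writer)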
diff --git a/venv/Lib/site-packages/alembic/command.py b/venv/Lib/site-packages/alembic/command.py
deleted file mode 100644
index 8675005..0000000
--- a/venv/Lib/site-packages/alembic/command.py
+++ /dev/null
@@ -1,538 +0,0 @@
-import os
-
-from .script import ScriptDirectory
-from .runtime.environment import EnvironmentContext
-from . import util
-from . import autogenerate as autogen
-
-
-def list_templates(config):
- """List available templates
-
- :param config: a :class:`.Config` object.
-
- """
-
- config.print_stdout("Available templates:\n")
- for tempname in os.listdir(config.get_template_directory()):
- with open(os.path.join(
- config.get_template_directory(),
- tempname,
- 'README')) as readme:
- synopsis = next(readme)
- config.print_stdout("%s - %s", tempname, synopsis)
-
- config.print_stdout("\nTemplates are used via the 'init' command, e.g.:")
- config.print_stdout("\n alembic init --template generic ./scripts")
-
-
-def init(config, directory, template='generic'):
- """Initialize a new scripts directory.
-
- :param config: a :class:`.Config` object.
-
- :param directory: string path of the target directory
-
- :param template: string name of the migration environment template to
- use.
-
- """
-
- if os.access(directory, os.F_OK):
- raise util.CommandError("Directory %s already exists" % directory)
-
- template_dir = os.path.join(config.get_template_directory(),
- template)
- if not os.access(template_dir, os.F_OK):
- raise util.CommandError("No such template %r" % template)
-
- util.status("Creating directory %s" % os.path.abspath(directory),
- os.makedirs, directory)
-
- versions = os.path.join(directory, 'versions')
- util.status("Creating directory %s" % os.path.abspath(versions),
- os.makedirs, versions)
-
- script = ScriptDirectory(directory)
-
- for file_ in os.listdir(template_dir):
- file_path = os.path.join(template_dir, file_)
- if file_ == 'alembic.ini.mako':
- config_file = os.path.abspath(config.config_file_name)
- if os.access(config_file, os.F_OK):
- util.msg("File %s already exists, skipping" % config_file)
- else:
- script._generate_template(
- file_path,
- config_file,
- script_location=directory
- )
- elif os.path.isfile(file_path):
- output_file = os.path.join(directory, file_)
- script._copy_file(
- file_path,
- output_file
- )
-
- util.msg("Please edit configuration/connection/logging "
- "settings in %r before proceeding." % config_file)
-
-
-def revision(
- config, message=None, autogenerate=False, sql=False,
- head="head", splice=False, branch_label=None,
- version_path=None, rev_id=None, depends_on=None,
- process_revision_directives=None):
- """Create a new revision file.
-
- :param config: a :class:`.Config` object.
-
- :param message: string message to apply to the revision; this is the
- ``-m`` option to ``alembic revision``.
-
- :param autogenerate: whether or not to autogenerate the script from
- the database; this is the ``--autogenerate`` option to ``alembic revision``.
-
- :param sql: whether to dump the script out as a SQL string; when specified,
- the script is dumped to stdout. This is the ``--sql`` option to
- ``alembic revision``.
-
- :param head: head revision to build the new revision upon as a parent;
- this is the ``--head`` option to ``alembic revision``.
-
- :param splice: whether or not the new revision should be made into a
- new head of its own; is required when the given ``head`` is not itself
- a head. This is the ``--splice`` option to ``alembic revision``.
-
- :param branch_label: string label to apply to the branch; this is the
- ``--branch-label`` option to ``alembic revision``.
-
- :param version_path: string symbol identifying a specific version path
- from the configuration; this is the ``--version-path`` option to
- ``alembic revision``.
-
- :param rev_id: optional revision identifier to use instead of having
- one generated; this is the ``--rev-id`` option to ``alembic revision``.
-
- :param depends_on: optional list of "depends on" identifiers; this is the
- ``--depends-on`` option to ``alembic revision``.
-
- :param process_revision_directives: this is a callable that takes the
- same form as the callable described at
- :paramref:`.EnvironmentContext.configure.process_revision_directives`;
- will be applied to the structure generated by the revision process
- where it can be altered programmatically. Note that unlike all
- the other parameters, this option is only available via programmatic
- use of :func:`.command.revision`
-
- .. versionadded:: 0.9.0
-
- """
-
- script_directory = ScriptDirectory.from_config(config)
-
- command_args = dict(
- message=message,
- autogenerate=autogenerate,
- sql=sql, head=head, splice=splice, branch_label=branch_label,
- version_path=version_path, rev_id=rev_id, depends_on=depends_on
- )
- revision_context = autogen.RevisionContext(
- config, script_directory, command_args,
- process_revision_directives=process_revision_directives)
-
- environment = util.asbool(
- config.get_main_option("revision_environment")
- )
-
- if autogenerate:
- environment = True
-
- if sql:
- raise util.CommandError(
- "Using --sql with --autogenerate does not make any sense")
-
- def retrieve_migrations(rev, context):
- revision_context.run_autogenerate(rev, context)
- return []
- elif environment:
- def retrieve_migrations(rev, context):
- revision_context.run_no_autogenerate(rev, context)
- return []
- elif sql:
- raise util.CommandError(
- "Using --sql with the revision command when "
- "revision_environment is not configured does not make any sense")
-
- if environment:
- with EnvironmentContext(
- config,
- script_directory,
- fn=retrieve_migrations,
- as_sql=sql,
- template_args=revision_context.template_args,
- revision_context=revision_context
- ):
- script_directory.run_env()
-
- scripts = [
- script for script in
- revision_context.generate_scripts()
- ]
- if len(scripts) == 1:
- return scripts[0]
- else:
- return scripts
-
-
-def merge(config, revisions, message=None, branch_label=None, rev_id=None):
- """Merge two revisions together. Creates a new migration file.
-
- .. versionadded:: 0.7.0
-
- :param config: a :class:`.Config` instance
-
- :param message: string message to apply to the revision
-
- :param branch_label: string label name to apply to the new revision
-
- :param rev_id: hardcoded revision identifier instead of generating a new
- one.
-
- .. seealso::
-
- :ref:`branches`
-
- """
-
- script = ScriptDirectory.from_config(config)
- template_args = {
- 'config': config # Let templates use config for
- # e.g. multiple databases
- }
- return script.generate_revision(
- rev_id or util.rev_id(), message, refresh=True,
- head=revisions, branch_labels=branch_label,
- **template_args)
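-
-# e.g. (sketch) merge(cfg, "heads", message="merge branches") collapses all
-# current branch heads into one new revision file, mirroring the command
-# line's "alembic merge heads -m 'merge branches'".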
-
-
-def upgrade(config, revision, sql=False, tag=None):
- """Upgrade to a later version.
-
- :param config: a :class:`.Config` instance.
-
- :param revision: string revision target or range for --sql mode
-
- :param sql: if True, use ``--sql`` mode
-
- :param tag: an arbitrary "tag" that can be intercepted by custom
- ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
- method.
-
- """
-
- script = ScriptDirectory.from_config(config)
-
- starting_rev = None
- if ":" in revision:
- if not sql:
- raise util.CommandError("Range revision not allowed")
-        starting_rev, revision = revision.split(':', 1)
-
- def upgrade(rev, context):
- return script._upgrade_revs(revision, rev)
-
- with EnvironmentContext(
- config,
- script,
- fn=upgrade,
- as_sql=sql,
- starting_rev=starting_rev,
- destination_rev=revision,
- tag=tag
- ):
- script.run_env()
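-
-# Programmatic usage sketch (assumes an alembic.ini in the working
-# directory; illustrative only):
-#
-#     from alembic.config import Config
-#     cfg = Config("alembic.ini")
-#     upgrade(cfg, "head")                  # migrate to the latest revision
-#     upgrade(cfg, "base:head", sql=True)   # emit offline SQL for the range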
-
-
-def downgrade(config, revision, sql=False, tag=None):
- """Revert to a previous version.
-
- :param config: a :class:`.Config` instance.
-
- :param revision: string revision target or range for --sql mode
-
- :param sql: if True, use ``--sql`` mode
-
- :param tag: an arbitrary "tag" that can be intercepted by custom
- ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
- method.
-
- """
-
- script = ScriptDirectory.from_config(config)
- starting_rev = None
- if ":" in revision:
- if not sql:
- raise util.CommandError("Range revision not allowed")
-        starting_rev, revision = revision.split(':', 1)
- elif sql:
- raise util.CommandError(
- "downgrade with --sql requires :")
-
- def downgrade(rev, context):
- return script._downgrade_revs(revision, rev)
-
- with EnvironmentContext(
- config,
- script,
- fn=downgrade,
- as_sql=sql,
- starting_rev=starting_rev,
- destination_rev=revision,
- tag=tag
- ):
- script.run_env()
-
-
-def show(config, rev):
- """Show the revision(s) denoted by the given symbol.
-
- :param config: a :class:`.Config` instance.
-
-    :param rev: string revision target
-
- """
-
- script = ScriptDirectory.from_config(config)
-
- if rev == "current":
- def show_current(rev, context):
- for sc in script.get_revisions(rev):
- config.print_stdout(sc.log_entry)
- return []
- with EnvironmentContext(
- config,
- script,
- fn=show_current
- ):
- script.run_env()
- else:
- for sc in script.get_revisions(rev):
- config.print_stdout(sc.log_entry)
-
-
-def history(config, rev_range=None, verbose=False):
- """List changeset scripts in chronological order.
-
- :param config: a :class:`.Config` instance.
-
- :param rev_range: string revision range
-
- :param verbose: output in verbose mode.
-
- """
-
- script = ScriptDirectory.from_config(config)
- if rev_range is not None:
- if ":" not in rev_range:
- raise util.CommandError(
- "History range requires [start]:[end], "
- "[start]:, or :[end]")
- base, head = rev_range.strip().split(":")
- else:
- base = head = None
-
- environment = util.asbool(
- config.get_main_option("revision_environment")
- )
-
- def _display_history(config, script, base, head):
- for sc in script.walk_revisions(
- base=base or "base",
- head=head or "heads"):
- config.print_stdout(
- sc.cmd_format(
- verbose=verbose, include_branches=True,
- include_doc=True, include_parents=True))
-
- def _display_history_w_current(config, script, base=None, head=None):
- def _display_current_history(rev, context):
- if head is None:
- _display_history(config, script, base, rev)
- elif base is None:
- _display_history(config, script, rev, head)
- else:
- _display_history(config, script, base, head)
- return []
-
- with EnvironmentContext(
- config,
- script,
- fn=_display_current_history
- ):
- script.run_env()
-
- if base == "current":
- _display_history_w_current(config, script, head=head)
- elif head == "current":
- _display_history_w_current(config, script, base=base)
- elif environment:
- _display_history_w_current(config, script, base, head)
- else:
- _display_history(config, script, base, head)
-
-
-def heads(config, verbose=False, resolve_dependencies=False):
- """Show current available heads in the script directory
-
- :param config: a :class:`.Config` instance.
-
- :param verbose: output in verbose mode.
-
-    :param resolve_dependencies: treat dependency versions as down revisions.
-
- """
-
- script = ScriptDirectory.from_config(config)
- if resolve_dependencies:
- heads = script.get_revisions("heads")
- else:
- heads = script.get_revisions(script.get_heads())
-
- for rev in heads:
- config.print_stdout(
- rev.cmd_format(
- verbose, include_branches=True, tree_indicators=False))
-
-
-def branches(config, verbose=False):
- """Show current branch points.
-
- :param config: a :class:`.Config` instance.
-
- :param verbose: output in verbose mode.
-
- """
- script = ScriptDirectory.from_config(config)
- for sc in script.walk_revisions():
- if sc.is_branch_point:
- config.print_stdout(
- "%s\n%s\n",
- sc.cmd_format(verbose, include_branches=True),
- "\n".join(
- "%s -> %s" % (
- " " * len(str(sc.revision)),
- rev_obj.cmd_format(
- False, include_branches=True, include_doc=verbose)
- ) for rev_obj in
- (script.get_revision(rev) for rev in sc.nextrev)
- )
- )
-
-
-def current(config, verbose=False, head_only=False):
- """Display the current revision for a database.
-
- :param config: a :class:`.Config` instance.
-
- :param verbose: output in verbose mode.
-
- :param head_only: deprecated; use ``verbose`` for additional output.
-
- """
-
- script = ScriptDirectory.from_config(config)
-
- if head_only:
- util.warn("--head-only is deprecated")
-
- def display_version(rev, context):
- if verbose:
- config.print_stdout(
- "Current revision(s) for %s:",
- util.obfuscate_url_pw(context.connection.engine.url)
- )
- for rev in script.get_all_current(rev):
- config.print_stdout(rev.cmd_format(verbose))
-
- return []
-
- with EnvironmentContext(
- config,
- script,
- fn=display_version
- ):
- script.run_env()
-
-
-def stamp(config, revision, sql=False, tag=None):
- """'stamp' the revision table with the given revision; don't
- run any migrations.
-
- :param config: a :class:`.Config` instance.
-
- :param revision: target revision.
-
- :param sql: use ``--sql`` mode
-
- :param tag: an arbitrary "tag" that can be intercepted by custom
-    ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
- method.
-
- """
-
- script = ScriptDirectory.from_config(config)
-
- starting_rev = None
- if ":" in revision:
- if not sql:
- raise util.CommandError("Range revision not allowed")
-        starting_rev, revision = revision.split(':', 1)
-
- def do_stamp(rev, context):
- return script._stamp_revs(revision, rev)
-
- with EnvironmentContext(
- config,
- script,
- fn=do_stamp,
- as_sql=sql,
- destination_rev=revision,
- starting_rev=starting_rev,
- tag=tag
- ):
- script.run_env()
-
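-# A usage sketch added for illustration (not part of the original module):
-# the command functions above are plain callables taking a Config first, so
-# "stamp" can be driven programmatically; the .ini path is hypothetical.
-#
-#   from alembic.config import Config
-#   from alembic import command
-#
-#   cfg = Config("/path/to/alembic.ini")
-#   command.stamp(cfg, "head")  # mark the DB as current without migrating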
-
-def edit(config, rev):
- """Edit revision script(s) using $EDITOR.
-
- :param config: a :class:`.Config` instance.
-
- :param rev: target revision.
-
- """
-
- script = ScriptDirectory.from_config(config)
-
- if rev == "current":
- def edit_current(rev, context):
- if not rev:
- raise util.CommandError("No current revisions")
- for sc in script.get_revisions(rev):
- util.edit(sc.path)
- return []
- with EnvironmentContext(
- config,
- script,
- fn=edit_current
- ):
- script.run_env()
- else:
- revs = script.get_revisions(rev)
- if not revs:
- raise util.CommandError(
- "No revision files indicated by symbol '%s'" % rev)
- for sc in revs:
- util.edit(sc.path)
-
diff --git a/venv/Lib/site-packages/alembic/config.py b/venv/Lib/site-packages/alembic/config.py
deleted file mode 100644
index 46ccb91..0000000
--- a/venv/Lib/site-packages/alembic/config.py
+++ /dev/null
@@ -1,482 +0,0 @@
-from argparse import ArgumentParser
-from .util.compat import SafeConfigParser
-import inspect
-import os
-import sys
-
-from . import command
-from . import util
-from . import package_dir
-from .util import compat
-
-
-class Config(object):
-
- """Represent an Alembic configuration.
-
- Within an ``env.py`` script, this is available
- via the :attr:`.EnvironmentContext.config` attribute,
- which in turn is available at ``alembic.context``::
-
- from alembic import context
-
- some_param = context.config.get_main_option("my option")
-
-    When invoking Alembic programmatically, a new
- :class:`.Config` can be created by passing
- the name of an .ini file to the constructor::
-
- from alembic.config import Config
- alembic_cfg = Config("/path/to/yourapp/alembic.ini")
-
- With a :class:`.Config` object, you can then
- run Alembic commands programmatically using the directives
- in :mod:`alembic.command`.
-
- The :class:`.Config` object can also be constructed without
- a filename. Values can be set programmatically, and
- new sections will be created as needed::
-
- from alembic.config import Config
- alembic_cfg = Config()
- alembic_cfg.set_main_option("script_location", "myapp:migrations")
- alembic_cfg.set_main_option("url", "postgresql://foo/bar")
- alembic_cfg.set_section_option("mysection", "foo", "bar")
-
- .. warning::
-
- When using programmatic configuration, make sure the
- ``env.py`` file in use is compatible with the target configuration;
-        in particular, the call to Python ``logging.fileConfig()`` should be
-        omitted if the programmatic configuration doesn't actually include
- logging directives.
-
- For passing non-string values to environments, such as connections and
- engines, use the :attr:`.Config.attributes` dictionary::
-
- with engine.begin() as connection:
- alembic_cfg.attributes['connection'] = connection
- command.upgrade(alembic_cfg, "head")
-
- :param file_: name of the .ini file to open.
- :param ini_section: name of the main Alembic section within the
- .ini file
-    :param output_buffer: optional file-like output buffer which
- will be passed to the :class:`.MigrationContext` - used to redirect
- the output of "offline generation" when using Alembic programmatically.
- :param stdout: buffer where the "print" output of commands will be sent.
- Defaults to ``sys.stdout``.
-
- .. versionadded:: 0.4
-
- :param config_args: A dictionary of keys and values that will be used
- for substitution in the alembic config file. The dictionary as given
- is **copied** to a new one, stored locally as the attribute
- ``.config_args``. When the :attr:`.Config.file_config` attribute is
- first invoked, the replacement variable ``here`` will be added to this
- dictionary before the dictionary is passed to ``SafeConfigParser()``
- to parse the .ini file.
-
- .. versionadded:: 0.7.0
-
- :param attributes: optional dictionary of arbitrary Python keys/values,
- which will be populated into the :attr:`.Config.attributes` dictionary.
-
- .. versionadded:: 0.7.5
-
- .. seealso::
-
- :ref:`connection_sharing`
-
- """
-
- def __init__(self, file_=None, ini_section='alembic', output_buffer=None,
- stdout=sys.stdout, cmd_opts=None,
- config_args=util.immutabledict(), attributes=None):
- """Construct a new :class:`.Config`
-
- """
- self.config_file_name = file_
- self.config_ini_section = ini_section
- self.output_buffer = output_buffer
- self.stdout = stdout
- self.cmd_opts = cmd_opts
- self.config_args = dict(config_args)
- if attributes:
- self.attributes.update(attributes)
-
- cmd_opts = None
- """The command-line options passed to the ``alembic`` script.
-
- Within an ``env.py`` script this can be accessed via the
- :attr:`.EnvironmentContext.config` attribute.
-
- .. versionadded:: 0.6.0
-
- .. seealso::
-
- :meth:`.EnvironmentContext.get_x_argument`
-
- """
-
- config_file_name = None
- """Filesystem path to the .ini file in use."""
-
- config_ini_section = None
- """Name of the config file section to read basic configuration
- from. Defaults to ``alembic``, that is the ``[alembic]`` section
- of the .ini file. This value is modified using the ``-n/--name``
-    option to the Alembic runner.
-
- """
-
- @util.memoized_property
- def attributes(self):
- """A Python dictionary for storage of additional state.
-
- This is a utility dictionary which can include not just strings but
- engines, connections, schema objects, or anything else.
- Use this to pass objects into an env.py script, such as passing
- a :class:`sqlalchemy.engine.base.Connection` when calling
- commands from :mod:`alembic.command` programmatically.
-
- .. versionadded:: 0.7.5
-
- .. seealso::
-
- :ref:`connection_sharing`
-
- :paramref:`.Config.attributes`
-
- """
- return {}
-
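-    # A hedged sketch of the consuming side (not in the original source):
-    # inside env.py, a connection placed in Config.attributes by the caller
-    # can be retrieved like this ("connection" is a conventional, user-chosen
-    # key; engine_from_config is sqlalchemy.engine_from_config):
-    #
-    #   connectable = config.attributes.get('connection', None)
-    #   if connectable is None:
-    #       connectable = engine_from_config(
-    #           config.get_section(config.config_ini_section),
-    #           prefix='sqlalchemy.')
-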
- def print_stdout(self, text, *arg):
- """Render a message to standard out."""
-
- util.write_outstream(
- self.stdout,
- (compat.text_type(text) % arg),
- "\n"
- )
-
- @util.memoized_property
- def file_config(self):
- """Return the underlying ``ConfigParser`` object.
-
- Direct access to the .ini file is available here,
- though the :meth:`.Config.get_section` and
- :meth:`.Config.get_main_option`
- methods provide a possibly simpler interface.
-
- """
-
- if self.config_file_name:
- here = os.path.abspath(os.path.dirname(self.config_file_name))
- else:
- here = ""
- self.config_args['here'] = here
- file_config = SafeConfigParser(self.config_args)
- if self.config_file_name:
- file_config.read([self.config_file_name])
- else:
- file_config.add_section(self.config_ini_section)
- return file_config
-
- def get_template_directory(self):
- """Return the directory where Alembic setup templates are found.
-
- This method is used by the alembic ``init`` and ``list_templates``
- commands.
-
- """
- return os.path.join(package_dir, 'templates')
-
- def get_section(self, name):
- """Return all the configuration options from a given .ini file section
- as a dictionary.
-
- """
- return dict(self.file_config.items(name))
-
- def set_main_option(self, name, value):
- """Set an option programmatically within the 'main' section.
-
- This overrides whatever was in the .ini file.
-
- :param name: name of the value
-
- :param value: the value. Note that this value is passed to
- ``ConfigParser.set``, which supports variable interpolation using
- pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of
- an interpolation symbol must therefore be escaped, e.g. ``%%``.
- The given value may refer to another value already in the file
- using the interpolation format.
-
- """
- self.set_section_option(self.config_ini_section, name, value)
-
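-    # Example of the escaping rule described above (comment added for
-    # illustration): a literal percent sign must be doubled so ConfigParser
-    # interpolation does not consume it, while %(here)s-style references
-    # interpolate against existing values.
-    #
-    #   cfg.set_main_option("sqlalchemy.url", "mysql://u:pass%%word@host/db")
-    #   cfg.set_main_option("version_locations", "%(here)s/versions")
-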
- def remove_main_option(self, name):
- self.file_config.remove_option(self.config_ini_section, name)
-
- def set_section_option(self, section, name, value):
- """Set an option programmatically within the given section.
-
- The section is created if it doesn't exist already.
- The value here will override whatever was in the .ini
- file.
-
- :param section: name of the section
-
- :param name: name of the value
-
- :param value: the value. Note that this value is passed to
- ``ConfigParser.set``, which supports variable interpolation using
- pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of
- an interpolation symbol must therefore be escaped, e.g. ``%%``.
- The given value may refer to another value already in the file
- using the interpolation format.
-
- """
-
- if not self.file_config.has_section(section):
- self.file_config.add_section(section)
- self.file_config.set(section, name, value)
-
- def get_section_option(self, section, name, default=None):
- """Return an option from the given section of the .ini file.
-
- """
- if not self.file_config.has_section(section):
- raise util.CommandError("No config file %r found, or file has no "
- "'[%s]' section" %
- (self.config_file_name, section))
- if self.file_config.has_option(section, name):
- return self.file_config.get(section, name)
- else:
- return default
-
- def get_main_option(self, name, default=None):
- """Return an option from the 'main' section of the .ini file.
-
- This defaults to being a key from the ``[alembic]``
- section, unless the ``-n/--name`` flag were used to
- indicate a different section.
-
- """
- return self.get_section_option(self.config_ini_section, name, default)
-
-
-class CommandLine(object):
-
- def __init__(self, prog=None):
- self._generate_args(prog)
-
- def _generate_args(self, prog):
- def add_options(parser, positional, kwargs):
- kwargs_opts = {
- 'template': (
- "-t", "--template",
- dict(
- default='generic',
- type=str,
- help="Setup template for use with 'init'"
- )
- ),
- 'message': (
- "-m", "--message",
- dict(
- type=str,
- help="Message string to use with 'revision'")
- ),
- 'sql': (
- "--sql",
- dict(
- action="store_true",
- help="Don't emit SQL to database - dump to "
- "standard output/file instead"
- )
- ),
- 'tag': (
- "--tag",
- dict(
- type=str,
- help="Arbitrary 'tag' name - can be used by "
- "custom env.py scripts.")
- ),
- 'head': (
- "--head",
- dict(
- type=str,
- help="Specify head revision or @head "
- "to base new revision on."
- )
- ),
- 'splice': (
- "--splice",
- dict(
- action="store_true",
- help="Allow a non-head revision as the "
- "'head' to splice onto"
- )
- ),
- 'depends_on': (
- "--depends-on",
- dict(
- action="append",
- help="Specify one or more revision identifiers "
- "which this revision should depend on."
- )
- ),
- 'rev_id': (
- "--rev-id",
- dict(
- type=str,
- help="Specify a hardcoded revision id instead of "
- "generating one"
- )
- ),
- 'version_path': (
- "--version-path",
- dict(
- type=str,
- help="Specify specific path from config for "
- "version file"
- )
- ),
- 'branch_label': (
- "--branch-label",
- dict(
- type=str,
- help="Specify a branch label to apply to the "
- "new revision"
- )
- ),
- 'verbose': (
- "-v", "--verbose",
- dict(
- action="store_true",
- help="Use more verbose output"
- )
- ),
- 'resolve_dependencies': (
- '--resolve-dependencies',
- dict(
- action="store_true",
- help="Treat dependency versions as down revisions"
- )
- ),
- 'autogenerate': (
- "--autogenerate",
- dict(
- action="store_true",
- help="Populate revision script with candidate "
- "migration operations, based on comparison "
- "of database to model.")
- ),
- 'head_only': (
- "--head-only",
- dict(
- action="store_true",
- help="Deprecated. Use --verbose for "
- "additional output")
- ),
- 'rev_range': (
- "-r", "--rev-range",
- dict(
- action="store",
- help="Specify a revision range; "
- "format is [start]:[end]")
- )
- }
- positional_help = {
- 'directory': "location of scripts directory",
- 'revision': "revision identifier",
- 'revisions': "one or more revisions, or 'heads' for all heads"
-
- }
- for arg in kwargs:
- if arg in kwargs_opts:
- args = kwargs_opts[arg]
- args, kw = args[0:-1], args[-1]
- parser.add_argument(*args, **kw)
-
-            for arg in positional:
-                if arg == "revisions":
-                    parser.add_argument(
-                        arg, nargs='+', help=positional_help.get(arg))
-                else:
-                    parser.add_argument(arg, help=positional_help.get(arg))
-
- parser = ArgumentParser(prog=prog)
- parser.add_argument("-c", "--config",
- type=str,
- default="alembic.ini",
- help="Alternate config file")
- parser.add_argument("-n", "--name",
- type=str,
- default="alembic",
- help="Name of section in .ini file to "
- "use for Alembic config")
- parser.add_argument("-x", action="append",
- help="Additional arguments consumed by "
- "custom env.py scripts, e.g. -x "
- "setting1=somesetting -x setting2=somesetting")
- parser.add_argument("--raiseerr", action="store_true",
- help="Raise a full stack trace on error")
- subparsers = parser.add_subparsers()
-
- for fn in [getattr(command, n) for n in dir(command)]:
- if inspect.isfunction(fn) and \
- fn.__name__[0] != '_' and \
- fn.__module__ == 'alembic.command':
-
- spec = compat.inspect_getargspec(fn)
- if spec[3]:
- positional = spec[0][1:-len(spec[3])]
- kwarg = spec[0][-len(spec[3]):]
- else:
- positional = spec[0][1:]
- kwarg = []
-
- subparser = subparsers.add_parser(
- fn.__name__,
- help=fn.__doc__)
- add_options(subparser, positional, kwarg)
- subparser.set_defaults(cmd=(fn, positional, kwarg))
- self.parser = parser
-
- def run_cmd(self, config, options):
- fn, positional, kwarg = options.cmd
-
- try:
- fn(config,
- *[getattr(options, k, None) for k in positional],
- **dict((k, getattr(options, k, None)) for k in kwarg)
- )
- except util.CommandError as e:
- if options.raiseerr:
- raise
- else:
- util.err(str(e))
-
- def main(self, argv=None):
- options = self.parser.parse_args(argv)
- if not hasattr(options, "cmd"):
- # see http://bugs.python.org/issue9253, argparse
- # behavior changed incompatibly in py3.3
- self.parser.error("too few arguments")
- else:
- cfg = Config(file_=options.config,
- ini_section=options.name, cmd_opts=options)
- self.run_cmd(cfg, options)
-
-
-def main(argv=None, prog=None, **kwargs):
- """The console runner function for Alembic."""
-
- CommandLine(prog=prog).main(argv=argv)
-
-if __name__ == '__main__':
- main()
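-
-# Usage sketch (added; not part of the original file): the console runner
-# accepts an explicit argv, which makes it easy to drive from tests or
-# scripts; the .ini path below is hypothetical.
-#
-#   from alembic.config import main
-#   main(argv=["-c", "/path/to/alembic.ini", "upgrade", "head"])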
diff --git a/venv/Lib/site-packages/alembic/context.py b/venv/Lib/site-packages/alembic/context.py
deleted file mode 100644
index 758fca8..0000000
--- a/venv/Lib/site-packages/alembic/context.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .runtime.environment import EnvironmentContext
-
-# create proxy functions for
-# each method on the EnvironmentContext class.
-EnvironmentContext.create_module_class_proxy(globals(), locals())
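-
-# Illustrative note (added; not in the original file): the proxying above is
-# what lets a typical env.py call EnvironmentContext methods as module-level
-# functions, e.g. for offline mode:
-#
-#   from alembic import context
-#   url = context.config.get_main_option("sqlalchemy.url")
-#   context.configure(url=url, literal_binds=True)
-#   with context.begin_transaction():
-#       context.run_migrations()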
diff --git a/venv/Lib/site-packages/alembic/ddl/__init__.py b/venv/Lib/site-packages/alembic/ddl/__init__.py
deleted file mode 100644
index d225c45..0000000
--- a/venv/Lib/site-packages/alembic/ddl/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from . import postgresql, mysql, sqlite, mssql, oracle # pragma: no cover
-from .impl import DefaultImpl # pragma: no cover
diff --git a/venv/Lib/site-packages/alembic/ddl/base.py b/venv/Lib/site-packages/alembic/ddl/base.py
deleted file mode 100644
index f4a525f..0000000
--- a/venv/Lib/site-packages/alembic/ddl/base.py
+++ /dev/null
@@ -1,204 +0,0 @@
-import functools
-
-from sqlalchemy.ext.compiler import compiles
-from sqlalchemy.schema import DDLElement, Column
-from sqlalchemy import Integer
-from sqlalchemy import types as sqltypes
-from .. import util
-
-# backwards compat
-from ..util.sqla_compat import ( # noqa
- _table_for_constraint,
- _columns_for_constraint, _fk_spec, _is_type_bound, _find_columns)
-
-if util.sqla_09:
- from sqlalchemy.sql.elements import quoted_name
-
-
-class AlterTable(DDLElement):
-
- """Represent an ALTER TABLE statement.
-
- Only the string name and optional schema name of the table
- is required, not a full Table object.
-
- """
-
- def __init__(self, table_name, schema=None):
- self.table_name = table_name
- self.schema = schema
-
-
-class RenameTable(AlterTable):
-
- def __init__(self, old_table_name, new_table_name, schema=None):
- super(RenameTable, self).__init__(old_table_name, schema=schema)
- self.new_table_name = new_table_name
-
-
-class AlterColumn(AlterTable):
-
- def __init__(self, name, column_name, schema=None,
- existing_type=None,
- existing_nullable=None,
- existing_server_default=None):
- super(AlterColumn, self).__init__(name, schema=schema)
- self.column_name = column_name
- self.existing_type = sqltypes.to_instance(existing_type) \
- if existing_type is not None else None
- self.existing_nullable = existing_nullable
- self.existing_server_default = existing_server_default
-
-
-class ColumnNullable(AlterColumn):
-
- def __init__(self, name, column_name, nullable, **kw):
- super(ColumnNullable, self).__init__(name, column_name,
- **kw)
- self.nullable = nullable
-
-
-class ColumnType(AlterColumn):
-
- def __init__(self, name, column_name, type_, **kw):
- super(ColumnType, self).__init__(name, column_name,
- **kw)
- self.type_ = sqltypes.to_instance(type_)
-
-
-class ColumnName(AlterColumn):
-
- def __init__(self, name, column_name, newname, **kw):
- super(ColumnName, self).__init__(name, column_name, **kw)
- self.newname = newname
-
-
-class ColumnDefault(AlterColumn):
-
- def __init__(self, name, column_name, default, **kw):
- super(ColumnDefault, self).__init__(name, column_name, **kw)
- self.default = default
-
-
-class AddColumn(AlterTable):
-
- def __init__(self, name, column, schema=None):
- super(AddColumn, self).__init__(name, schema=schema)
- self.column = column
-
-
-class DropColumn(AlterTable):
-
- def __init__(self, name, column, schema=None):
- super(DropColumn, self).__init__(name, schema=schema)
- self.column = column
-
-
-@compiles(RenameTable)
-def visit_rename_table(element, compiler, **kw):
- return "%s RENAME TO %s" % (
- alter_table(compiler, element.table_name, element.schema),
- format_table_name(compiler, element.new_table_name, element.schema)
- )
-
-
-@compiles(AddColumn)
-def visit_add_column(element, compiler, **kw):
- return "%s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- add_column(compiler, element.column, **kw)
- )
-
-
-@compiles(DropColumn)
-def visit_drop_column(element, compiler, **kw):
- return "%s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- drop_column(compiler, element.column.name, **kw)
- )
-
-
-@compiles(ColumnNullable)
-def visit_column_nullable(element, compiler, **kw):
- return "%s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- alter_column(compiler, element.column_name),
- "DROP NOT NULL" if element.nullable else "SET NOT NULL"
- )
-
-
-@compiles(ColumnType)
-def visit_column_type(element, compiler, **kw):
- return "%s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- alter_column(compiler, element.column_name),
- "TYPE %s" % format_type(compiler, element.type_)
- )
-
-
-@compiles(ColumnName)
-def visit_column_name(element, compiler, **kw):
- return "%s RENAME %s TO %s" % (
- alter_table(compiler, element.table_name, element.schema),
- format_column_name(compiler, element.column_name),
- format_column_name(compiler, element.newname)
- )
-
-
-@compiles(ColumnDefault)
-def visit_column_default(element, compiler, **kw):
- return "%s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- alter_column(compiler, element.column_name),
- "SET DEFAULT %s" %
- format_server_default(compiler, element.default)
- if element.default is not None
- else "DROP DEFAULT"
- )
-
-
-def quote_dotted(name, quote):
- """quote the elements of a dotted name"""
-
- if util.sqla_09 and isinstance(name, quoted_name):
- return quote(name)
- result = '.'.join([quote(x) for x in name.split('.')])
- return result
-
-
-def format_table_name(compiler, name, schema):
- quote = functools.partial(compiler.preparer.quote, force=None)
- if schema:
- return quote_dotted(schema, quote) + "." + quote(name)
- else:
- return quote(name)
-
-
-def format_column_name(compiler, name):
- return compiler.preparer.quote(name, None)
-
-
-def format_server_default(compiler, default):
- return compiler.get_column_default_string(
- Column("x", Integer, server_default=default)
- )
-
-
-def format_type(compiler, type_):
- return compiler.dialect.type_compiler.process(type_)
-
-
-def alter_table(compiler, name, schema):
- return "ALTER TABLE %s" % format_table_name(compiler, name, schema)
-
-
-def drop_column(compiler, name):
- return 'DROP COLUMN %s' % format_column_name(compiler, name)
-
-
-def alter_column(compiler, name):
- return 'ALTER COLUMN %s' % format_column_name(compiler, name)
-
-
-def add_column(compiler, column, **kw):
- return "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
diff --git a/venv/Lib/site-packages/alembic/ddl/impl.py b/venv/Lib/site-packages/alembic/ddl/impl.py
deleted file mode 100644
index ec170fd..0000000
--- a/venv/Lib/site-packages/alembic/ddl/impl.py
+++ /dev/null
@@ -1,369 +0,0 @@
-from sqlalchemy import schema, text
-from sqlalchemy import types as sqltypes
-
-from ..util.compat import (
- string_types, text_type, with_metaclass
-)
-from ..util import sqla_compat
-from .. import util
-from . import base
-
-
-class ImplMeta(type):
-
-    def __init__(cls, classname, bases, dict_):
-        type.__init__(cls, classname, bases, dict_)
-        if '__dialect__' in dict_:
-            _impls[dict_['__dialect__']] = cls
-
-_impls = {}
-
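-# Registry note (comment added for illustration): ImplMeta stores each
-# subclass under its __dialect__ string, so importing e.g. alembic.ddl.mysql
-# makes DefaultImpl.get_by_dialect(dialect) resolve dialects named "mysql"
-# to MySQLImpl, i.e. roughly:
-#
-#   impl_cls = _impls[connection.dialect.name]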
-
-class DefaultImpl(with_metaclass(ImplMeta)):
-
- """Provide the entrypoint for major migration operations,
- including database-specific behavioral variances.
-
- While individual SQL/DDL constructs already provide
- for database-specific implementations, variances here
- allow for entirely different sequences of operations
- to take place for a particular migration, such as
- SQL Server's special 'IDENTITY INSERT' step for
- bulk inserts.
-
- """
- __dialect__ = 'default'
-
- transactional_ddl = False
- command_terminator = ";"
-
- def __init__(self, dialect, connection, as_sql,
- transactional_ddl, output_buffer,
- context_opts):
- self.dialect = dialect
- self.connection = connection
- self.as_sql = as_sql
- self.literal_binds = context_opts.get('literal_binds', False)
- if self.literal_binds and not util.sqla_08:
- util.warn("'literal_binds' flag not supported in SQLAlchemy 0.7")
- self.literal_binds = False
-
- self.output_buffer = output_buffer
- self.memo = {}
- self.context_opts = context_opts
- if transactional_ddl is not None:
- self.transactional_ddl = transactional_ddl
-
- if self.literal_binds:
- if not self.as_sql:
- raise util.CommandError(
- "Can't use literal_binds setting without as_sql mode")
-
- @classmethod
- def get_by_dialect(cls, dialect):
- return _impls[dialect.name]
-
- def static_output(self, text):
- self.output_buffer.write(text_type(text + "\n\n"))
- self.output_buffer.flush()
-
- def requires_recreate_in_batch(self, batch_op):
- """Return True if the given :class:`.BatchOperationsImpl`
- would need the table to be recreated and copied in order to
- proceed.
-
- Normally, only returns True on SQLite when operations other
- than add_column are present.
-
- """
- return False
-
- def prep_table_for_batch(self, table):
- """perform any operations needed on a table before a new
- one is created to replace it in batch mode.
-
- the PG dialect uses this to drop constraints on the table
- before the new one uses those same names.
-
- """
-
- @property
- def bind(self):
- return self.connection
-
- def _exec(self, construct, execution_options=None,
- multiparams=(),
- params=util.immutabledict()):
- if isinstance(construct, string_types):
- construct = text(construct)
- if self.as_sql:
- if multiparams or params:
- # TODO: coverage
- raise Exception("Execution arguments not allowed with as_sql")
-
- if self.literal_binds and not isinstance(
- construct, schema.DDLElement):
- compile_kw = dict(compile_kwargs={"literal_binds": True})
- else:
- compile_kw = {}
-
- self.static_output(text_type(
- construct.compile(dialect=self.dialect, **compile_kw)
- ).replace("\t", " ").strip() + self.command_terminator)
- else:
- conn = self.connection
- if execution_options:
- conn = conn.execution_options(**execution_options)
- return conn.execute(construct, *multiparams, **params)
-
- def execute(self, sql, execution_options=None):
- self._exec(sql, execution_options)
-
- def alter_column(self, table_name, column_name,
- nullable=None,
- server_default=False,
- name=None,
- type_=None,
- schema=None,
- autoincrement=None,
- existing_type=None,
- existing_server_default=None,
- existing_nullable=None,
- existing_autoincrement=None
- ):
- if autoincrement is not None or existing_autoincrement is not None:
- util.warn(
- "autoincrement and existing_autoincrement "
- "only make sense for MySQL")
- if nullable is not None:
- self._exec(base.ColumnNullable(
- table_name, column_name,
- nullable, schema=schema,
- existing_type=existing_type,
- existing_server_default=existing_server_default,
- existing_nullable=existing_nullable,
- ))
- if server_default is not False:
- self._exec(base.ColumnDefault(
- table_name, column_name, server_default,
- schema=schema,
- existing_type=existing_type,
- existing_server_default=existing_server_default,
- existing_nullable=existing_nullable,
- ))
- if type_ is not None:
- self._exec(base.ColumnType(
- table_name, column_name, type_, schema=schema,
- existing_type=existing_type,
- existing_server_default=existing_server_default,
- existing_nullable=existing_nullable,
- ))
- # do the new name last ;)
- if name is not None:
- self._exec(base.ColumnName(
- table_name, column_name, name, schema=schema,
- existing_type=existing_type,
- existing_server_default=existing_server_default,
- existing_nullable=existing_nullable,
- ))
-
- def add_column(self, table_name, column, schema=None):
- self._exec(base.AddColumn(table_name, column, schema=schema))
-
- def drop_column(self, table_name, column, schema=None, **kw):
- self._exec(base.DropColumn(table_name, column, schema=schema))
-
- def add_constraint(self, const):
- if const._create_rule is None or \
- const._create_rule(self):
- self._exec(schema.AddConstraint(const))
-
- def drop_constraint(self, const):
- self._exec(schema.DropConstraint(const))
-
- def rename_table(self, old_table_name, new_table_name, schema=None):
- self._exec(base.RenameTable(old_table_name,
- new_table_name, schema=schema))
-
- def create_table(self, table):
- if util.sqla_07:
- table.dispatch.before_create(table, self.connection,
- checkfirst=False,
- _ddl_runner=self)
- self._exec(schema.CreateTable(table))
- if util.sqla_07:
- table.dispatch.after_create(table, self.connection,
- checkfirst=False,
- _ddl_runner=self)
- for index in table.indexes:
- self._exec(schema.CreateIndex(index))
-
- def drop_table(self, table):
- self._exec(schema.DropTable(table))
-
- def create_index(self, index):
- self._exec(schema.CreateIndex(index))
-
- def drop_index(self, index):
- self._exec(schema.DropIndex(index))
-
- def bulk_insert(self, table, rows, multiinsert=True):
- if not isinstance(rows, list):
- raise TypeError("List expected")
- elif rows and not isinstance(rows[0], dict):
- raise TypeError("List of dictionaries expected")
- if self.as_sql:
- for row in rows:
- self._exec(table.insert(inline=True).values(**dict(
- (k,
- sqla_compat._literal_bindparam(
- k, v, type_=table.c[k].type)
- if not isinstance(
- v, sqla_compat._literal_bindparam) else v)
- for k, v in row.items()
- )))
- else:
- # work around http://www.sqlalchemy.org/trac/ticket/2461
- if not hasattr(table, '_autoincrement_column'):
- table._autoincrement_column = None
- if rows:
- if multiinsert:
- self._exec(table.insert(inline=True), multiparams=rows)
- else:
- for row in rows:
- self._exec(table.insert(inline=True).values(**row))
-
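-    # A hedged example of the calling side (not in the original source):
-    # within a migration script, op.bulk_insert() routes here; rows are plain
-    # dicts keyed by column name. Assumes "import sqlalchemy as sa" and the
-    # standard "op" proxy; the table is hypothetical.
-    #
-    #   accounts = sa.table('accounts',
-    #       sa.column('id', sa.Integer),
-    #       sa.column('name', sa.String))
-    #   op.bulk_insert(accounts, [
-    #       {'id': 1, 'name': 'alice'},
-    #       {'id': 2, 'name': 'bob'},
-    #   ])
-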
- def compare_type(self, inspector_column, metadata_column):
-
- conn_type = inspector_column.type
- metadata_type = metadata_column.type
-
- metadata_impl = metadata_type.dialect_impl(self.dialect)
- if isinstance(metadata_impl, sqltypes.Variant):
- metadata_impl = metadata_impl.impl.dialect_impl(self.dialect)
-
- # work around SQLAlchemy bug "stale value for type affinity"
- # fixed in 0.7.4
- metadata_impl.__dict__.pop('_type_affinity', None)
-
- if hasattr(metadata_impl, "compare_against_backend"):
- comparison = metadata_impl.compare_against_backend(
- self.dialect, conn_type)
- if comparison is not None:
- return not comparison
-
- if conn_type._compare_type_affinity(
- metadata_impl
- ):
- comparator = _type_comparators.get(conn_type._type_affinity, None)
-
- return comparator and comparator(metadata_impl, conn_type)
- else:
- return True
-
- def compare_server_default(self, inspector_column,
- metadata_column,
- rendered_metadata_default,
- rendered_inspector_default):
- return rendered_inspector_default != rendered_metadata_default
-
- def correct_for_autogen_constraints(self, conn_uniques, conn_indexes,
- metadata_unique_constraints,
- metadata_indexes):
- pass
-
- def _compat_autogen_column_reflect(self, inspector):
- if util.sqla_08:
- return self.autogen_column_reflect
- else:
- def adapt(table, column_info):
- return self.autogen_column_reflect(
- inspector, table, column_info)
- return adapt
-
- def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks):
- pass
-
- def autogen_column_reflect(self, inspector, table, column_info):
- """A hook that is attached to the 'column_reflect' event for when
- a Table is reflected from the database during the autogenerate
- process.
-
- Dialects can elect to modify the information gathered here.
-
- """
-
- def start_migrations(self):
- """A hook called when :meth:`.EnvironmentContext.run_migrations`
- is called.
-
- Implementations can set up per-migration-run state here.
-
- """
-
- def emit_begin(self):
- """Emit the string ``BEGIN``, or the backend-specific
- equivalent, on the current connection context.
-
- This is used in offline mode and typically
- via :meth:`.EnvironmentContext.begin_transaction`.
-
- """
- self.static_output("BEGIN" + self.command_terminator)
-
- def emit_commit(self):
- """Emit the string ``COMMIT``, or the backend-specific
- equivalent, on the current connection context.
-
- This is used in offline mode and typically
- via :meth:`.EnvironmentContext.begin_transaction`.
-
- """
- self.static_output("COMMIT" + self.command_terminator)
-
- def render_type(self, type_obj, autogen_context):
- return False
-
-
-def _string_compare(t1, t2):
- return \
- t1.length is not None and \
- t1.length != t2.length
-
-
-def _numeric_compare(t1, t2):
- return (
- t1.precision is not None and
- t1.precision != t2.precision
- ) or (
- t1.precision is not None and
- t1.scale is not None and
- t1.scale != t2.scale
- )
-
-
-def _integer_compare(t1, t2):
- t1_small_or_big = (
- 'S' if isinstance(t1, sqltypes.SmallInteger)
- else 'B' if isinstance(t1, sqltypes.BigInteger) else 'I'
- )
- t2_small_or_big = (
- 'S' if isinstance(t2, sqltypes.SmallInteger)
- else 'B' if isinstance(t2, sqltypes.BigInteger) else 'I'
- )
- return t1_small_or_big != t2_small_or_big
-
-
-def _datetime_compare(t1, t2):
- return (
- t1.timezone != t2.timezone
- )
-
-
-_type_comparators = {
- sqltypes.String: _string_compare,
- sqltypes.Numeric: _numeric_compare,
- sqltypes.Integer: _integer_compare,
- sqltypes.DateTime: _datetime_compare,
-}
diff --git a/venv/Lib/site-packages/alembic/ddl/mssql.py b/venv/Lib/site-packages/alembic/ddl/mssql.py
deleted file mode 100644
index f10c5e6..0000000
--- a/venv/Lib/site-packages/alembic/ddl/mssql.py
+++ /dev/null
@@ -1,233 +0,0 @@
-from sqlalchemy.ext.compiler import compiles
-
-from .. import util
-from .impl import DefaultImpl
-from .base import alter_table, AddColumn, ColumnName, RenameTable,\
- format_table_name, format_column_name, ColumnNullable, alter_column,\
- format_server_default, ColumnDefault, format_type, ColumnType
-from sqlalchemy.sql.expression import ClauseElement, Executable
-
-
-class MSSQLImpl(DefaultImpl):
- __dialect__ = 'mssql'
- transactional_ddl = True
- batch_separator = "GO"
-
- def __init__(self, *arg, **kw):
- super(MSSQLImpl, self).__init__(*arg, **kw)
- self.batch_separator = self.context_opts.get(
- "mssql_batch_separator",
- self.batch_separator)
-
- def _exec(self, construct, *args, **kw):
- result = super(MSSQLImpl, self)._exec(construct, *args, **kw)
- if self.as_sql and self.batch_separator:
- self.static_output(self.batch_separator)
- return result
-
- def emit_begin(self):
- self.static_output("BEGIN TRANSACTION" + self.command_terminator)
-
- def emit_commit(self):
- super(MSSQLImpl, self).emit_commit()
- if self.as_sql and self.batch_separator:
- self.static_output(self.batch_separator)
-
- def alter_column(self, table_name, column_name,
- nullable=None,
- server_default=False,
- name=None,
- type_=None,
- schema=None,
- existing_type=None,
- existing_server_default=None,
- existing_nullable=None,
- **kw
- ):
-
- if nullable is not None and existing_type is None:
- if type_ is not None:
- existing_type = type_
- # the NULL/NOT NULL alter will handle
- # the type alteration
- type_ = None
- else:
- raise util.CommandError(
- "MS-SQL ALTER COLUMN operations "
- "with NULL or NOT NULL require the "
- "existing_type or a new type_ be passed.")
-
- super(MSSQLImpl, self).alter_column(
- table_name, column_name,
- nullable=nullable,
- type_=type_,
- schema=schema,
- existing_type=existing_type,
- existing_nullable=existing_nullable,
- **kw
- )
-
- if server_default is not False:
- if existing_server_default is not False or \
- server_default is None:
- self._exec(
- _ExecDropConstraint(
- table_name, column_name,
- 'sys.default_constraints')
- )
- if server_default is not None:
- super(MSSQLImpl, self).alter_column(
- table_name, column_name,
- schema=schema,
- server_default=server_default)
-
- if name is not None:
- super(MSSQLImpl, self).alter_column(
- table_name, column_name,
- schema=schema,
- name=name)
-
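-    # Illustrative caller note (added; not in the original source): because of
-    # the check above, a NULL/NOT NULL change on MSSQL must supply the column
-    # type, e.g. in a migration script (names hypothetical, "sa" is
-    # "import sqlalchemy as sa"):
-    #
-    #   op.alter_column("mytable", "col", nullable=False,
-    #                   existing_type=sa.Integer())
-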
- def bulk_insert(self, table, rows, **kw):
- if self.as_sql:
- self._exec(
- "SET IDENTITY_INSERT %s ON" %
- self.dialect.identifier_preparer.format_table(table)
- )
- super(MSSQLImpl, self).bulk_insert(table, rows, **kw)
- self._exec(
- "SET IDENTITY_INSERT %s OFF" %
- self.dialect.identifier_preparer.format_table(table)
- )
- else:
- super(MSSQLImpl, self).bulk_insert(table, rows, **kw)
-
- def drop_column(self, table_name, column, **kw):
- drop_default = kw.pop('mssql_drop_default', False)
- if drop_default:
- self._exec(
- _ExecDropConstraint(
- table_name, column,
- 'sys.default_constraints')
- )
- drop_check = kw.pop('mssql_drop_check', False)
- if drop_check:
- self._exec(
- _ExecDropConstraint(
- table_name, column,
- 'sys.check_constraints')
- )
- drop_fks = kw.pop('mssql_drop_foreign_key', False)
- if drop_fks:
- self._exec(
- _ExecDropFKConstraint(table_name, column)
- )
- super(MSSQLImpl, self).drop_column(table_name, column, **kw)
-
-
-class _ExecDropConstraint(Executable, ClauseElement):
-
- def __init__(self, tname, colname, type_):
- self.tname = tname
- self.colname = colname
- self.type_ = type_
-
-
-class _ExecDropFKConstraint(Executable, ClauseElement):
-
- def __init__(self, tname, colname):
- self.tname = tname
- self.colname = colname
-
-
-@compiles(_ExecDropConstraint, 'mssql')
-def _exec_drop_col_constraint(element, compiler, **kw):
- tname, colname, type_ = element.tname, element.colname, element.type_
- # from http://www.mssqltips.com/sqlservertip/1425/\
- # working-with-default-constraints-in-sql-server/
- # TODO: needs table formatting, etc.
- return """declare @const_name varchar(256)
-select @const_name = [name] from %(type)s
-where parent_object_id = object_id('%(tname)s')
-and col_name(parent_object_id, parent_column_id) = '%(colname)s'
-exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % {
- 'type': type_,
- 'tname': tname,
- 'colname': colname,
- 'tname_quoted': format_table_name(compiler, tname, None),
- }
-
-
-@compiles(_ExecDropFKConstraint, 'mssql')
-def _exec_drop_col_fk_constraint(element, compiler, **kw):
- tname, colname = element.tname, element.colname
-
- return """declare @const_name varchar(256)
-select @const_name = [name] from
- sys.foreign_keys fk join sys.foreign_key_columns fkc
- on fk.object_id=fkc.constraint_object_id
-where fkc.parent_object_id = object_id('%(tname)s')
-and col_name(fkc.parent_object_id, fkc.parent_column_id) = '%(colname)s'
-exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % {
- 'tname': tname,
- 'colname': colname,
- 'tname_quoted': format_table_name(compiler, tname, None),
- }
-
-
-@compiles(AddColumn, 'mssql')
-def visit_add_column(element, compiler, **kw):
- return "%s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- mssql_add_column(compiler, element.column, **kw)
- )
-
-
-def mssql_add_column(compiler, column, **kw):
- return "ADD %s" % compiler.get_column_specification(column, **kw)
-
-
-@compiles(ColumnNullable, 'mssql')
-def visit_column_nullable(element, compiler, **kw):
- return "%s %s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- alter_column(compiler, element.column_name),
- format_type(compiler, element.existing_type),
- "NULL" if element.nullable else "NOT NULL"
- )
-
-
-@compiles(ColumnDefault, 'mssql')
-def visit_column_default(element, compiler, **kw):
- # TODO: there can also be a named constraint
- # with ADD CONSTRAINT here
- return "%s ADD DEFAULT %s FOR %s" % (
- alter_table(compiler, element.table_name, element.schema),
- format_server_default(compiler, element.default),
- format_column_name(compiler, element.column_name)
- )
-
-
-@compiles(ColumnName, 'mssql')
-def visit_rename_column(element, compiler, **kw):
- return "EXEC sp_rename '%s.%s', %s, 'COLUMN'" % (
- format_table_name(compiler, element.table_name, element.schema),
- format_column_name(compiler, element.column_name),
- format_column_name(compiler, element.newname)
- )
-
-
-@compiles(ColumnType, 'mssql')
-def visit_column_type(element, compiler, **kw):
- return "%s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- alter_column(compiler, element.column_name),
- format_type(compiler, element.type_)
- )
-
-
-@compiles(RenameTable, 'mssql')
-def visit_rename_table(element, compiler, **kw):
- return "EXEC sp_rename '%s', %s" % (
- format_table_name(compiler, element.table_name, element.schema),
- format_table_name(compiler, element.new_table_name, None)
- )
diff --git a/venv/Lib/site-packages/alembic/ddl/mysql.py b/venv/Lib/site-packages/alembic/ddl/mysql.py
deleted file mode 100644
index 71b186c..0000000
--- a/venv/Lib/site-packages/alembic/ddl/mysql.py
+++ /dev/null
@@ -1,346 +0,0 @@
-from sqlalchemy.ext.compiler import compiles
-from sqlalchemy import types as sqltypes
-from sqlalchemy import schema
-
-from ..util.compat import string_types
-from .. import util
-from .impl import DefaultImpl
-from .base import ColumnNullable, ColumnName, ColumnDefault, \
- ColumnType, AlterColumn, format_column_name, \
- format_server_default
-from .base import alter_table
-from ..autogenerate import compare
-from ..util.sqla_compat import _is_type_bound, sqla_100
-import re
-
-
-class MySQLImpl(DefaultImpl):
- __dialect__ = 'mysql'
-
- transactional_ddl = False
-
- def alter_column(self, table_name, column_name,
- nullable=None,
- server_default=False,
- name=None,
- type_=None,
- schema=None,
- existing_type=None,
- existing_server_default=None,
- existing_nullable=None,
- autoincrement=None,
- existing_autoincrement=None,
- **kw
- ):
- if name is not None:
- self._exec(
- MySQLChangeColumn(
- table_name, column_name,
- schema=schema,
- newname=name,
- nullable=nullable if nullable is not None else
- existing_nullable
- if existing_nullable is not None
- else True,
- type_=type_ if type_ is not None else existing_type,
- default=server_default if server_default is not False
- else existing_server_default,
- autoincrement=autoincrement if autoincrement is not None
- else existing_autoincrement
- )
- )
- elif nullable is not None or \
- type_ is not None or \
- autoincrement is not None:
- self._exec(
- MySQLModifyColumn(
- table_name, column_name,
- schema=schema,
- newname=name if name is not None else column_name,
- nullable=nullable if nullable is not None else
- existing_nullable
- if existing_nullable is not None
- else True,
- type_=type_ if type_ is not None else existing_type,
- default=server_default if server_default is not False
- else existing_server_default,
- autoincrement=autoincrement if autoincrement is not None
- else existing_autoincrement
- )
- )
- elif server_default is not False:
- self._exec(
- MySQLAlterDefault(
- table_name, column_name, server_default,
- schema=schema,
- )
- )
-
- def drop_constraint(self, const):
- if isinstance(const, schema.CheckConstraint) and _is_type_bound(const):
- return
-
- super(MySQLImpl, self).drop_constraint(const)
-
- def compare_server_default(self, inspector_column,
- metadata_column,
- rendered_metadata_default,
- rendered_inspector_default):
- # partially a workaround for SQLAlchemy issue #3023; if the
- # column were created without "NOT NULL", MySQL may have added
- # an implicit default of '0' which we need to skip
- if metadata_column.type._type_affinity is sqltypes.Integer and \
- inspector_column.primary_key and \
- not inspector_column.autoincrement and \
- not rendered_metadata_default and \
- rendered_inspector_default == "'0'":
- return False
- elif rendered_inspector_default and rendered_metadata_default:
-            # adjust for "function()" vs. "FUNCTION": strip a trailing "()"
-            # before comparing (non-greedy group, raw-string backreference)
-            return (
-                re.sub(
-                    r'(.*?)(\(\))?$', r'\1',
-                    rendered_inspector_default.lower()) !=
-                re.sub(
-                    r'(.*?)(\(\))?$', r'\1',
-                    rendered_metadata_default.lower())
- )
- else:
- return rendered_inspector_default != rendered_metadata_default
-
- def correct_for_autogen_constraints(self, conn_unique_constraints,
- conn_indexes,
- metadata_unique_constraints,
- metadata_indexes):
-
- # TODO: if SQLA 1.0, make use of "duplicates_index"
- # metadata
- removed = set()
- for idx in list(conn_indexes):
- if idx.unique:
- continue
- # MySQL puts implicit indexes on FK columns, even if
- # composite and even if MyISAM, so can't check this too easily.
- # the name of the index may be the column name or it may
- # be the name of the FK constraint.
- for col in idx.columns:
- if idx.name == col.name:
- conn_indexes.remove(idx)
- removed.add(idx.name)
- break
- for fk in col.foreign_keys:
- if fk.name == idx.name:
- conn_indexes.remove(idx)
- removed.add(idx.name)
- break
- if idx.name in removed:
- break
-
- # then remove indexes from the "metadata_indexes"
- # that we've removed from reflected, otherwise they come out
- # as adds (see #202)
- for idx in list(metadata_indexes):
- if idx.name in removed:
- metadata_indexes.remove(idx)
-
- if not sqla_100:
- self._legacy_correct_for_dupe_uq_uix(
- conn_unique_constraints,
- conn_indexes,
- metadata_unique_constraints,
- metadata_indexes
- )
-
- def _legacy_correct_for_dupe_uq_uix(self, conn_unique_constraints,
- conn_indexes,
- metadata_unique_constraints,
- metadata_indexes):
-
- # then dedupe unique indexes vs. constraints, since MySQL
- # doesn't really have unique constraints as a separate construct.
- # but look in the metadata and try to maintain constructs
- # that already seem to be defined one way or the other
- # on that side. See #276
- metadata_uq_names = set([
- cons.name for cons in metadata_unique_constraints
- if cons.name is not None])
-
- unnamed_metadata_uqs = set([
- compare._uq_constraint_sig(cons).sig
- for cons in metadata_unique_constraints
- if cons.name is None
- ])
-
- metadata_ix_names = set([
- cons.name for cons in metadata_indexes if cons.unique])
- conn_uq_names = dict(
- (cons.name, cons) for cons in conn_unique_constraints
- )
- conn_ix_names = dict(
- (cons.name, cons) for cons in conn_indexes if cons.unique
- )
-
- for overlap in set(conn_uq_names).intersection(conn_ix_names):
- if overlap not in metadata_uq_names:
- if compare._uq_constraint_sig(conn_uq_names[overlap]).sig \
- not in unnamed_metadata_uqs:
-
- conn_unique_constraints.discard(conn_uq_names[overlap])
- elif overlap not in metadata_ix_names:
- conn_indexes.discard(conn_ix_names[overlap])
-
- def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks):
- conn_fk_by_sig = dict(
- (compare._fk_constraint_sig(fk).sig, fk) for fk in conn_fks
- )
- metadata_fk_by_sig = dict(
- (compare._fk_constraint_sig(fk).sig, fk) for fk in metadata_fks
- )
-
- for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig):
- mdfk = metadata_fk_by_sig[sig]
- cnfk = conn_fk_by_sig[sig]
- # MySQL considers RESTRICT to be the default and doesn't
- # report on it. if the model has explicit RESTRICT and
- # the conn FK has None, set it to RESTRICT
- if mdfk.ondelete is not None and \
- mdfk.ondelete.lower() == 'restrict' and \
- cnfk.ondelete is None:
- cnfk.ondelete = 'RESTRICT'
- if mdfk.onupdate is not None and \
- mdfk.onupdate.lower() == 'restrict' and \
- cnfk.onupdate is None:
- cnfk.onupdate = 'RESTRICT'
-
-
-class MySQLAlterDefault(AlterColumn):
-
- def __init__(self, name, column_name, default, schema=None):
- super(AlterColumn, self).__init__(name, schema=schema)
- self.column_name = column_name
- self.default = default
-
-
-class MySQLChangeColumn(AlterColumn):
-
- def __init__(self, name, column_name, schema=None,
- newname=None,
- type_=None,
- nullable=None,
- default=False,
- autoincrement=None):
- super(AlterColumn, self).__init__(name, schema=schema)
- self.column_name = column_name
- self.nullable = nullable
- self.newname = newname
- self.default = default
- self.autoincrement = autoincrement
- if type_ is None:
- raise util.CommandError(
- "All MySQL CHANGE/MODIFY COLUMN operations "
- "require the existing type."
- )
-
- self.type_ = sqltypes.to_instance(type_)
-
-
-class MySQLModifyColumn(MySQLChangeColumn):
- pass
-
-
-@compiles(ColumnNullable, 'mysql')
-@compiles(ColumnName, 'mysql')
-@compiles(ColumnDefault, 'mysql')
-@compiles(ColumnType, 'mysql')
-def _mysql_doesnt_support_individual(element, compiler, **kw):
- raise NotImplementedError(
- "Individual alter column constructs not supported by MySQL"
- )
-
-
-@compiles(MySQLAlterDefault, "mysql")
-def _mysql_alter_default(element, compiler, **kw):
- return "%s ALTER COLUMN %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- format_column_name(compiler, element.column_name),
- "SET DEFAULT %s" % format_server_default(compiler, element.default)
- if element.default is not None
- else "DROP DEFAULT"
- )
-
-
-@compiles(MySQLModifyColumn, "mysql")
-def _mysql_modify_column(element, compiler, **kw):
- return "%s MODIFY %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- format_column_name(compiler, element.column_name),
- _mysql_colspec(
- compiler,
- nullable=element.nullable,
- server_default=element.default,
- type_=element.type_,
- autoincrement=element.autoincrement
- ),
- )
-
-
-@compiles(MySQLChangeColumn, "mysql")
-def _mysql_change_column(element, compiler, **kw):
- return "%s CHANGE %s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- format_column_name(compiler, element.column_name),
- format_column_name(compiler, element.newname),
- _mysql_colspec(
- compiler,
- nullable=element.nullable,
- server_default=element.default,
- type_=element.type_,
- autoincrement=element.autoincrement
- ),
- )
-
-
-def _render_value(compiler, expr):
- if isinstance(expr, string_types):
- return "'%s'" % expr
- else:
- return compiler.sql_compiler.process(expr)
-
-
-def _mysql_colspec(compiler, nullable, server_default, type_,
- autoincrement):
- spec = "%s %s" % (
- compiler.dialect.type_compiler.process(type_),
- "NULL" if nullable else "NOT NULL"
- )
- if autoincrement:
- spec += " AUTO_INCREMENT"
- if server_default is not False and server_default is not None:
- spec += " DEFAULT %s" % _render_value(compiler, server_default)
-
- return spec
-
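-# Example rendering (comment added for illustration): with type_=INTEGER,
-# nullable=False, autoincrement=True and no server default, _mysql_colspec
-# yields "INTEGER NOT NULL AUTO_INCREMENT", which the CHANGE/MODIFY
-# compilers above append after the (new) column name.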
-
-@compiles(schema.DropConstraint, "mysql")
-def _mysql_drop_constraint(element, compiler, **kw):
- """Redefine SQLAlchemy's drop constraint to
- raise errors for invalid constraint type."""
-
- constraint = element.element
- if isinstance(constraint, (schema.ForeignKeyConstraint,
- schema.PrimaryKeyConstraint,
- schema.UniqueConstraint)
- ):
- return compiler.visit_drop_constraint(element, **kw)
- elif isinstance(constraint, schema.CheckConstraint):
- # note that SQLAlchemy as of 1.2 does not yet support
- # DROP CONSTRAINT for MySQL/MariaDB, so we implement fully
- # here.
- return "ALTER TABLE %s DROP CONSTRAINT %s" % \
- (compiler.preparer.format_table(constraint.table), constraint.name)
- else:
- raise NotImplementedError(
- "No generic 'DROP CONSTRAINT' in MySQL - "
- "please specify constraint type")
-
-
diff --git a/venv/Lib/site-packages/alembic/ddl/oracle.py b/venv/Lib/site-packages/alembic/ddl/oracle.py
deleted file mode 100644
index e528744..0000000
--- a/venv/Lib/site-packages/alembic/ddl/oracle.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from sqlalchemy.ext.compiler import compiles
-
-from .impl import DefaultImpl
-from .base import alter_table, AddColumn, ColumnName, \
- format_column_name, ColumnNullable, \
- format_server_default, ColumnDefault, format_type, ColumnType
-
-
-class OracleImpl(DefaultImpl):
- __dialect__ = 'oracle'
- transactional_ddl = False
- batch_separator = "/"
- command_terminator = ""
-
- def __init__(self, *arg, **kw):
- super(OracleImpl, self).__init__(*arg, **kw)
- self.batch_separator = self.context_opts.get(
- "oracle_batch_separator",
- self.batch_separator)
-
- def _exec(self, construct, *args, **kw):
- result = super(OracleImpl, self)._exec(construct, *args, **kw)
- if self.as_sql and self.batch_separator:
- self.static_output(self.batch_separator)
- return result
-
- def emit_begin(self):
- self._exec("SET TRANSACTION READ WRITE")
-
- def emit_commit(self):
- self._exec("COMMIT")
-
-
-@compiles(AddColumn, 'oracle')
-def visit_add_column(element, compiler, **kw):
- return "%s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- add_column(compiler, element.column, **kw),
- )
-
-
-@compiles(ColumnNullable, 'oracle')
-def visit_column_nullable(element, compiler, **kw):
- return "%s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- alter_column(compiler, element.column_name),
- "NULL" if element.nullable else "NOT NULL"
- )
-
-
-@compiles(ColumnType, 'oracle')
-def visit_column_type(element, compiler, **kw):
- return "%s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- alter_column(compiler, element.column_name),
-        format_type(compiler, element.type_)
- )
-
-
-@compiles(ColumnName, 'oracle')
-def visit_column_name(element, compiler, **kw):
- return "%s RENAME COLUMN %s TO %s" % (
- alter_table(compiler, element.table_name, element.schema),
- format_column_name(compiler, element.column_name),
- format_column_name(compiler, element.newname)
- )
-
-
-@compiles(ColumnDefault, 'oracle')
-def visit_column_default(element, compiler, **kw):
- return "%s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- alter_column(compiler, element.column_name),
- "DEFAULT %s" %
- format_server_default(compiler, element.default)
- if element.default is not None
- else "DEFAULT NULL"
- )
-
-
-def alter_column(compiler, name):
- return 'MODIFY %s' % format_column_name(compiler, name)
-
-
-def add_column(compiler, column, **kw):
- return "ADD %s" % compiler.get_column_specification(column, **kw)
diff --git a/venv/Lib/site-packages/alembic/ddl/postgresql.py b/venv/Lib/site-packages/alembic/ddl/postgresql.py
deleted file mode 100644
index a2a7dbc..0000000
--- a/venv/Lib/site-packages/alembic/ddl/postgresql.py
+++ /dev/null
@@ -1,469 +0,0 @@
-import re
-
-from ..util import compat
-from .. import util
-from .base import compiles, alter_column, alter_table, format_table_name, \
- format_type, AlterColumn, RenameTable
-from .impl import DefaultImpl
-from sqlalchemy.dialects.postgresql import INTEGER, BIGINT
-from ..autogenerate import render
-from sqlalchemy import text, Numeric, Column
-from sqlalchemy.sql.expression import ColumnClause
-from sqlalchemy.types import NULLTYPE
-from sqlalchemy import types as sqltypes
-
-from ..operations.base import Operations
-from ..operations.base import BatchOperations
-from ..operations import ops
-from ..util import sqla_compat
-from ..operations import schemaobj
-
-import logging
-
-if util.sqla_08:
- from sqlalchemy.sql.expression import UnaryExpression
-else:
- from sqlalchemy.sql.expression import _UnaryExpression as UnaryExpression
-
-if util.sqla_100:
- from sqlalchemy.dialects.postgresql import ExcludeConstraint
-
-
-log = logging.getLogger(__name__)
-
-
-class PostgresqlImpl(DefaultImpl):
- __dialect__ = 'postgresql'
- transactional_ddl = True
-
- def prep_table_for_batch(self, table):
- for constraint in table.constraints:
- if constraint.name is not None:
- self.drop_constraint(constraint)
-
- def compare_server_default(self, inspector_column,
- metadata_column,
- rendered_metadata_default,
- rendered_inspector_default):
- # don't do defaults for SERIAL columns
- if metadata_column.primary_key and \
- metadata_column is metadata_column.table._autoincrement_column:
- return False
-
- conn_col_default = rendered_inspector_default
-
- defaults_equal = conn_col_default == rendered_metadata_default
- if defaults_equal:
- return False
-
- if None in (conn_col_default, rendered_metadata_default):
- return not defaults_equal
-
- if metadata_column.server_default is not None and \
- isinstance(metadata_column.server_default.arg,
- compat.string_types) and \
- not re.match(r"^'.+'$", rendered_metadata_default) and \
- not isinstance(inspector_column.type, Numeric):
- # don't single quote if the column type is float/numeric,
- # otherwise a comparison such as SELECT 5 = '5.0' will fail
- rendered_metadata_default = re.sub(
- r"^u?'?|'?$", "'", rendered_metadata_default)
-
- return not self.connection.scalar(
- "SELECT %s = %s" % (
- conn_col_default,
- rendered_metadata_default
- )
- )
-
- def alter_column(self, table_name, column_name,
- nullable=None,
- server_default=False,
- name=None,
- type_=None,
- schema=None,
- autoincrement=None,
- existing_type=None,
- existing_server_default=None,
- existing_nullable=None,
- existing_autoincrement=None,
- **kw
- ):
-
- using = kw.pop('postgresql_using', None)
-
- if using is not None and type_ is None:
- raise util.CommandError(
- "postgresql_using must be used with the type_ parameter")
-
- if type_ is not None:
- self._exec(PostgresqlColumnType(
- table_name, column_name, type_, schema=schema,
- using=using, existing_type=existing_type,
- existing_server_default=existing_server_default,
- existing_nullable=existing_nullable,
- ))
-
- super(PostgresqlImpl, self).alter_column(
- table_name, column_name,
- nullable=nullable,
- server_default=server_default,
- name=name,
- schema=schema,
- autoincrement=autoincrement,
- existing_type=existing_type,
- existing_server_default=existing_server_default,
- existing_nullable=existing_nullable,
- existing_autoincrement=existing_autoincrement,
- **kw)
-
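-    # A hedged usage sketch (not part of the original source), assuming
-    # "import sqlalchemy as sa" and the standard "op" proxy in a migration
-    # script; table/column names are hypothetical:
-    #
-    #   op.alter_column("orders", "total", type_=sa.Numeric(10, 2),
-    #                   existing_type=sa.String(),
-    #                   postgresql_using="total::numeric(10, 2)")
-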
- def autogen_column_reflect(self, inspector, table, column_info):
- if column_info.get('default') and \
- isinstance(column_info['type'], (INTEGER, BIGINT)):
- seq_match = re.match(
- r"nextval\('(.+?)'::regclass\)",
- column_info['default'])
- if seq_match:
- info = inspector.bind.execute(text(
- "select c.relname, a.attname "
- "from pg_class as c join pg_depend d on d.objid=c.oid and "
- "d.classid='pg_class'::regclass and "
- "d.refclassid='pg_class'::regclass "
- "join pg_class t on t.oid=d.refobjid "
- "join pg_attribute a on a.attrelid=t.oid and "
- "a.attnum=d.refobjsubid "
- "where c.relkind='S' and c.relname=:seqname"
- ), seqname=seq_match.group(1)).first()
- if info:
- seqname, colname = info
- if colname == column_info['name']:
- log.info(
- "Detected sequence named '%s' as "
- "owned by integer column '%s(%s)', "
- "assuming SERIAL and omitting",
- seqname, table.name, colname)
- # sequence, and the owner is this column,
-                        # it's a SERIAL - whack it!
- del column_info['default']
-
- def correct_for_autogen_constraints(self, conn_unique_constraints,
- conn_indexes,
- metadata_unique_constraints,
- metadata_indexes):
-
- conn_uniques_by_name = dict(
- (c.name, c) for c in conn_unique_constraints)
- conn_indexes_by_name = dict(
- (c.name, c) for c in conn_indexes)
-
- if not util.sqla_100:
- doubled_constraints = set(
- conn_indexes_by_name[name]
- for name in set(conn_uniques_by_name).intersection(
- conn_indexes_by_name)
- )
- else:
- doubled_constraints = set(
- index for index in
- conn_indexes if index.info.get('duplicates_constraint')
- )
-
- for ix in doubled_constraints:
- conn_indexes.remove(ix)
-
- for idx in list(metadata_indexes):
- if idx.name in conn_indexes_by_name:
- continue
- if util.sqla_08:
- exprs = idx.expressions
- else:
- exprs = idx.columns
- for expr in exprs:
- while isinstance(expr, UnaryExpression):
- expr = expr.element
- if not isinstance(expr, Column):
- util.warn(
- "autogenerate skipping functional index %s; "
- "not supported by SQLAlchemy reflection" % idx.name
- )
- metadata_indexes.discard(idx)
-
- def render_type(self, type_, autogen_context):
- mod = type(type_).__module__
- if not mod.startswith("sqlalchemy.dialects.postgresql"):
- return False
-
- if hasattr(self, '_render_%s_type' % type_.__visit_name__):
- meth = getattr(self, '_render_%s_type' % type_.__visit_name__)
- return meth(type_, autogen_context)
-
- return False
-
- def _render_HSTORE_type(self, type_, autogen_context):
- return render._render_type_w_subtype(
- type_, autogen_context, 'text_type', r'(.+?\(.*text_type=)'
- )
-
- def _render_ARRAY_type(self, type_, autogen_context):
- return render._render_type_w_subtype(
- type_, autogen_context, 'item_type', r'(.+?\()'
- )
-
- def _render_JSON_type(self, type_, autogen_context):
- return render._render_type_w_subtype(
- type_, autogen_context, 'astext_type', r'(.+?\(.*astext_type=)'
- )
-
- def _render_JSONB_type(self, type_, autogen_context):
- return render._render_type_w_subtype(
- type_, autogen_context, 'astext_type', r'(.+?\(.*astext_type=)'
- )
-
-
-class PostgresqlColumnType(AlterColumn):
-
- def __init__(self, name, column_name, type_, **kw):
- using = kw.pop('using', None)
- super(PostgresqlColumnType, self).__init__(name, column_name, **kw)
- self.type_ = sqltypes.to_instance(type_)
- self.using = using
-
-
-@compiles(RenameTable, "postgresql")
-def visit_rename_table(element, compiler, **kw):
- return "%s RENAME TO %s" % (
- alter_table(compiler, element.table_name, element.schema),
- format_table_name(compiler, element.new_table_name, None)
- )
-
-
-@compiles(PostgresqlColumnType, "postgresql")
-def visit_column_type(element, compiler, **kw):
- return "%s %s %s %s" % (
- alter_table(compiler, element.table_name, element.schema),
- alter_column(compiler, element.column_name),
- "TYPE %s" % format_type(compiler, element.type_),
- "USING %s" % element.using if element.using else ""
- )
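-
-# Illustrative only: altering a column "data" to INTEGER with
-# postgresql_using="data::integer" compiles through the visitor above to
-# roughly:
-#
-#     ALTER TABLE t ALTER COLUMN data TYPE INTEGER USING data::integer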
-
-
-@Operations.register_operation("create_exclude_constraint")
-@BatchOperations.register_operation(
- "create_exclude_constraint", "batch_create_exclude_constraint")
-@ops.AddConstraintOp.register_add_constraint("exclude_constraint")
-class CreateExcludeConstraintOp(ops.AddConstraintOp):
- """Represent a create exclude constraint operation."""
-
- constraint_type = "exclude"
-
- def __init__(
- self, constraint_name, table_name,
- elements, where=None, schema=None,
- _orig_constraint=None, **kw):
- self.constraint_name = constraint_name
- self.table_name = table_name
- self.elements = elements
- self.where = where
- self.schema = schema
- self._orig_constraint = _orig_constraint
- self.kw = kw
-
- @classmethod
- def from_constraint(cls, constraint):
- constraint_table = sqla_compat._table_for_constraint(constraint)
-
- return cls(
- constraint.name,
- constraint_table.name,
- [(expr, op) for expr, name, op in constraint._render_exprs],
- where=constraint.where,
- schema=constraint_table.schema,
- _orig_constraint=constraint,
- deferrable=constraint.deferrable,
- initially=constraint.initially,
- using=constraint.using
- )
-
- def to_constraint(self, migration_context=None):
- if not util.sqla_100:
- raise NotImplementedError(
- "ExcludeConstraint not supported until SQLAlchemy 1.0")
- if self._orig_constraint is not None:
- return self._orig_constraint
- schema_obj = schemaobj.SchemaObjects(migration_context)
- t = schema_obj.table(self.table_name, schema=self.schema)
- excl = ExcludeConstraint(
- *self.elements,
- name=self.constraint_name,
- where=self.where,
- **self.kw
- )
- for expr, name, oper in excl._render_exprs:
- t.append_column(Column(name, NULLTYPE))
- t.append_constraint(excl)
- return excl
-
- @classmethod
- def create_exclude_constraint(
- cls, operations,
- constraint_name, table_name, *elements, **kw):
- """Issue an alter to create an EXCLUDE constraint using the
- current migration context.
-
- .. note:: This method is Postgresql specific, and additionally
- requires at least SQLAlchemy 1.0.
-
- e.g.::
-
- from alembic import op
-
- op.create_exclude_constraint(
- "user_excl",
- "user",
- ("period", '&&'),
- ("group", '='),
- where=("group != 'some group'")
- )
-
- Note that the expressions work the same way as that of
- the ``ExcludeConstraint`` object itself; if plain strings are
- passed, quoting rules must be applied manually.
-
- :param constraint_name: Name of the constraint.
- :param table_name: String name of the source table.
- :param elements: Exclude conditions.
- :param where: SQL expression or SQL string with optional WHERE
- clause.
- :param deferrable: optional bool. If set, emit DEFERRABLE or
- NOT DEFERRABLE when issuing DDL for this constraint.
- :param initially: optional string. If set, emit INITIALLY
- when issuing DDL for this constraint.
- :param schema: Optional schema name to operate within.
-
- .. versionadded:: 0.9.0
-
- """
- op = cls(constraint_name, table_name, elements, **kw)
- return operations.invoke(op)
-
- @classmethod
- def batch_create_exclude_constraint(
- cls, operations, constraint_name, *elements, **kw):
- """Issue a "create exclude constraint" instruction using the
- current batch migration context.
-
- .. note:: This method is Postgresql specific, and additionally
- requires at least SQLAlchemy 1.0.
-
- .. versionadded:: 0.9.0
-
- .. seealso::
-
- :meth:`.Operations.create_exclude_constraint`
-
- """
- kw['schema'] = operations.impl.schema
- op = cls(constraint_name, operations.impl.table_name, elements, **kw)
- return operations.invoke(op)
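-
- # Usage sketch for the batch form (hypothetical names; the batch
- # context supplies table_name and schema):
- #
- #     with op.batch_alter_table("user") as batch_op:
- #         batch_op.create_exclude_constraint(
- #             "user_excl", ("period", '&&'))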
-
-
-@render.renderers.dispatch_for(CreateExcludeConstraintOp)
-def _add_exclude_constraint(autogen_context, op):
- return _exclude_constraint(
- op.to_constraint(),
- autogen_context,
- alter=True
- )
-
-if util.sqla_100:
- @render._constraint_renderers.dispatch_for(ExcludeConstraint)
- def _render_inline_exclude_constraint(constraint, autogen_context):
- rendered = render._user_defined_render(
- "exclude", constraint, autogen_context)
- if rendered is not False:
- return rendered
-
- return _exclude_constraint(constraint, autogen_context, False)
-
-
-def _postgresql_autogenerate_prefix(autogen_context):
-
- imports = autogen_context.imports
- if imports is not None:
- imports.add("from sqlalchemy.dialects import postgresql")
- return "postgresql."
-
-
-def _exclude_constraint(constraint, autogen_context, alter):
- opts = []
-
- has_batch = autogen_context._has_batch
-
- if constraint.deferrable:
- opts.append(("deferrable", str(constraint.deferrable)))
- if constraint.initially:
- opts.append(("initially", str(constraint.initially)))
- if constraint.using:
- opts.append(("using", str(constraint.using)))
- if not has_batch and alter and constraint.table.schema:
- opts.append(("schema", render._ident(constraint.table.schema)))
- if not alter and constraint.name:
- opts.append(
- ("name",
- render._render_gen_name(autogen_context, constraint.name)))
-
- if alter:
- args = [
- repr(render._render_gen_name(
- autogen_context, constraint.name))]
- if not has_batch:
- args += [repr(render._ident(constraint.table.name))]
- args.extend([
- "(%s, %r)" % (
- _render_potential_column(sqltext, autogen_context),
- opstring
- )
- for sqltext, name, opstring in constraint._render_exprs
- ])
- if constraint.where is not None:
- args.append(
- "where=%s" % render._render_potential_expr(
- constraint.where, autogen_context)
- )
- args.extend(["%s=%r" % (k, v) for k, v in opts])
- return "%(prefix)screate_exclude_constraint(%(args)s)" % {
- 'prefix': render._alembic_autogenerate_prefix(autogen_context),
- 'args': ", ".join(args)
- }
- else:
- args = [
- "(%s, %r)" % (
- _render_potential_column(sqltext, autogen_context),
- opstring
- ) for sqltext, name, opstring in constraint._render_exprs
- ]
- if constraint.where is not None:
- args.append(
- "where=%s" % render._render_potential_expr(
- constraint.where, autogen_context)
- )
- args.extend(["%s=%r" % (k, v) for k, v in opts])
- return "%(prefix)sExcludeConstraint(%(args)s)" % {
- "prefix": _postgresql_autogenerate_prefix(autogen_context),
- "args": ", ".join(args)
- }
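-
-# Illustrative rendering (hypothetical names): in alter mode the template
-# above produces migration code such as
-#
-#     op.create_exclude_constraint('user_excl', 'user',
-#         (sa.column('period'), '&&'), using='gist')
-#
-# while the inline branch renders a postgresql.ExcludeConstraint(...)
-# construct instead.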
-
-
-def _render_potential_column(value, autogen_context):
- if isinstance(value, ColumnClause):
- template = "%(prefix)scolumn(%(name)r)"
-
- return template % {
- "prefix": render._sqlalchemy_autogenerate_prefix(autogen_context),
- "name": value.name
- }
-
- else:
- return render._render_potential_expr(
- value, autogen_context, wrap_in_text=False)
diff --git a/venv/Lib/site-packages/alembic/ddl/sqlite.py b/venv/Lib/site-packages/alembic/ddl/sqlite.py
deleted file mode 100644
index 5d231b5..0000000
--- a/venv/Lib/site-packages/alembic/ddl/sqlite.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from .. import util
-from .impl import DefaultImpl
-import re
-
-
-class SQLiteImpl(DefaultImpl):
- __dialect__ = 'sqlite'
-
- transactional_ddl = False
- """SQLite supports transactional DDL, but pysqlite does not:
- see: http://bugs.python.org/issue10740
- """
-
- def requires_recreate_in_batch(self, batch_op):
- """Return True if the given :class:`.BatchOperationsImpl`
- would need the table to be recreated and copied in order to
- proceed.
-
- Normally, only returns True on SQLite when operations other
- than add_column(), create_index(), or drop_index() are present.
-
- """
- for op in batch_op.batch:
- if op[0] not in ('add_column', 'create_index', 'drop_index'):
- return True
- else:
- return False
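-
- # For example (illustrative): a batch consisting only of add_column()
- # directives runs as plain ALTER TABLE statements, while one containing
- # drop_column() forces the recreate-and-copy path implemented in
- # batch.py.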
-
- def add_constraint(self, const):
- # attempt to distinguish between an
- # auto-gen constraint and an explicit one
- if const._create_rule is None:
- raise NotImplementedError(
- "No support for ALTER of constraints in SQLite dialect")
- elif const._create_rule(self):
- util.warn("Skipping unsupported ALTER for "
- "creation of implicit constraint")
-
- def drop_constraint(self, const):
- if const._create_rule is None:
- raise NotImplementedError(
- "No support for ALTER of constraints in SQLite dialect")
-
- def compare_server_default(self, inspector_column,
- metadata_column,
- rendered_metadata_default,
- rendered_inspector_default):
-
- if rendered_metadata_default is not None:
- rendered_metadata_default = re.sub(
- r"^\"'|\"'$", "", rendered_metadata_default)
- if rendered_inspector_default is not None:
- rendered_inspector_default = re.sub(
- r"^\"'|\"'$", "", rendered_inspector_default)
-
- return rendered_inspector_default != rendered_metadata_default
-
- def correct_for_autogen_constraints(
- self, conn_unique_constraints, conn_indexes,
- metadata_unique_constraints,
- metadata_indexes):
-
- if util.sqla_100:
- return
-
- # adjustments to accommodate SQLite unnamed unique constraints
- # not being reported from the backend; this was updated in
- # SQLA 1.0.
-
- def uq_sig(uq):
- return tuple(sorted(uq.columns.keys()))
-
- conn_unique_sigs = set(
- uq_sig(uq)
- for uq in conn_unique_constraints
- )
-
- for idx in list(metadata_unique_constraints):
- # SQLite backend can't report on unnamed UNIQUE constraints,
- # so remove these, unless we see an exact signature match
- if idx.name is None and uq_sig(idx) not in conn_unique_sigs:
- metadata_unique_constraints.remove(idx)
-
-
-# @compiles(AddColumn, 'sqlite')
-# def visit_add_column(element, compiler, **kw):
-# return "%s %s" % (
-# alter_table(compiler, element.table_name, element.schema),
-# add_column(compiler, element.column, **kw)
-# )
-
-
-# def add_column(compiler, column, **kw):
-# text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
-# need to modify SQLAlchemy so that the CHECK associated with a Boolean
-# or Enum gets placed as part of the column constraints, not the Table
-# see ticket 98
-# for const in column.constraints:
-# text += compiler.process(AddConstraint(const))
-# return text
diff --git a/venv/Lib/site-packages/alembic/op.py b/venv/Lib/site-packages/alembic/op.py
deleted file mode 100644
index 1f367a1..0000000
--- a/venv/Lib/site-packages/alembic/op.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .operations.base import Operations
-
-# create proxy functions for
-# each method on the Operations class.
-Operations.create_module_class_proxy(globals(), locals())
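-
-# Inside a migration script the proxy is then used directly, e.g.
-# (illustrative):
-#
-#     from alembic import op
-#     import sqlalchemy as sa
-#
-#     def upgrade():
-#         op.add_column('account', sa.Column('name', sa.String(50)))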
-
diff --git a/venv/Lib/site-packages/alembic/operations/__init__.py b/venv/Lib/site-packages/alembic/operations/__init__.py
deleted file mode 100644
index 1f6ee5d..0000000
--- a/venv/Lib/site-packages/alembic/operations/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .base import Operations, BatchOperations
-from .ops import MigrateOperation
-from . import toimpl
-
-
-__all__ = ['Operations', 'BatchOperations', 'MigrateOperation']
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic/operations/base.py b/venv/Lib/site-packages/alembic/operations/base.py
deleted file mode 100644
index ef6ecf8..0000000
--- a/venv/Lib/site-packages/alembic/operations/base.py
+++ /dev/null
@@ -1,445 +0,0 @@
-from contextlib import contextmanager
-
-from .. import util
-from ..util import sqla_compat
-from . import batch
-from . import schemaobj
-from ..util.compat import exec_
-from ..util.compat import inspect_getargspec
-import textwrap
-import inspect
-
-__all__ = ('Operations', 'BatchOperations')
-
-try:
- from sqlalchemy.sql.naming import conv
-except ImportError:
- conv = None
-
-
-class Operations(util.ModuleClsProxy):
-
- """Define high level migration operations.
-
- Each operation corresponds to some schema migration operation,
- executed against a particular :class:`.MigrationContext`
- which in turn represents connectivity to a database,
- or a file output stream.
-
- While :class:`.Operations` is normally configured as
- part of the :meth:`.EnvironmentContext.run_migrations`
- method called from an ``env.py`` script, a standalone
- :class:`.Operations` instance can be
- made for use cases external to regular Alembic
- migrations by passing in a :class:`.MigrationContext`::
-
- from alembic.migration import MigrationContext
- from alembic.operations import Operations
-
- conn = myengine.connect()
- ctx = MigrationContext.configure(conn)
- op = Operations(ctx)
-
- op.alter_column("t", "c", nullable=True)
-
- Note that as of 0.8, most of the methods on this class are produced
- dynamically using the :meth:`.Operations.register_operation`
- method.
-
- """
-
- _to_impl = util.Dispatcher()
-
- def __init__(self, migration_context, impl=None):
- """Construct a new :class:`.Operations`
-
- :param migration_context: a :class:`.MigrationContext`
- instance.
-
- """
- self.migration_context = migration_context
- if impl is None:
- self.impl = migration_context.impl
- else:
- self.impl = impl
-
- self.schema_obj = schemaobj.SchemaObjects(migration_context)
-
- @classmethod
- def register_operation(cls, name, sourcename=None):
- """Register a new operation for this class.
-
- This method is normally used to add new operations
- to the :class:`.Operations` class, and possibly the
- :class:`.BatchOperations` class as well. All Alembic migration
- operations are implemented via this system, however the system
- is also available as a public API to facilitate adding custom
- operations.
-
- .. versionadded:: 0.8.0
-
- .. seealso::
-
- :ref:`operation_plugins`
-
-
- """
- def register(op_cls):
- if sourcename is None:
- fn = getattr(op_cls, name)
- source_name = fn.__name__
- else:
- fn = getattr(op_cls, sourcename)
- source_name = fn.__name__
-
- spec = inspect_getargspec(fn)
-
- name_args = spec[0]
- assert name_args[0:2] == ['cls', 'operations']
-
- name_args[0:2] = ['self']
-
- args = inspect.formatargspec(*spec)
- num_defaults = len(spec[3]) if spec[3] else 0
- if num_defaults:
- defaulted_vals = name_args[0 - num_defaults:]
- else:
- defaulted_vals = ()
-
- apply_kw = inspect.formatargspec(
- name_args, spec[1], spec[2],
- defaulted_vals,
- formatvalue=lambda x: '=' + x)
-
- func_text = textwrap.dedent("""\
- def %(name)s%(args)s:
- %(doc)r
- return op_cls.%(source_name)s%(apply_kw)s
- """ % {
- 'name': name,
- 'source_name': source_name,
- 'args': args,
- 'apply_kw': apply_kw,
- 'doc': fn.__doc__,
- 'meth': fn.__name__
- })
- globals_ = {'op_cls': op_cls}
- lcl = {}
- exec_(func_text, globals_, lcl)
- setattr(cls, name, lcl[name])
- fn.__func__.__doc__ = "This method is proxied on "\
- "the :class:`.%s` class, via the :meth:`.%s.%s` method." % (
- cls.__name__, cls.__name__, name
- )
- if hasattr(fn, '_legacy_translations'):
- lcl[name]._legacy_translations = fn._legacy_translations
- return op_cls
- return register
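-
- # A compact sketch of the documented pattern (hypothetical operation;
- # see :ref:`operation_plugins`):
- #
- #     @Operations.register_operation("create_sequence")
- #     class CreateSequenceOp(MigrateOperation):
- #         def __init__(self, sequence_name):
- #             self.sequence_name = sequence_name
- #
- #         @classmethod
- #         def create_sequence(cls, operations, sequence_name):
- #             return operations.invoke(cls(sequence_name))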
-
- @classmethod
- def implementation_for(cls, op_cls):
- """Register an implementation for a given :class:`.MigrateOperation`.
-
- This is part of the operation extensibility API.
-
- .. seealso::
-
- :ref:`operation_plugins` - example of use
-
- """
-
- def decorate(fn):
- cls._to_impl.dispatch_for(op_cls)(fn)
- return fn
- return decorate
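-
- # Continuing the sketch above, the execution side would be registered
- # as (hypothetical):
- #
- #     @Operations.implementation_for(CreateSequenceOp)
- #     def create_sequence(operations, operation):
- #         operations.execute(
- #             "CREATE SEQUENCE %s" % operation.sequence_name)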
-
- @classmethod
- @contextmanager
- def context(cls, migration_context):
- op = Operations(migration_context)
- op._install_proxy()
- yield op
- op._remove_proxy()
-
- @contextmanager
- def batch_alter_table(
- self, table_name, schema=None, recreate="auto", copy_from=None,
- table_args=(), table_kwargs=util.immutabledict(),
- reflect_args=(), reflect_kwargs=util.immutabledict(),
- naming_convention=None):
- """Invoke a series of per-table migrations in batch.
-
- Batch mode allows a series of operations specific to a table
- to be syntactically grouped together, and allows for alternate
- modes of table migration, in particular the "recreate" style of
- migration required by SQLite.
-
- "recreate" style is as follows:
-
- 1. A new table is created with the new specification, based on the
- migration directives within the batch, using a temporary name.
-
- 2. The data is copied from the existing table to the new table.
-
- 3. The existing table is dropped.
-
- 4. The new table is renamed to the existing table name.
-
- The directive by default will only use "recreate" style on the
- SQLite backend, and only if directives are present which require
- this form, e.g. anything other than ``add_column()``. The batch
- operation on other backends will proceed using standard ALTER TABLE
- operations.
-
- The method is used as a context manager, which returns an instance
- of :class:`.BatchOperations`; this object is the same as
- :class:`.Operations` except that table names and schema names
- are omitted. E.g.::
-
- with op.batch_alter_table("some_table") as batch_op:
- batch_op.add_column(Column('foo', Integer))
- batch_op.drop_column('bar')
-
- The operations within the context manager are invoked at once
- when the context is ended. When run against SQLite, if the
- migrations include operations not supported by SQLite's ALTER TABLE,
- the entire table will be copied to a new one with the new
- specification, moving all data across as well.
-
- The copy operation by default uses reflection to retrieve the current
- structure of the table, and therefore :meth:`.batch_alter_table`
- in this mode requires that the migration is run in "online" mode.
- The ``copy_from`` parameter may be passed which refers to an existing
- :class:`.Table` object, which will bypass this reflection step.
-
- .. note:: The table copy operation will currently not copy
- CHECK constraints, and may not copy UNIQUE constraints that are
- unnamed, as is possible on SQLite. See the section
- :ref:`sqlite_batch_constraints` for workarounds.
-
- :param table_name: name of table
- :param schema: optional schema name.
- :param recreate: under what circumstances the table should be
- recreated. At its default of ``"auto"``, the SQLite dialect will
- recreate the table if any operations other than ``add_column()``,
- ``create_index()``, or ``drop_index()`` are
- present. Other options include ``"always"`` and ``"never"``.
- :param copy_from: optional :class:`~sqlalchemy.schema.Table` object
- that will act as the structure of the table being copied. If omitted,
- table reflection is used to retrieve the structure of the table.
-
- .. versionadded:: 0.7.6 Fully implemented the
- :paramref:`~.Operations.batch_alter_table.copy_from`
- parameter.
-
- .. seealso::
-
- :ref:`batch_offline_mode`
-
- :paramref:`~.Operations.batch_alter_table.reflect_args`
-
- :paramref:`~.Operations.batch_alter_table.reflect_kwargs`
-
- :param reflect_args: a sequence of additional positional arguments that
- will be applied to the table structure being reflected / copied;
- this may be used to pass column and constraint overrides to the
- table that will be reflected, in lieu of passing the whole
- :class:`~sqlalchemy.schema.Table` using
- :paramref:`~.Operations.batch_alter_table.copy_from`.
-
- .. versionadded:: 0.7.1
-
- :param reflect_kwargs: a dictionary of additional keyword arguments
- that will be applied to the table structure being copied; this may be
- used to pass additional table and reflection options to the table that
- will be reflected, in lieu of passing the whole
- :class:`~sqlalchemy.schema.Table` using
- :paramref:`~.Operations.batch_alter_table.copy_from`.
-
- .. versionadded:: 0.7.1
-
- :param table_args: a sequence of additional positional arguments that
- will be applied to the new :class:`~sqlalchemy.schema.Table` when
- created, in addition to those copied from the source table.
- This may be used to provide additional constraints such as CHECK
- constraints that may not be reflected.
- :param table_kwargs: a dictionary of additional keyword arguments
- that will be applied to the new :class:`~sqlalchemy.schema.Table`
- when created, in addition to those copied from the source table.
- This may be used to provide for additional table options that may
- not be reflected.
-
- .. versionadded:: 0.7.0
-
- :param naming_convention: a naming convention dictionary of the form
- described at :ref:`autogen_naming_conventions` which will be applied
- to the :class:`~sqlalchemy.schema.MetaData` during the reflection
- process. This is typically required if one wants to drop SQLite
- constraints, as these constraints will not have names when
- reflected on this backend. Requires SQLAlchemy **0.9.4** or greater.
-
- .. seealso::
-
- :ref:`dropping_sqlite_foreign_keys`
-
- .. versionadded:: 0.7.1
-
- .. note:: batch mode requires SQLAlchemy 0.8 or above.
-
- .. seealso::
-
- :ref:`batch_migrations`
-
- """
- impl = batch.BatchOperationsImpl(
- self, table_name, schema, recreate,
- copy_from, table_args, table_kwargs, reflect_args,
- reflect_kwargs, naming_convention)
- batch_op = BatchOperations(self.migration_context, impl=impl)
- yield batch_op
- impl.flush()
-
- def get_context(self):
- """Return the :class:`.MigrationContext` object that's
- currently in use.
-
- """
-
- return self.migration_context
-
- def invoke(self, operation):
- """Given a :class:`.MigrateOperation`, invoke it in terms of
- this :class:`.Operations` instance.
-
- .. versionadded:: 0.8.0
-
- """
- fn = self._to_impl.dispatch(
- operation, self.migration_context.impl.__dialect__)
- return fn(self, operation)
-
- def f(self, name):
- """Indicate a string name that has already had a naming convention
- applied to it.
-
- This feature combines with the SQLAlchemy ``naming_convention`` feature
- to disambiguate constraint names that have already had naming
- conventions applied to them, versus those that have not. This is
- necessary when the ``"%(constraint_name)s"`` token is used
- within a naming convention, so that this particular name can be
- identified as one that should remain fixed.
-
- If :meth:`.Operations.f` is used on a constraint, the naming
- convention will not take effect::
-
- op.add_column('t', 'x', Boolean(name=op.f('ck_bool_t_x')))
-
- Above, the CHECK constraint generated will have the name
- ``ck_bool_t_x`` regardless of whether or not a naming convention is
- in use.
-
- Alternatively, if a naming convention is in use, and 'f' is not used,
- names will be converted according to the convention. If the
- ``target_metadata`` contains the naming convention
- ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the
- output of the following::
-
- op.add_column('t', 'x', Boolean(name='x'))
-
- will be::
-
- CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))
-
- The function is rendered in the output of autogenerate when
- a particular constraint name is already converted, for SQLAlchemy
- version **0.9.4 and greater only**. Even though ``naming_convention``
- was introduced in 0.9.2, the string disambiguation service is new
- as of 0.9.4.
-
- .. versionadded:: 0.6.4
-
- """
- if conv:
- return conv(name)
- else:
- raise NotImplementedError(
- "op.f() feature requires SQLAlchemy 0.9.4 or greater.")
-
- def inline_literal(self, value, type_=None):
- """Produce an 'inline literal' expression, suitable for
- using in an INSERT, UPDATE, or DELETE statement.
-
- When using Alembic in "offline" mode, CRUD operations
- aren't compatible with SQLAlchemy's default behavior surrounding
- literal values,
- which is that they are converted into bound values and passed
- separately into the ``execute()`` method of the DBAPI cursor.
- An offline SQL
- script needs to have these rendered inline. While it should
- always be noted that inline literal values are an **enormous**
- security hole in an application that handles untrusted input,
- a schema migration is not run in this context, so
- literals are safe to render inline, with the caveat that
- advanced types like dates may not be supported directly
- by SQLAlchemy.
-
- See :meth:`.execute` for an example usage of
- :meth:`.inline_literal`.
-
- The environment can also be configured to attempt to render
- "literal" values inline automatically, for those simple types
- that are supported by the dialect; see
- :paramref:`.EnvironmentContext.configure.literal_binds` for this
- more recently added feature.
-
- :param value: The value to render. Strings, integers, and simple
- numerics should be supported. Other types like boolean,
- dates, etc. may or may not be supported yet by various
- backends.
- :param type_: optional - a :class:`sqlalchemy.types.TypeEngine`
- subclass stating the type of this value. In SQLAlchemy
- expressions, this is usually derived automatically
- from the Python type of the value itself, as well as
- based on the context in which the value is used.
-
- .. seealso::
-
- :paramref:`.EnvironmentContext.configure.literal_binds`
-
- """
- return sqla_compat._literal_bindparam(None, value, type_=type_)
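-
- # Typical offline-mode usage (illustrative), rendering literal values
- # directly into the statement:
- #
- #     op.execute(
- #         account.update()
- #         .where(account.c.name == op.inline_literal('account 1'))
- #         .values({'name': op.inline_literal('account 2')})
- #     )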
-
- def get_bind(self):
- """Return the current 'bind'.
-
- Under normal circumstances, this is the
- :class:`~sqlalchemy.engine.Connection` currently being used
- to emit SQL to the database.
-
- In a SQL script context, this value is ``None``. [TODO: verify this]
-
- """
- return self.migration_context.impl.bind
-
-
-class BatchOperations(Operations):
- """Modifies the interface :class:`.Operations` for batch mode.
-
- This basically omits the ``table_name`` and ``schema`` parameters
- from associated methods, as these are a given when running under batch
- mode.
-
- .. seealso::
-
- :meth:`.Operations.batch_alter_table`
-
- Note that as of 0.8, most of the methods on this class are produced
- dynamically using the :meth:`.Operations.register_operation`
- method.
-
- """
-
- def _noop(self, operation):
- raise NotImplementedError(
- "The %s method does not apply to a batch table alter operation."
- % operation)
diff --git a/venv/Lib/site-packages/alembic/operations/batch.py b/venv/Lib/site-packages/alembic/operations/batch.py
deleted file mode 100644
index 84d29d9..0000000
--- a/venv/Lib/site-packages/alembic/operations/batch.py
+++ /dev/null
@@ -1,377 +0,0 @@
-from sqlalchemy import Table, MetaData, Index, select, Column, \
- ForeignKeyConstraint, PrimaryKeyConstraint, cast, CheckConstraint
-from sqlalchemy import types as sqltypes
-from sqlalchemy import schema as sql_schema
-from sqlalchemy.util import OrderedDict
-from .. import util
-if util.sqla_08:
- from sqlalchemy.events import SchemaEventTarget
-from ..util.sqla_compat import _columns_for_constraint, \
- _is_type_bound, _fk_is_self_referential
-
-
-class BatchOperationsImpl(object):
- def __init__(self, operations, table_name, schema, recreate,
- copy_from, table_args, table_kwargs,
- reflect_args, reflect_kwargs, naming_convention):
- if not util.sqla_08:
- raise NotImplementedError(
- "batch mode requires SQLAlchemy 0.8 or greater.")
- self.operations = operations
- self.table_name = table_name
- self.schema = schema
- if recreate not in ('auto', 'always', 'never'):
- raise ValueError(
- "recreate may be one of 'auto', 'always', or 'never'.")
- self.recreate = recreate
- self.copy_from = copy_from
- self.table_args = table_args
- self.table_kwargs = dict(table_kwargs)
- self.reflect_args = reflect_args
- self.reflect_kwargs = reflect_kwargs
- self.naming_convention = naming_convention
- self.batch = []
-
- @property
- def dialect(self):
- return self.operations.impl.dialect
-
- @property
- def impl(self):
- return self.operations.impl
-
- def _should_recreate(self):
- if self.recreate == 'auto':
- return self.operations.impl.requires_recreate_in_batch(self)
- elif self.recreate == 'always':
- return True
- else:
- return False
-
- def flush(self):
- should_recreate = self._should_recreate()
-
- if not should_recreate:
- for opname, arg, kw in self.batch:
- fn = getattr(self.operations.impl, opname)
- fn(*arg, **kw)
- else:
- if self.naming_convention:
- m1 = MetaData(naming_convention=self.naming_convention)
- else:
- m1 = MetaData()
-
- if self.copy_from is not None:
- existing_table = self.copy_from
- reflected = False
- else:
- existing_table = Table(
- self.table_name, m1,
- schema=self.schema,
- autoload=True,
- autoload_with=self.operations.get_bind(),
- *self.reflect_args, **self.reflect_kwargs)
- reflected = True
-
- batch_impl = ApplyBatchImpl(
- existing_table, self.table_args, self.table_kwargs, reflected)
- for opname, arg, kw in self.batch:
- fn = getattr(batch_impl, opname)
- fn(*arg, **kw)
-
- batch_impl._create(self.impl)
-
- def alter_column(self, *arg, **kw):
- self.batch.append(("alter_column", arg, kw))
-
- def add_column(self, *arg, **kw):
- self.batch.append(("add_column", arg, kw))
-
- def drop_column(self, *arg, **kw):
- self.batch.append(("drop_column", arg, kw))
-
- def add_constraint(self, const):
- self.batch.append(("add_constraint", (const,), {}))
-
- def drop_constraint(self, const):
- self.batch.append(("drop_constraint", (const, ), {}))
-
- def rename_table(self, *arg, **kw):
- self.batch.append(("rename_table", arg, kw))
-
- def create_index(self, idx):
- self.batch.append(("create_index", (idx,), {}))
-
- def drop_index(self, idx):
- self.batch.append(("drop_index", (idx,), {}))
-
- def create_table(self, table):
- raise NotImplementedError("Can't create table in batch mode")
-
- def drop_table(self, table):
- raise NotImplementedError("Can't drop table in batch mode")
-
-
-class ApplyBatchImpl(object):
- def __init__(self, table, table_args, table_kwargs, reflected):
- self.table = table # this is a Table object
- self.table_args = table_args
- self.table_kwargs = table_kwargs
- self.temp_table_name = self._calc_temp_name(table.name)
- self.new_table = None
- self.column_transfers = OrderedDict(
- (c.name, {'expr': c}) for c in self.table.c
- )
- self.reflected = reflected
- self._grab_table_elements()
-
- @classmethod
- def _calc_temp_name(cls, tablename):
- return ("_alembic_tmp_%s" % tablename)[0:50]
-
- def _grab_table_elements(self):
- schema = self.table.schema
- self.columns = OrderedDict()
- for c in self.table.c:
- c_copy = c.copy(schema=schema)
- c_copy.unique = c_copy.index = False
- # ensure that the type object was copied,
- # as we may need to modify it in-place
- if isinstance(c.type, SchemaEventTarget):
- assert c_copy.type is not c.type
- self.columns[c.name] = c_copy
- self.named_constraints = {}
- self.unnamed_constraints = []
- self.indexes = {}
- self.new_indexes = {}
- for const in self.table.constraints:
- if _is_type_bound(const):
- continue
- elif self.reflected and isinstance(const, CheckConstraint):
- # TODO: we are skipping reflected CheckConstraint because
- # we have no way to determine _is_type_bound() for these.
- pass
- elif const.name:
- self.named_constraints[const.name] = const
- else:
- self.unnamed_constraints.append(const)
-
- for idx in self.table.indexes:
- self.indexes[idx.name] = idx
-
- for k in self.table.kwargs:
- self.table_kwargs.setdefault(k, self.table.kwargs[k])
-
- def _transfer_elements_to_new_table(self):
- assert self.new_table is None, "Can only create new table once"
-
- m = MetaData()
- schema = self.table.schema
-
- self.new_table = new_table = Table(
- self.temp_table_name, m,
- *(list(self.columns.values()) + list(self.table_args)),
- schema=schema,
- **self.table_kwargs)
-
- for const in list(self.named_constraints.values()) + \
- self.unnamed_constraints:
-
- const_columns = set([
- c.key for c in _columns_for_constraint(const)])
-
- if not const_columns.issubset(self.column_transfers):
- continue
-
- if isinstance(const, ForeignKeyConstraint):
- if _fk_is_self_referential(const):
- # for a self-referential constraint, refer to the
- # *original* table name, and not the temporary name.
- # This is consistent with how we're handling
- # FK constraints from other tables; we assume SQLite,
- # which does not enforce the foreign keys here, keeps
- # the referenced names unchanged, so when we rename
- # back, they match again.
- const_copy = const.copy(
- schema=schema, target_table=self.table)
- else:
- # "target_table" for ForeignKeyConstraint.copy() is
- # only used if the FK is detected as being
- # self-referential, which we are handling above.
- const_copy = const.copy(schema=schema)
- else:
- const_copy = const.copy(schema=schema, target_table=new_table)
- if isinstance(const, ForeignKeyConstraint):
- self._setup_referent(m, const)
- new_table.append_constraint(const_copy)
-
- def _gather_indexes_from_both_tables(self):
- idx = []
- idx.extend(self.indexes.values())
- for index in self.new_indexes.values():
- idx.append(
- Index(
- index.name,
- unique=index.unique,
- *[self.new_table.c[col] for col in index.columns.keys()],
- **index.kwargs)
- )
- return idx
-
- def _setup_referent(self, metadata, constraint):
- spec = constraint.elements[0]._get_colspec()
- parts = spec.split(".")
- tname = parts[-2]
- if len(parts) == 3:
- referent_schema = parts[0]
- else:
- referent_schema = None
-
- if tname != self.temp_table_name:
- key = sql_schema._get_table_key(tname, referent_schema)
- if key in metadata.tables:
- t = metadata.tables[key]
- for elem in constraint.elements:
- colname = elem._get_colspec().split(".")[-1]
- if not t.c.contains_column(colname):
- t.append_column(
- Column(colname, sqltypes.NULLTYPE)
- )
- else:
- Table(
- tname, metadata,
- *[Column(n, sqltypes.NULLTYPE) for n in
- [elem._get_colspec().split(".")[-1]
- for elem in constraint.elements]],
- schema=referent_schema)
-
- def _create(self, op_impl):
- self._transfer_elements_to_new_table()
-
- op_impl.prep_table_for_batch(self.table)
- op_impl.create_table(self.new_table)
-
- try:
- op_impl._exec(
- self.new_table.insert(inline=True).from_select(
- list(k for k, transfer in
- self.column_transfers.items() if 'expr' in transfer),
- select([
- transfer['expr']
- for transfer in self.column_transfers.values()
- if 'expr' in transfer
- ])
- )
- )
- op_impl.drop_table(self.table)
- except:
- op_impl.drop_table(self.new_table)
- raise
- else:
- op_impl.rename_table(
- self.temp_table_name,
- self.table.name,
- schema=self.table.schema
- )
- self.new_table.name = self.table.name
- try:
- for idx in self._gather_indexes_from_both_tables():
- op_impl.create_index(idx)
- finally:
- self.new_table.name = self.temp_table_name
-
- def alter_column(self, table_name, column_name,
- nullable=None,
- server_default=False,
- name=None,
- type_=None,
- autoincrement=None,
- **kw
- ):
- existing = self.columns[column_name]
- existing_transfer = self.column_transfers[column_name]
- if name is not None and name != column_name:
- # note that we don't change '.key' - we keep referring
- # to the renamed column by its old key in _create(). neat!
- existing.name = name
- existing_transfer["name"] = name
-
- if type_ is not None:
- type_ = sqltypes.to_instance(type_)
- # old type is being discarded so turn off eventing
- # rules. Alternatively we can
- # erase the events set up by this type, but this is simpler.
- # we also ignore the drop_constraint that will come here from
- # Operations.implementation_for(alter_column)
- if isinstance(existing.type, SchemaEventTarget):
- existing.type._create_events = \
- existing.type.create_constraint = False
-
- if existing.type._type_affinity is not type_._type_affinity:
- existing_transfer["expr"] = cast(
- existing_transfer["expr"], type_)
-
- existing.type = type_
-
- # we *don't*, however, set events for the new type, because
- # alter_column is invoked from
- # Operations.implementation_for(alter_column) which already
- # will emit an add_constraint()
-
- if nullable is not None:
- existing.nullable = nullable
- if server_default is not False:
- if server_default is None:
- existing.server_default = None
- else:
- sql_schema.DefaultClause(server_default)._set_parent(existing)
- if autoincrement is not None:
- existing.autoincrement = bool(autoincrement)
-
- def add_column(self, table_name, column, **kw):
- # we copy the column because operations.add_column()
- # gives us a Column that is part of a Table already.
- self.columns[column.name] = column.copy(schema=self.table.schema)
- self.column_transfers[column.name] = {}
-
- def drop_column(self, table_name, column, **kw):
- del self.columns[column.name]
- del self.column_transfers[column.name]
-
- def add_constraint(self, const):
- if not const.name:
- raise ValueError("Constraint must have a name")
- if isinstance(const, sql_schema.PrimaryKeyConstraint):
- if self.table.primary_key in self.unnamed_constraints:
- self.unnamed_constraints.remove(self.table.primary_key)
-
- self.named_constraints[const.name] = const
-
- def drop_constraint(self, const):
- if not const.name:
- raise ValueError("Constraint must have a name")
- try:
- const = self.named_constraints.pop(const.name)
- except KeyError:
- if _is_type_bound(const):
- # type-bound constraints are only included in the new
- # table via their type object in any case, so ignore the
- # drop_constraint() that comes here via the
- # Operations.implementation_for(alter_column)
- return
- raise ValueError("No such constraint: '%s'" % const.name)
- else:
- if isinstance(const, PrimaryKeyConstraint):
- for col in const.columns:
- self.columns[col.name].primary_key = False
-
- def create_index(self, idx):
- self.new_indexes[idx.name] = idx
-
- def drop_index(self, idx):
- try:
- del self.indexes[idx.name]
- except KeyError:
- raise ValueError("No such index: '%s'" % idx.name)
-
- def rename_table(self, *arg, **kw):
- raise NotImplementedError("TODO")
diff --git a/venv/Lib/site-packages/alembic/operations/ops.py b/venv/Lib/site-packages/alembic/operations/ops.py
deleted file mode 100644
index 43001fe..0000000
--- a/venv/Lib/site-packages/alembic/operations/ops.py
+++ /dev/null
@@ -1,2062 +0,0 @@
-from .. import util
-from ..util import sqla_compat
-from . import schemaobj
-from sqlalchemy.types import NULLTYPE
-from .base import Operations, BatchOperations
-import re
-
-
-class MigrateOperation(object):
- """base class for migration command and organization objects.
-
- This system is part of the operation extensibility API.
-
- .. versionadded:: 0.8.0
-
- .. seealso::
-
- :ref:`operation_objects`
-
- :ref:`operation_plugins`
-
- :ref:`customizing_revision`
-
- """
-
- @util.memoized_property
- def info(self):
- """A dictionary that may be used to store arbitrary information
- along with this :class:`.MigrateOperation` object.
-
- """
- return {}
-
-
-class AddConstraintOp(MigrateOperation):
- """Represent an add constraint operation."""
-
- add_constraint_ops = util.Dispatcher()
-
- @property
- def constraint_type(self):
- raise NotImplementedError()
-
- @classmethod
- def register_add_constraint(cls, type_):
- def go(klass):
- cls.add_constraint_ops.dispatch_for(type_)(klass.from_constraint)
- return klass
- return go
-
- @classmethod
- def from_constraint(cls, constraint):
- return cls.add_constraint_ops.dispatch(
- constraint.__visit_name__)(constraint)
-
- def reverse(self):
- return DropConstraintOp.from_constraint(self.to_constraint())
-
- def to_diff_tuple(self):
- return ("add_constraint", self.to_constraint())
-
-
-@Operations.register_operation("drop_constraint")
-@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint")
-class DropConstraintOp(MigrateOperation):
- """Represent a drop constraint operation."""
-
- def __init__(
- self,
- constraint_name, table_name, type_=None, schema=None,
- _orig_constraint=None):
- self.constraint_name = constraint_name
- self.table_name = table_name
- self.constraint_type = type_
- self.schema = schema
- self._orig_constraint = _orig_constraint
-
- def reverse(self):
- if self._orig_constraint is None:
- raise ValueError(
- "operation is not reversible; "
- "original constraint is not present")
- return AddConstraintOp.from_constraint(self._orig_constraint)
-
- def to_diff_tuple(self):
- if self.constraint_type == "foreignkey":
- return ("remove_fk", self.to_constraint())
- else:
- return ("remove_constraint", self.to_constraint())
-
- @classmethod
- def from_constraint(cls, constraint):
- types = {
- "unique_constraint": "unique",
- "foreign_key_constraint": "foreignkey",
- "primary_key_constraint": "primary",
- "check_constraint": "check",
- "column_check_constraint": "check",
- }
-
- constraint_table = sqla_compat._table_for_constraint(constraint)
- return cls(
- constraint.name,
- constraint_table.name,
- schema=constraint_table.schema,
- type_=types[constraint.__visit_name__],
- _orig_constraint=constraint
- )
-
- def to_constraint(self):
- if self._orig_constraint is not None:
- return self._orig_constraint
- else:
- raise ValueError(
- "constraint cannot be produced; "
- "original constraint is not present")
-
- @classmethod
- @util._with_legacy_names([
- ("type", "type_"),
- ("name", "constraint_name"),
- ])
- def drop_constraint(
- cls, operations, constraint_name, table_name,
- type_=None, schema=None):
- """Drop a constraint of the given name, typically via DROP CONSTRAINT.
-
- :param constraint_name: name of the constraint.
- :param table_name: table name.
- :param type_: optional, required on MySQL. Can be
- 'foreignkey', 'primary', 'unique', or 'check'.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> constraint_name
-
- """
-
- op = cls(constraint_name, table_name, type_=type_, schema=schema)
- return operations.invoke(op)
-
- @classmethod
- def batch_drop_constraint(cls, operations, constraint_name, type_=None):
- """Issue a "drop constraint" instruction using the
- current batch migration context.
-
- The batch form of this call omits the ``table_name`` and ``schema``
- arguments from the call.
-
- .. seealso::
-
- :meth:`.Operations.drop_constraint`
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> constraint_name
-
- """
- op = cls(
- constraint_name, operations.impl.table_name,
- type_=type_, schema=operations.impl.schema
- )
- return operations.invoke(op)
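-
- # Batch usage sketch (illustrative; type_ matters on MySQL):
- #
- #     with op.batch_alter_table("user") as batch_op:
- #         batch_op.drop_constraint("uq_user_name", type_="unique")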
-
-
-@Operations.register_operation("create_primary_key")
-@BatchOperations.register_operation(
- "create_primary_key", "batch_create_primary_key")
-@AddConstraintOp.register_add_constraint("primary_key_constraint")
-class CreatePrimaryKeyOp(AddConstraintOp):
- """Represent a create primary key operation."""
-
- constraint_type = "primarykey"
-
- def __init__(
- self, constraint_name, table_name, columns,
- schema=None, _orig_constraint=None, **kw):
- self.constraint_name = constraint_name
- self.table_name = table_name
- self.columns = columns
- self.schema = schema
- self._orig_constraint = _orig_constraint
- self.kw = kw
-
- @classmethod
- def from_constraint(cls, constraint):
- constraint_table = sqla_compat._table_for_constraint(constraint)
-
- return cls(
- constraint.name,
- constraint_table.name,
- constraint.columns,
- schema=constraint_table.schema,
- _orig_constraint=constraint
- )
-
- def to_constraint(self, migration_context=None):
- if self._orig_constraint is not None:
- return self._orig_constraint
-
- schema_obj = schemaobj.SchemaObjects(migration_context)
- return schema_obj.primary_key_constraint(
- self.constraint_name, self.table_name,
- self.columns, schema=self.schema)
-
- @classmethod
- @util._with_legacy_names([
- ('name', 'constraint_name'),
- ('cols', 'columns')
- ])
- def create_primary_key(
- cls, operations,
- constraint_name, table_name, columns, schema=None):
- """Issue a "create primary key" instruction using the current
- migration context.
-
- e.g.::
-
- from alembic import op
- op.create_primary_key(
- "pk_my_table", "my_table",
- ["id", "version"]
- )
-
- This internally generates a :class:`~sqlalchemy.schema.Table` object
- containing the necessary columns, then generates a new
- :class:`~sqlalchemy.schema.PrimaryKeyConstraint`
- object which it then associates with the
- :class:`~sqlalchemy.schema.Table`.
- Any event listeners associated with this action will be fired
- off normally. The :class:`~sqlalchemy.schema.AddConstraint`
- construct is ultimately used to generate the ALTER statement.
-
- :param constraint_name: Name of the primary key constraint. The name is necessary
- so that an ALTER statement can be emitted. For setups that
- use an automated naming scheme such as that described at
- :ref:`sqla:constraint_naming_conventions`
- ``name`` here can be ``None``, as the event listener will
- apply the name to the constraint object when it is associated
- with the table.
- :param table_name: String name of the target table.
- :param columns: a list of string column names to be applied to the
- primary key constraint.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> constraint_name
- * cols -> columns
-
- """
- op = cls(constraint_name, table_name, columns, schema)
- return operations.invoke(op)
-
- @classmethod
- def batch_create_primary_key(cls, operations, constraint_name, columns):
- """Issue a "create primary key" instruction using the
- current batch migration context.
-
- The batch form of this call omits the ``table_name`` and ``schema``
- arguments from the call.
-
- .. seealso::
-
- :meth:`.Operations.create_primary_key`
-
- """
- op = cls(
- constraint_name, operations.impl.table_name, columns,
- schema=operations.impl.schema
- )
- return operations.invoke(op)
-
-
-@Operations.register_operation("create_unique_constraint")
-@BatchOperations.register_operation(
- "create_unique_constraint", "batch_create_unique_constraint")
-@AddConstraintOp.register_add_constraint("unique_constraint")
-class CreateUniqueConstraintOp(AddConstraintOp):
- """Represent a create unique constraint operation."""
-
- constraint_type = "unique"
-
- def __init__(
- self, constraint_name, table_name,
- columns, schema=None, _orig_constraint=None, **kw):
- self.constraint_name = constraint_name
- self.table_name = table_name
- self.columns = columns
- self.schema = schema
- self._orig_constraint = _orig_constraint
- self.kw = kw
-
- @classmethod
- def from_constraint(cls, constraint):
- constraint_table = sqla_compat._table_for_constraint(constraint)
-
- kw = {}
- if constraint.deferrable:
- kw['deferrable'] = constraint.deferrable
- if constraint.initially:
- kw['initially'] = constraint.initially
-
- return cls(
- constraint.name,
- constraint_table.name,
- [c.name for c in constraint.columns],
- schema=constraint_table.schema,
- _orig_constraint=constraint,
- **kw
- )
-
- def to_constraint(self, migration_context=None):
- if self._orig_constraint is not None:
- return self._orig_constraint
-
- schema_obj = schemaobj.SchemaObjects(migration_context)
- return schema_obj.unique_constraint(
- self.constraint_name, self.table_name, self.columns,
- schema=self.schema, **self.kw)
-
- @classmethod
- @util._with_legacy_names([
- ('name', 'constraint_name'),
- ('source', 'table_name'),
- ('local_cols', 'columns'),
- ])
- def create_unique_constraint(
- cls, operations, constraint_name, table_name, columns,
- schema=None, **kw):
- """Issue a "create unique constraint" instruction using the
- current migration context.
-
- e.g.::
-
- from alembic import op
- op.create_unique_constraint("uq_user_name", "user", ["name"])
-
- This internally generates a :class:`~sqlalchemy.schema.Table` object
- containing the necessary columns, then generates a new
- :class:`~sqlalchemy.schema.UniqueConstraint`
- object which it then associates with the
- :class:`~sqlalchemy.schema.Table`.
- Any event listeners associated with this action will be fired
- off normally. The :class:`~sqlalchemy.schema.AddConstraint`
- construct is ultimately used to generate the ALTER statement.
-
- :param constraint_name: Name of the unique constraint. The name is necessary
- so that an ALTER statement can be emitted. For setups that
- use an automated naming scheme such as that described at
- :ref:`sqla:constraint_naming_conventions`,
- ``name`` here can be ``None``, as the event listener will
- apply the name to the constraint object when it is associated
- with the table.
- :param table_name: String name of the source table.
- :param columns: a list of string column names in the
- source table.
- :param deferrable: optional bool. If set, emit DEFERRABLE or
- NOT DEFERRABLE when issuing DDL for this constraint.
- :param initially: optional string. If set, emit INITIALLY
- when issuing DDL for this constraint.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> constraint_name
- * source -> table_name
- * local_cols -> columns
-
- """
-
- op = cls(
- constraint_name, table_name, columns,
- schema=schema, **kw
- )
- return operations.invoke(op)
-
- @classmethod
- @util._with_legacy_names([('name', 'constraint_name')])
- def batch_create_unique_constraint(
- cls, operations, constraint_name, columns, **kw):
- """Issue a "create unique constraint" instruction using the
- current batch migration context.
-
- The batch form of this call omits the ``source`` and ``schema``
- arguments from the call.
-
- .. seealso::
-
- :meth:`.Operations.create_unique_constraint`
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> constraint_name
-
- """
- kw['schema'] = operations.impl.schema
- op = cls(
- constraint_name, operations.impl.table_name, columns,
- **kw
- )
- return operations.invoke(op)
-
-
-@Operations.register_operation("create_foreign_key")
-@BatchOperations.register_operation(
- "create_foreign_key", "batch_create_foreign_key")
-@AddConstraintOp.register_add_constraint("foreign_key_constraint")
-class CreateForeignKeyOp(AddConstraintOp):
- """Represent a create foreign key constraint operation."""
-
- constraint_type = "foreignkey"
-
- def __init__(
- self, constraint_name, source_table, referent_table, local_cols,
- remote_cols, _orig_constraint=None, **kw):
- self.constraint_name = constraint_name
- self.source_table = source_table
- self.referent_table = referent_table
- self.local_cols = local_cols
- self.remote_cols = remote_cols
- self._orig_constraint = _orig_constraint
- self.kw = kw
-
- def to_diff_tuple(self):
- return ("add_fk", self.to_constraint())
-
- @classmethod
- def from_constraint(cls, constraint):
- kw = {}
- if constraint.onupdate:
- kw['onupdate'] = constraint.onupdate
- if constraint.ondelete:
- kw['ondelete'] = constraint.ondelete
- if constraint.initially:
- kw['initially'] = constraint.initially
- if constraint.deferrable:
- kw['deferrable'] = constraint.deferrable
- if constraint.use_alter:
- kw['use_alter'] = constraint.use_alter
-
- source_schema, source_table, \
- source_columns, target_schema, \
- target_table, target_columns,\
- onupdate, ondelete, deferrable, initially \
- = sqla_compat._fk_spec(constraint)
-
- kw['source_schema'] = source_schema
- kw['referent_schema'] = target_schema
-
- return cls(
- constraint.name,
- source_table,
- target_table,
- source_columns,
- target_columns,
- _orig_constraint=constraint,
- **kw
- )
-
- def to_constraint(self, migration_context=None):
- if self._orig_constraint is not None:
- return self._orig_constraint
- schema_obj = schemaobj.SchemaObjects(migration_context)
- return schema_obj.foreign_key_constraint(
- self.constraint_name,
- self.source_table, self.referent_table,
- self.local_cols, self.remote_cols,
- **self.kw)
-
- @classmethod
- @util._with_legacy_names([
- ('name', 'constraint_name'),
- ('source', 'source_table'),
- ('referent', 'referent_table'),
- ])
- def create_foreign_key(cls, operations, constraint_name,
- source_table, referent_table, local_cols,
- remote_cols, onupdate=None, ondelete=None,
- deferrable=None, initially=None, match=None,
- source_schema=None, referent_schema=None,
- **dialect_kw):
- """Issue a "create foreign key" instruction using the
- current migration context.
-
- e.g.::
-
- from alembic import op
- op.create_foreign_key(
- "fk_user_address", "address",
- "user", ["user_id"], ["id"])
-
- This internally generates a :class:`~sqlalchemy.schema.Table` object
- containing the necessary columns, then generates a new
- :class:`~sqlalchemy.schema.ForeignKeyConstraint`
- object which it then associates with the
- :class:`~sqlalchemy.schema.Table`.
- Any event listeners associated with this action will be fired
- off normally. The :class:`~sqlalchemy.schema.AddConstraint`
- construct is ultimately used to generate the ALTER statement.
-
- :param constraint_name: Name of the foreign key constraint. The name is necessary
- so that an ALTER statement can be emitted. For setups that
- use an automated naming scheme such as that described at
- :ref:`sqla:constraint_naming_conventions`,
- ``name`` here can be ``None``, as the event listener will
- apply the name to the constraint object when it is associated
- with the table.
- :param source_table: String name of the source table.
- :param referent_table: String name of the destination table.
- :param local_cols: a list of string column names in the
- source table.
- :param remote_cols: a list of string column names in the
- remote table.
- :param onupdate: Optional string. If set, emit ON UPDATE when
- issuing DDL for this constraint. Typical values include CASCADE,
- SET NULL and RESTRICT.
- :param ondelete: Optional string. If set, emit ON DELETE when
- issuing DDL for this constraint. Typical values include CASCADE,
- SET NULL and RESTRICT.
- :param deferrable: optional bool. If set, emit DEFERRABLE or NOT
- DEFERRABLE when issuing DDL for this constraint.
- :param source_schema: Optional schema name of the source table.
- :param referent_schema: Optional schema name of the destination table.
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> constraint_name
- * source -> source_table
- * referent -> referent_table
-
- """
-
- op = cls(
- constraint_name,
- source_table, referent_table,
- local_cols, remote_cols,
- onupdate=onupdate, ondelete=ondelete,
- deferrable=deferrable,
- source_schema=source_schema,
- referent_schema=referent_schema,
- initially=initially, match=match,
- **dialect_kw
- )
- return operations.invoke(op)
-
- @classmethod
- @util._with_legacy_names([
- ('name', 'constraint_name'),
- ('referent', 'referent_table')
- ])
- def batch_create_foreign_key(
- cls, operations, constraint_name, referent_table,
- local_cols, remote_cols,
- referent_schema=None,
- onupdate=None, ondelete=None,
- deferrable=None, initially=None, match=None,
- **dialect_kw):
- """Issue a "create foreign key" instruction using the
- current batch migration context.
-
- The batch form of this call omits the ``source_table`` and ``source_schema``
- arguments from the call.
-
- e.g.::
-
- with batch_alter_table("address") as batch_op:
- batch_op.create_foreign_key(
- "fk_user_address",
- "user", ["user_id"], ["id"])
-
- .. seealso::
-
- :meth:`.Operations.create_foreign_key`
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> constraint_name
- * referent -> referent_table
-
- """
- op = cls(
- constraint_name,
- operations.impl.table_name, referent_table,
- local_cols, remote_cols,
- onupdate=onupdate, ondelete=ondelete,
- deferrable=deferrable,
- source_schema=operations.impl.schema,
- referent_schema=referent_schema,
- initially=initially, match=match,
- **dialect_kw
- )
- return operations.invoke(op)
-
-
-@Operations.register_operation("create_check_constraint")
-@BatchOperations.register_operation(
- "create_check_constraint", "batch_create_check_constraint")
-@AddConstraintOp.register_add_constraint("check_constraint")
-@AddConstraintOp.register_add_constraint("column_check_constraint")
-class CreateCheckConstraintOp(AddConstraintOp):
- """Represent a create check constraint operation."""
-
- constraint_type = "check"
-
- def __init__(
- self, constraint_name, table_name,
- condition, schema=None, _orig_constraint=None, **kw):
- self.constraint_name = constraint_name
- self.table_name = table_name
- self.condition = condition
- self.schema = schema
- self._orig_constraint = _orig_constraint
- self.kw = kw
-
- @classmethod
- def from_constraint(cls, constraint):
- constraint_table = sqla_compat._table_for_constraint(constraint)
-
- return cls(
- constraint.name,
- constraint_table.name,
- constraint.sqltext,
- schema=constraint_table.schema,
- _orig_constraint=constraint
- )
-
- def to_constraint(self, migration_context=None):
- if self._orig_constraint is not None:
- return self._orig_constraint
- schema_obj = schemaobj.SchemaObjects(migration_context)
- return schema_obj.check_constraint(
- self.constraint_name, self.table_name,
- self.condition, schema=self.schema, **self.kw)
-
- @classmethod
- @util._with_legacy_names([
- ('name', 'constraint_name'),
- ('source', 'table_name')
- ])
- def create_check_constraint(
- cls, operations,
- constraint_name, table_name, condition,
- schema=None, **kw):
- """Issue a "create check constraint" instruction using the
- current migration context.
-
- e.g.::
-
- from alembic import op
- from sqlalchemy.sql import column, func
-
- op.create_check_constraint(
- "ck_user_name_len",
- "user",
- func.len(column('name')) > 5
- )
-
- CHECK constraints are usually against a SQL expression, so ad-hoc
- table metadata is typically needed. The function will convert the given
- arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
- to an anonymous table in order to emit the CREATE statement.
-
- :param constraint_name: Name of the check constraint. The name
- is necessary so that an ALTER statement can be emitted. For setups
- that use an automated naming scheme such as that described at
- :ref:`sqla:constraint_naming_conventions`,
- ``constraint_name`` here can be ``None``, as the event listener will
- apply the name to the constraint object when it is associated
- with the table.
- :param table_name: String name of the source table.
- :param condition: SQL expression that's the condition of the
- constraint. Can be a string or SQLAlchemy expression language
- structure.
- :param deferrable: optional bool. If set, emit DEFERRABLE or
- NOT DEFERRABLE when issuing DDL for this constraint.
- :param initially: optional string. If set, emit INITIALLY
- when issuing DDL for this constraint.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> constraint_name
- * source -> table_name
-
- """
- op = cls(constraint_name, table_name, condition, schema=schema, **kw)
- return operations.invoke(op)
-
- @classmethod
- @util._with_legacy_names([('name', 'constraint_name')])
- def batch_create_check_constraint(
- cls, operations, constraint_name, condition, **kw):
- """Issue a "create check constraint" instruction using the
- current batch migration context.
-
- The batch form of this call omits the ``table_name`` and ``schema``
- arguments from the call.
-
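- e.g., a sketch mirroring the non-batch example above (table,
- constraint and column names are illustrative)::
-
- from sqlalchemy.sql import column, func
-
- with op.batch_alter_table("user") as batch_op:
- batch_op.create_check_constraint(
- "ck_user_name_len",
- func.len(column('name')) > 5
- )
-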
- .. seealso::
-
- :meth:`.Operations.create_check_constraint`
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> constraint_name
-
- """
- op = cls(
- constraint_name, operations.impl.table_name,
- condition, schema=operations.impl.schema, **kw)
- return operations.invoke(op)
-
-
-@Operations.register_operation("create_index")
-@BatchOperations.register_operation("create_index", "batch_create_index")
-class CreateIndexOp(MigrateOperation):
- """Represent a create index operation."""
-
- def __init__(
- self, index_name, table_name, columns, schema=None,
- unique=False, _orig_index=None, **kw):
- self.index_name = index_name
- self.table_name = table_name
- self.columns = columns
- self.schema = schema
- self.unique = unique
- self.kw = kw
- self._orig_index = _orig_index
-
- def reverse(self):
- return DropIndexOp.from_index(self.to_index())
-
- def to_diff_tuple(self):
- return ("add_index", self.to_index())
-
- @classmethod
- def from_index(cls, index):
- return cls(
- index.name,
- index.table.name,
- sqla_compat._get_index_expressions(index),
- schema=index.table.schema,
- unique=index.unique,
- _orig_index=index,
- **index.kwargs
- )
-
- def to_index(self, migration_context=None):
- if self._orig_index:
- return self._orig_index
- schema_obj = schemaobj.SchemaObjects(migration_context)
- return schema_obj.index(
- self.index_name, self.table_name, self.columns, schema=self.schema,
- unique=self.unique, **self.kw)
-
- @classmethod
- @util._with_legacy_names([('name', 'index_name')])
- def create_index(
- cls, operations,
- index_name, table_name, columns, schema=None,
- unique=False, **kw):
- r"""Issue a "create index" instruction using the current
- migration context.
-
- e.g.::
-
- from alembic import op
- op.create_index('ik_test', 't1', ['foo', 'bar'])
-
- Functional indexes can be produced by using the
- :func:`sqlalchemy.sql.expression.text` construct::
-
- from alembic import op
- from sqlalchemy import text
- op.create_index('ik_test', 't1', [text('lower(foo)')])
-
- .. versionadded:: 0.6.7 support for making use of the
- :func:`~sqlalchemy.sql.expression.text` construct in
- conjunction with
- :meth:`.Operations.create_index` in
- order to produce functional expressions within CREATE INDEX.
-
- :param index_name: name of the index.
- :param table_name: name of the owning table.
- :param columns: a list consisting of string column names and/or
- :func:`~sqlalchemy.sql.expression.text` constructs.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- :param unique: If True, create a unique index.
-
- :param quote:
- Force quoting of this column's name on or off, corresponding
- to ``True`` or ``False``. When left at its default
- of ``None``, the column identifier will be quoted according to
- whether the name is case sensitive (identifiers with at least one
- upper case character are treated as case sensitive), or if it's a
- reserved word. This flag is only needed to force quoting of a
- reserved word which is not known by the SQLAlchemy dialect.
-
- :param \**kw: Additional keyword arguments not mentioned above are
- dialect specific, and passed in the form
- ``<dialectname>_<argname>``.
- See the documentation regarding an individual dialect at
- :ref:`dialect_toplevel` for detail on documented arguments.
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> index_name
-
- """
- op = cls(
- index_name, table_name, columns, schema=schema,
- unique=unique, **kw
- )
- return operations.invoke(op)
-
- @classmethod
- def batch_create_index(cls, operations, index_name, columns, **kw):
- """Issue a "create index" instruction using the
- current batch migration context.
-
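- e.g., a sketch paralleling :meth:`.Operations.create_index`
- (table and index names are illustrative)::
-
- with op.batch_alter_table("t1") as batch_op:
- batch_op.create_index('ik_test', ['foo', 'bar'])
-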
- .. seealso::
-
- :meth:`.Operations.create_index`
-
- """
-
- op = cls(
- index_name, operations.impl.table_name, columns,
- schema=operations.impl.schema, **kw
- )
- return operations.invoke(op)
-
-
-@Operations.register_operation("drop_index")
-@BatchOperations.register_operation("drop_index", "batch_drop_index")
-class DropIndexOp(MigrateOperation):
- """Represent a drop index operation."""
-
- def __init__(
- self, index_name, table_name=None,
- schema=None, _orig_index=None, **kw):
- self.index_name = index_name
- self.table_name = table_name
- self.schema = schema
- self._orig_index = _orig_index
- self.kw = kw
-
- def to_diff_tuple(self):
- return ("remove_index", self.to_index())
-
- def reverse(self):
- if self._orig_index is None:
- raise ValueError(
- "operation is not reversible; "
- "original index is not present")
- return CreateIndexOp.from_index(self._orig_index)
-
- @classmethod
- def from_index(cls, index):
- return cls(
- index.name,
- index.table.name,
- schema=index.table.schema,
- _orig_index=index,
- **index.kwargs
- )
-
- def to_index(self, migration_context=None):
- if self._orig_index is not None:
- return self._orig_index
-
- schema_obj = schemaobj.SchemaObjects(migration_context)
-
- # need a dummy column name here since SQLAlchemy
- # 0.7.6 and further raises on Index with no columns
- return schema_obj.index(
- self.index_name, self.table_name, ['x'],
- schema=self.schema, **self.kw)
-
- @classmethod
- @util._with_legacy_names([
- ('name', 'index_name'),
- ('tablename', 'table_name')
- ])
- def drop_index(cls, operations, index_name,
- table_name=None, schema=None, **kw):
- r"""Issue a "drop index" instruction using the current
- migration context.
-
- e.g.::
-
- op.drop_index("accounts")
-
- :param index_name: name of the index.
- :param table_name: name of the owning table. Some
- backends such as Microsoft SQL Server require this.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- :param \**kw: Additional keyword arguments not mentioned above are
- dialect specific, and passed in the form
- ``<dialectname>_<argname>``.
- See the documentation regarding an individual dialect at
- :ref:`dialect_toplevel` for detail on documented arguments.
-
- .. versionadded:: 0.9.5 Support for dialect-specific keyword
- arguments for DROP INDEX
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> index_name
-
- """
- op = cls(index_name, table_name=table_name, schema=schema, **kw)
- return operations.invoke(op)
-
- @classmethod
- @util._with_legacy_names([('name', 'index_name')])
- def batch_drop_index(cls, operations, index_name, **kw):
- """Issue a "drop index" instruction using the
- current batch migration context.
-
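- e.g., a sketch (index name is illustrative)::
-
- with op.batch_alter_table("t1") as batch_op:
- batch_op.drop_index('ik_test')
-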
- .. seealso::
-
- :meth:`.Operations.drop_index`
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> index_name
-
- """
-
- op = cls(
- index_name, table_name=operations.impl.table_name,
- schema=operations.impl.schema, **kw
- )
- return operations.invoke(op)
-
-
-@Operations.register_operation("create_table")
-class CreateTableOp(MigrateOperation):
- """Represent a create table operation."""
-
- def __init__(
- self, table_name, columns, schema=None, _orig_table=None, **kw):
- self.table_name = table_name
- self.columns = columns
- self.schema = schema
- self.kw = kw
- self._orig_table = _orig_table
-
- def reverse(self):
- return DropTableOp.from_table(self.to_table())
-
- def to_diff_tuple(self):
- return ("add_table", self.to_table())
-
- @classmethod
- def from_table(cls, table):
- return cls(
- table.name,
- list(table.c) + list(table.constraints),
- schema=table.schema,
- _orig_table=table,
- **table.kwargs
- )
-
- def to_table(self, migration_context=None):
- if self._orig_table is not None:
- return self._orig_table
- schema_obj = schemaobj.SchemaObjects(migration_context)
-
- return schema_obj.table(
- self.table_name, *self.columns, schema=self.schema, **self.kw
- )
-
- @classmethod
- @util._with_legacy_names([('name', 'table_name')])
- def create_table(cls, operations, table_name, *columns, **kw):
- r"""Issue a "create table" instruction using the current migration
- context.
-
- This directive receives an argument list similar to that of the
- traditional :class:`sqlalchemy.schema.Table` construct, but without the
- metadata::
-
- from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, TIMESTAMP, Column, func
- from alembic import op
-
- op.create_table(
- 'account',
- Column('id', INTEGER, primary_key=True),
- Column('name', VARCHAR(50), nullable=False),
- Column('description', NVARCHAR(200)),
- Column('timestamp', TIMESTAMP, server_default=func.now())
- )
-
- Note that :meth:`.create_table` accepts
- :class:`~sqlalchemy.schema.Column`
- constructs directly from the SQLAlchemy library. In particular,
- default values to be created on the database side are
- specified using the ``server_default`` parameter, and not
- ``default`` which only specifies Python-side defaults::
-
- from alembic import op
- from sqlalchemy import Column, INTEGER, TIMESTAMP, func
-
- # specify "DEFAULT NOW" along with the "timestamp" column
- op.create_table('account',
- Column('id', INTEGER, primary_key=True),
- Column('timestamp', TIMESTAMP, server_default=func.now())
- )
-
- The function also returns a newly created
- :class:`~sqlalchemy.schema.Table` object, corresponding to the table
- specification given, which is suitable for
- immediate SQL operations, in particular
- :meth:`.Operations.bulk_insert`::
-
- from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, TIMESTAMP, Column, func
- from alembic import op
-
- account_table = op.create_table(
- 'account',
- Column('id', INTEGER, primary_key=True),
- Column('name', VARCHAR(50), nullable=False),
- Column('description', NVARCHAR(200)),
- Column('timestamp', TIMESTAMP, server_default=func.now())
- )
-
- op.bulk_insert(
- account_table,
- [
- {"name": "A1", "description": "account 1"},
- {"name": "A2", "description": "account 2"},
- ]
- )
-
- .. versionadded:: 0.7.0
-
- :param table_name: Name of the table
- :param \*columns: collection of :class:`~sqlalchemy.schema.Column`
- objects within
- the table, as well as optional :class:`~sqlalchemy.schema.Constraint`
- objects
- and :class:`~sqlalchemy.schema.Index` objects.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
- :param \**kw: Other keyword arguments are passed to the underlying
- :class:`sqlalchemy.schema.Table` object created for the command.
-
- :return: the :class:`~sqlalchemy.schema.Table` object corresponding
- to the parameters given.
-
- .. versionadded:: 0.7.0 - the :class:`~sqlalchemy.schema.Table`
- object is returned.
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> table_name
-
- """
- op = cls(table_name, columns, **kw)
- return operations.invoke(op)
-
-
-@Operations.register_operation("drop_table")
-class DropTableOp(MigrateOperation):
- """Represent a drop table operation."""
-
- def __init__(
- self, table_name, schema=None, table_kw=None, _orig_table=None):
- self.table_name = table_name
- self.schema = schema
- self.table_kw = table_kw or {}
- self._orig_table = _orig_table
-
- def to_diff_tuple(self):
- return ("remove_table", self.to_table())
-
- def reverse(self):
- if self._orig_table is None:
- raise ValueError(
- "operation is not reversible; "
- "original table is not present")
- return CreateTableOp.from_table(self._orig_table)
-
- @classmethod
- def from_table(cls, table):
- return cls(table.name, schema=table.schema, _orig_table=table)
-
- def to_table(self, migration_context=None):
- if self._orig_table is not None:
- return self._orig_table
- schema_obj = schemaobj.SchemaObjects(migration_context)
- return schema_obj.table(
- self.table_name,
- schema=self.schema,
- **self.table_kw)
-
- @classmethod
- @util._with_legacy_names([('name', 'table_name')])
- def drop_table(cls, operations, table_name, schema=None, **kw):
- r"""Issue a "drop table" instruction using the current
- migration context.
-
- e.g.::
-
- op.drop_table("accounts")
-
- :param table_name: Name of the table
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- :param \**kw: Other keyword arguments are passed to the underlying
- :class:`sqlalchemy.schema.Table` object created for the command.
-
- .. versionchanged:: 0.8.0 The following positional argument names
- have been changed:
-
- * name -> table_name
-
- """
- op = cls(table_name, schema=schema, table_kw=kw)
- operations.invoke(op)
-
-
-class AlterTableOp(MigrateOperation):
- """Represent an alter table operation."""
-
- def __init__(self, table_name, schema=None):
- self.table_name = table_name
- self.schema = schema
-
-
-@Operations.register_operation("rename_table")
-class RenameTableOp(AlterTableOp):
- """Represent a rename table operation."""
-
- def __init__(self, old_table_name, new_table_name, schema=None):
- super(RenameTableOp, self).__init__(old_table_name, schema=schema)
- self.new_table_name = new_table_name
-
- @classmethod
- def rename_table(
- cls, operations, old_table_name, new_table_name, schema=None):
- """Emit an ALTER TABLE to rename a table.
-
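- e.g., a sketch (table names are illustrative)::
-
- from alembic import op
- op.rename_table('account_old', 'account')
-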
- :param old_table_name: old name.
- :param new_table_name: new name.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- """
- op = cls(old_table_name, new_table_name, schema=schema)
- return operations.invoke(op)
-
-
-@Operations.register_operation("alter_column")
-@BatchOperations.register_operation("alter_column", "batch_alter_column")
-class AlterColumnOp(AlterTableOp):
- """Represent an alter column operation."""
-
- def __init__(
- self, table_name, column_name, schema=None,
- existing_type=None,
- existing_server_default=False,
- existing_nullable=None,
- modify_nullable=None,
- modify_server_default=False,
- modify_name=None,
- modify_type=None,
- **kw
- ):
- super(AlterColumnOp, self).__init__(table_name, schema=schema)
- self.column_name = column_name
- self.existing_type = existing_type
- self.existing_server_default = existing_server_default
- self.existing_nullable = existing_nullable
- self.modify_nullable = modify_nullable
- self.modify_server_default = modify_server_default
- self.modify_name = modify_name
- self.modify_type = modify_type
- self.kw = kw
-
- def to_diff_tuple(self):
- col_diff = []
- schema, tname, cname = self.schema, self.table_name, self.column_name
-
- if self.modify_type is not None:
- col_diff.append(
- ("modify_type", schema, tname, cname,
- {
- "existing_nullable": self.existing_nullable,
- "existing_server_default": self.existing_server_default,
- },
- self.existing_type,
- self.modify_type)
- )
-
- if self.modify_nullable is not None:
- col_diff.append(
- ("modify_nullable", schema, tname, cname,
- {
- "existing_type": self.existing_type,
- "existing_server_default": self.existing_server_default
- },
- self.existing_nullable,
- self.modify_nullable)
- )
-
- if self.modify_server_default is not False:
- col_diff.append(
- ("modify_default", schema, tname, cname,
- {
- "existing_nullable": self.existing_nullable,
- "existing_type": self.existing_type
- },
- self.existing_server_default,
- self.modify_server_default)
- )
-
- return col_diff
-
- def has_changes(self):
- hc1 = self.modify_nullable is not None or \
- self.modify_server_default is not False or \
- self.modify_type is not None
- if hc1:
- return True
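- # note the "else" below binds to the "for" loop: False is
- # returned only if no "modify_*" key is present in self.kw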
- for kw in self.kw:
- if kw.startswith('modify_'):
- return True
- else:
- return False
-
- def reverse(self):
- kw = self.kw.copy()
- kw['existing_type'] = self.existing_type
- kw['existing_nullable'] = self.existing_nullable
- kw['existing_server_default'] = self.existing_server_default
- if self.modify_type is not None:
- kw['modify_type'] = self.modify_type
- if self.modify_nullable is not None:
- kw['modify_nullable'] = self.modify_nullable
- if self.modify_server_default is not False:
- kw['modify_server_default'] = self.modify_server_default
-
- # TODO: make this a little simpler
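- # collect the attribute suffixes appearing under the
- # "existing_" / "modify_" prefixes, then swap each pair so
- # the reversed operation restores the prior state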
- all_keys = set(m.group(1) for m in [
- re.match(r'^(?:existing_|modify_)(.+)$', k)
- for k in kw
- ] if m)
-
- for k in all_keys:
- if 'modify_%s' % k in kw:
- swap = kw['existing_%s' % k]
- kw['existing_%s' % k] = kw['modify_%s' % k]
- kw['modify_%s' % k] = swap
-
- return self.__class__(
- self.table_name, self.column_name, schema=self.schema,
- **kw
- )
-
- @classmethod
- @util._with_legacy_names([('name', 'new_column_name')])
- def alter_column(
- cls, operations, table_name, column_name,
- nullable=None,
- server_default=False,
- new_column_name=None,
- type_=None,
- existing_type=None,
- existing_server_default=False,
- existing_nullable=None,
- schema=None, **kw
- ):
- """Issue an "alter column" instruction using the
- current migration context.
-
- Generally, only that aspect of the column which
- is being changed, i.e. name, type, nullability,
- default, needs to be specified. Multiple changes
- can also be specified at once and the backend should
- "do the right thing", emitting each change either
- separately or together as the backend allows.
-
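- e.g., a sketch changing type and nullability together (table,
- column and types are illustrative)::
-
- from alembic import op
- import sqlalchemy as sa
-
- op.alter_column(
- 'user', 'name',
- existing_type=sa.VARCHAR(30),
- type_=sa.VARCHAR(50),
- nullable=False
- )
-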
- MySQL has special requirements here, since MySQL
- cannot ALTER a column without a full specification.
- When producing MySQL-compatible migration files,
- it is recommended that the ``existing_type``,
- ``existing_server_default``, and ``existing_nullable``
- parameters be present, if not being altered.
-
- Type changes which are against the SQLAlchemy
- "schema" types :class:`~sqlalchemy.types.Boolean`
- and :class:`~sqlalchemy.types.Enum` may also
- add or drop constraints which accompany those
- types on backends that don't support them natively.
- The ``existing_type`` argument is
- used in this case to identify and remove a previous
- constraint that was bound to the type object.
-
- :param table_name: string name of the target table.
- :param column_name: string name of the target column,
- as it exists before the operation begins.
- :param nullable: Optional; specify ``True`` or ``False``
- to alter the column's nullability.
- :param server_default: Optional; specify a string
- SQL expression, :func:`~sqlalchemy.sql.expression.text`,
- or :class:`~sqlalchemy.schema.DefaultClause` to indicate
- an alteration to the column's default value.
- Set to ``None`` to have the default removed.
- :param new_column_name: Optional; specify a string name here to
- indicate the new name within a column rename operation.
- :param type_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
- type object to specify a change to the column's type.
- For SQLAlchemy types that also indicate a constraint (i.e.
- :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
- the constraint is also generated.
- :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
- currently understood by the MySQL dialect.
- :param existing_type: Optional; a
- :class:`~sqlalchemy.types.TypeEngine`
- type object to specify the previous type. This
- is required for all MySQL column alter operations that
- don't otherwise specify a new type, as well as for
- when nullability is being changed on a SQL Server
- column. It is also used if the type is a so-called
- SQLAlchemy "schema" type which may define a constraint (i.e.
- :class:`~sqlalchemy.types.Boolean`,
- :class:`~sqlalchemy.types.Enum`),
- so that the constraint can be dropped.
- :param existing_server_default: Optional; The existing
- default value of the column. Required on MySQL if
- an existing default is not being changed; else MySQL
- removes the default.
- :param existing_nullable: Optional; the existing nullability
- of the column. Required on MySQL if the existing nullability
- is not being changed; else MySQL sets this to NULL.
- :param existing_autoincrement: Optional; the existing autoincrement
- of the column. Used for MySQL's system of altering a column
- that specifies ``AUTO_INCREMENT``.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- :param postgresql_using: String argument which will indicate a
- SQL expression to render within the Postgresql-specific USING clause
- within ALTER COLUMN. This string is taken directly as raw SQL which
- must explicitly include any necessary quoting or escaping of tokens
- within the expression.
-
- .. versionadded:: 0.8.8
-
- """
-
- alt = cls(
- table_name, column_name, schema=schema,
- existing_type=existing_type,
- existing_server_default=existing_server_default,
- existing_nullable=existing_nullable,
- modify_name=new_column_name,
- modify_type=type_,
- modify_server_default=server_default,
- modify_nullable=nullable,
- **kw
- )
-
- return operations.invoke(alt)
-
- @classmethod
- def batch_alter_column(
- cls, operations, column_name,
- nullable=None,
- server_default=False,
- new_column_name=None,
- type_=None,
- existing_type=None,
- existing_server_default=False,
- existing_nullable=None,
- **kw
- ):
- """Issue an "alter column" instruction using the current
- batch migration context.
-
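- e.g., a sketch (names and types are illustrative)::
-
- import sqlalchemy as sa
-
- with op.batch_alter_table("user") as batch_op:
- batch_op.alter_column(
- 'name', existing_type=sa.VARCHAR(30), nullable=False)
-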
- .. seealso::
-
- :meth:`.Operations.alter_column`
-
- """
- alt = cls(
- operations.impl.table_name, column_name,
- schema=operations.impl.schema,
- existing_type=existing_type,
- existing_server_default=existing_server_default,
- existing_nullable=existing_nullable,
- modify_name=new_column_name,
- modify_type=type_,
- modify_server_default=server_default,
- modify_nullable=nullable,
- **kw
- )
-
- return operations.invoke(alt)
-
-
-@Operations.register_operation("add_column")
-@BatchOperations.register_operation("add_column", "batch_add_column")
-class AddColumnOp(AlterTableOp):
- """Represent an add column operation."""
-
- def __init__(self, table_name, column, schema=None):
- super(AddColumnOp, self).__init__(table_name, schema=schema)
- self.column = column
-
- def reverse(self):
- return DropColumnOp.from_column_and_tablename(
- self.schema, self.table_name, self.column)
-
- def to_diff_tuple(self):
- return ("add_column", self.schema, self.table_name, self.column)
-
- def to_column(self):
- return self.column
-
- @classmethod
- def from_column(cls, col):
- return cls(col.table.name, col, schema=col.table.schema)
-
- @classmethod
- def from_column_and_tablename(cls, schema, tname, col):
- return cls(tname, col, schema=schema)
-
- @classmethod
- def add_column(cls, operations, table_name, column, schema=None):
- """Issue an "add column" instruction using the current
- migration context.
-
- e.g.::
-
- from alembic import op
- from sqlalchemy import Column, String
-
- op.add_column('organization',
- Column('name', String())
- )
-
- The provided :class:`~sqlalchemy.schema.Column` object can also
- specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing
- a remote table name. Alembic will automatically generate a stub
- "referenced" table and emit a second ALTER statement in order
- to add the constraint separately::
-
- from alembic import op
- from sqlalchemy import Column, INTEGER, ForeignKey
-
- op.add_column('organization',
- Column('account_id', INTEGER, ForeignKey('accounts.id'))
- )
-
- Note that this statement uses the :class:`~sqlalchemy.schema.Column`
- construct as is from the SQLAlchemy library. In particular,
- default values to be created on the database side are
- specified using the ``server_default`` parameter, and not
- ``default`` which only specifies Python-side defaults::
-
- from alembic import op
- from sqlalchemy import Column, TIMESTAMP, func
-
- # specify "DEFAULT NOW" along with the column add
- op.add_column('account',
- Column('timestamp', TIMESTAMP, server_default=func.now())
- )
-
- :param table_name: String name of the parent table.
- :param column: a :class:`sqlalchemy.schema.Column` object
- representing the new column.
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
-
- """
-
- op = cls(table_name, column, schema=schema)
- return operations.invoke(op)
-
- @classmethod
- def batch_add_column(cls, operations, column):
- """Issue an "add column" instruction using the current
- batch migration context.
-
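- e.g., a sketch (table and column names are illustrative)::
-
- from sqlalchemy import Column, String
-
- with op.batch_alter_table("organization") as batch_op:
- batch_op.add_column(Column('name', String()))
-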
- .. seealso::
-
- :meth:`.Operations.add_column`
-
- """
- op = cls(
- operations.impl.table_name, column,
- schema=operations.impl.schema
- )
- return operations.invoke(op)
-
-
-@Operations.register_operation("drop_column")
-@BatchOperations.register_operation("drop_column", "batch_drop_column")
-class DropColumnOp(AlterTableOp):
- """Represent a drop column operation."""
-
- def __init__(
- self, table_name, column_name, schema=None,
- _orig_column=None, **kw):
- super(DropColumnOp, self).__init__(table_name, schema=schema)
- self.column_name = column_name
- self.kw = kw
- self._orig_column = _orig_column
-
- def to_diff_tuple(self):
- return (
- "remove_column", self.schema, self.table_name, self.to_column())
-
- def reverse(self):
- if self._orig_column is None:
- raise ValueError(
- "operation is not reversible; "
- "original column is not present")
-
- return AddColumnOp.from_column_and_tablename(
- self.schema, self.table_name, self._orig_column)
-
- @classmethod
- def from_column_and_tablename(cls, schema, tname, col):
- return cls(tname, col.name, schema=schema, _orig_column=col)
-
- def to_column(self, migration_context=None):
- if self._orig_column is not None:
- return self._orig_column
- schema_obj = schemaobj.SchemaObjects(migration_context)
- return schema_obj.column(self.column_name, NULLTYPE)
-
- @classmethod
- def drop_column(
- cls, operations, table_name, column_name, schema=None, **kw):
- """Issue a "drop column" instruction using the current
- migration context.
-
- e.g.::
-
- op.drop_column('organization', 'account_id')
-
- :param table_name: name of table
- :param column_name: name of column
- :param schema: Optional schema name to operate within. To control
- quoting of the schema outside of the default behavior, use
- the SQLAlchemy construct
- :class:`~sqlalchemy.sql.elements.quoted_name`.
-
- .. versionadded:: 0.7.0 'schema' can now accept a
- :class:`~sqlalchemy.sql.elements.quoted_name` construct.
-
- :param mssql_drop_check: Optional boolean. When ``True``, on
- Microsoft SQL Server only, first
- drop the CHECK constraint on the column using a
- SQL-script-compatible
- block that selects into a @variable from sys.check_constraints,
- then exec's a separate DROP CONSTRAINT for that constraint.
- :param mssql_drop_default: Optional boolean. When ``True``, on
- Microsoft SQL Server only, first
- drop the DEFAULT constraint on the column using a
- SQL-script-compatible
- block that selects into a @variable from sys.default_constraints,
- then exec's a separate DROP CONSTRAINT for that default.
- :param mssql_drop_foreign_key: Optional boolean. When ``True``, on
- Microsoft SQL Server only, first
- drop a single FOREIGN KEY constraint on the column using a
- SQL-script-compatible
- block that selects into a @variable from
- sys.foreign_keys/sys.foreign_key_columns,
- then exec's a separate DROP CONSTRAINT for that constraint. Only
- works if the column has exactly one FK constraint which refers to
- it, at the moment.
-
- .. versionadded:: 0.6.2
-
- """
-
- op = cls(table_name, column_name, schema=schema, **kw)
- return operations.invoke(op)
-
- @classmethod
- def batch_drop_column(cls, operations, column_name, **kw):
- """Issue a "drop column" instruction using the current
- batch migration context.
-
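- e.g., a sketch (table and column names are illustrative)::
-
- with op.batch_alter_table("organization") as batch_op:
- batch_op.drop_column('account_id')
-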
- .. seealso::
-
- :meth:`.Operations.drop_column`
-
- """
- op = cls(
- operations.impl.table_name, column_name,
- schema=operations.impl.schema, **kw)
- return operations.invoke(op)
-
-
-@Operations.register_operation("bulk_insert")
-class BulkInsertOp(MigrateOperation):
- """Represent a bulk insert operation."""
-
- def __init__(self, table, rows, multiinsert=True):
- self.table = table
- self.rows = rows
- self.multiinsert = multiinsert
-
- @classmethod
- def bulk_insert(cls, operations, table, rows, multiinsert=True):
- """Issue a "bulk insert" operation using the current
- migration context.
-
- This provides a means of representing an INSERT of multiple rows
- which works equally well in the context of executing on a live
- connection as well as that of generating a SQL script. In the
- case of a SQL script, the values are rendered inline into the
- statement.
-
- e.g.::
-
- from alembic import op
- from datetime import date
- from sqlalchemy.sql import table, column
- from sqlalchemy import String, Integer, Date
-
- # Create an ad-hoc table to use for the insert statement.
- accounts_table = table('account',
- column('id', Integer),
- column('name', String),
- column('create_date', Date)
- )
-
- op.bulk_insert(accounts_table,
- [
- {'id':1, 'name':'John Smith',
- 'create_date':date(2010, 10, 5)},
- {'id':2, 'name':'Ed Williams',
- 'create_date':date(2007, 5, 27)},
- {'id':3, 'name':'Wendy Jones',
- 'create_date':date(2008, 8, 15)},
- ]
- )
-
- When using --sql mode, some datatypes may not render inline
- automatically, such as dates and other special types. When this
- issue is present, :meth:`.Operations.inline_literal` may be used::
-
- op.bulk_insert(accounts_table,
- [
- {'id':1, 'name':'John Smith',
- 'create_date':op.inline_literal("2010-10-05")},
- {'id':2, 'name':'Ed Williams',
- 'create_date':op.inline_literal("2007-05-27")},
- {'id':3, 'name':'Wendy Jones',
- 'create_date':op.inline_literal("2008-08-15")},
- ],
- multiinsert=False
- )
-
- When using :meth:`.Operations.inline_literal` in conjunction with
- :meth:`.Operations.bulk_insert`, in order for the statement to work
- in "online" (e.g. non --sql) mode, the
- :paramref:`~.Operations.bulk_insert.multiinsert`
- flag should be set to ``False``, which will have the effect of
- individual INSERT statements being emitted to the database, each
- with a distinct VALUES clause, so that the "inline" values can
- still be rendered, rather than attempting to pass the values
- as bound parameters.
-
- .. versionadded:: 0.6.4 :meth:`.Operations.inline_literal` can now
- be used with :meth:`.Operations.bulk_insert`, and the
- :paramref:`~.Operations.bulk_insert.multiinsert` flag has
- been added to assist in this usage when running in "online"
- mode.
-
- :param table: a table object which represents the target of the INSERT.
-
- :param rows: a list of dictionaries indicating rows.
-
- :param multiinsert: when at its default of True and --sql mode is not
- enabled, the INSERT statement will be executed using
- "executemany()" style, where all elements in the list of
- dictionaries are passed as bound parameters in a single
- list. Setting this to False results in individual INSERT
- statements being emitted per parameter set, and is needed
- in those cases where non-literal values are present in the
- parameter sets.
-
- .. versionadded:: 0.6.4
-
- """
-
- op = cls(table, rows, multiinsert=multiinsert)
- operations.invoke(op)
-
-
-@Operations.register_operation("execute")
-class ExecuteSQLOp(MigrateOperation):
- """Represent an execute SQL operation."""
-
- def __init__(self, sqltext, execution_options=None):
- self.sqltext = sqltext
- self.execution_options = execution_options
-
- @classmethod
- def execute(cls, operations, sqltext, execution_options=None):
- """Execute the given SQL using the current migration context.
-
- In a SQL script context, the statement is emitted directly to the
- output stream. There is *no* return result, however, as this
- function is oriented towards generating a change script
- that can run in "offline" mode. For full interaction
- with a connected database, use the "bind" available
- from the context::
-
- from alembic import op
- connection = op.get_bind()
-
- Also note that any parameterized statement here *will not work*
- in offline mode - INSERT, UPDATE and DELETE statements which refer
- to literal values would need to render
- inline expressions. For simple use cases, the
- :meth:`.inline_literal` function can be used for **rudimentary**
- quoting of string values. For "bulk" inserts, consider using
- :meth:`.bulk_insert`.
-
- For example, to emit an UPDATE statement which is equally
- compatible with both online and offline mode::
-
- from sqlalchemy.sql import table, column
- from sqlalchemy import String
- from alembic import op
-
- account = table('account',
- column('name', String)
- )
- op.execute(
- account.update().\\
- where(account.c.name==op.inline_literal('account 1')).\\
- values({'name':op.inline_literal('account 2')})
- )
-
- Note above we also used the SQLAlchemy
- :func:`sqlalchemy.sql.expression.table`
- and :func:`sqlalchemy.sql.expression.column` constructs to
- make a brief, ad-hoc table construct just for our UPDATE
- statement. A full :class:`~sqlalchemy.schema.Table` construct
- of course works perfectly fine as well, though note it's a
- recommended practice to at least ensure the definition of a
- table is self-contained within the migration script, rather
- than imported from a module that may break compatibility with
- older migrations.
-
- :param sqltext: Any legal SQLAlchemy expression, including:
-
- * a string
- * a :func:`sqlalchemy.sql.expression.text` construct.
- * a :func:`sqlalchemy.sql.expression.update`,
- :func:`sqlalchemy.sql.expression.insert`,
- or :func:`sqlalchemy.sql.expression.delete` construct.
- * Pretty much anything that's "executable" as described
- in :ref:`sqlexpression_toplevel`.
-
- :param execution_options: Optional dictionary of
- execution options, will be passed to
- :meth:`sqlalchemy.engine.Connection.execution_options`.
- """
- op = cls(sqltext, execution_options=execution_options)
- return operations.invoke(op)
-
-
-class OpContainer(MigrateOperation):
- """Represent a sequence of operations operation."""
- def __init__(self, ops=()):
- self.ops = ops
-
- def is_empty(self):
- return not self.ops
-
- def as_diffs(self):
- return list(OpContainer._ops_as_diffs(self))
-
- @classmethod
- def _ops_as_diffs(cls, migrations):
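- # recursively descend into nested op containers, yielding
- # a flat stream of diff tuples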
- for op in migrations.ops:
- if hasattr(op, 'ops'):
- for sub_op in cls._ops_as_diffs(op):
- yield sub_op
- else:
- yield op.to_diff_tuple()
-
-
-class ModifyTableOps(OpContainer):
- """Contains a sequence of operations that all apply to a single Table."""
-
- def __init__(self, table_name, ops, schema=None):
- super(ModifyTableOps, self).__init__(ops)
- self.table_name = table_name
- self.schema = schema
-
- def reverse(self):
- return ModifyTableOps(
- self.table_name,
- ops=list(reversed(
- [op.reverse() for op in self.ops]
- )),
- schema=self.schema
- )
-
-
-class UpgradeOps(OpContainer):
- """contains a sequence of operations that would apply to the
- 'upgrade' stream of a script.
-
- .. seealso::
-
- :ref:`customizing_revision`
-
- """
-
- def __init__(self, ops=(), upgrade_token="upgrades"):
- super(UpgradeOps, self).__init__(ops=ops)
- self.upgrade_token = upgrade_token
-
- def reverse_into(self, downgrade_ops):
- downgrade_ops.ops[:] = list(reversed(
- [op.reverse() for op in self.ops]
- ))
- return downgrade_ops
-
- def reverse(self):
- return self.reverse_into(DowngradeOps(ops=[]))
-
-
-class DowngradeOps(OpContainer):
- """contains a sequence of operations that would apply to the
- 'downgrade' stream of a script.
-
- .. seealso::
-
- :ref:`customizing_revision`
-
- """
-
- def __init__(self, ops=(), downgrade_token="downgrades"):
- super(DowngradeOps, self).__init__(ops=ops)
- self.downgrade_token = downgrade_token
-
- def reverse(self):
- return UpgradeOps(
- ops=list(reversed(
- [op.reverse() for op in self.ops]
- ))
- )
-
-
-class MigrationScript(MigrateOperation):
- """represents a migration script.
-
- E.g. when autogenerate encounters this object, this corresponds to the
- production of an actual script file.
-
- A normal :class:`.MigrationScript` object would contain a single
- :class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive.
- These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops``
- attributes.
-
- In the case of an autogenerate operation that runs multiple times,
- such as the multiple database example in the "multidb" template,
- the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled,
- and instead these objects should be accessed via the ``.upgrade_ops_list``
- and ``.downgrade_ops_list`` list-based attributes. These latter
- attributes are always available at the very least as single-element lists.
-
- .. versionchanged:: 0.8.1 the ``.upgrade_ops`` and ``.downgrade_ops``
- attributes should be accessed via the ``.upgrade_ops_list``
- and ``.downgrade_ops_list`` attributes if multiple autogenerate
- passes proceed on the same :class:`.MigrationScript` object.
-
- .. seealso::
-
- :ref:`customizing_revision`
-
- """
-
- def __init__(
- self, rev_id, upgrade_ops, downgrade_ops,
- message=None,
- imports=set(), head=None, splice=None,
- branch_label=None, version_path=None, depends_on=None):
- self.rev_id = rev_id
- self.message = message
- self.imports = imports
- self.head = head
- self.splice = splice
- self.branch_label = branch_label
- self.version_path = version_path
- self.depends_on = depends_on
- self.upgrade_ops = upgrade_ops
- self.downgrade_ops = downgrade_ops
-
- @property
- def upgrade_ops(self):
- """An instance of :class:`.UpgradeOps`.
-
- .. seealso::
-
- :attr:`.MigrationScript.upgrade_ops_list`
- """
- if len(self._upgrade_ops) > 1:
- raise ValueError(
- "This MigrationScript instance has a multiple-entry "
- "list for UpgradeOps; please use the "
- "upgrade_ops_list attribute.")
- elif not self._upgrade_ops:
- return None
- else:
- return self._upgrade_ops[0]
-
- @upgrade_ops.setter
- def upgrade_ops(self, upgrade_ops):
- self._upgrade_ops = util.to_list(upgrade_ops)
- for elem in self._upgrade_ops:
- assert isinstance(elem, UpgradeOps)
-
- @property
- def downgrade_ops(self):
- """An instance of :class:`.DowngradeOps`.
-
- .. seealso::
-
- :attr:`.MigrationScript.downgrade_ops_list`
- """
- if len(self._downgrade_ops) > 1:
- raise ValueError(
- "This MigrationScript instance has a multiple-entry "
- "list for DowngradeOps; please use the "
- "downgrade_ops_list attribute.")
- elif not self._downgrade_ops:
- return None
- else:
- return self._downgrade_ops[0]
-
- @downgrade_ops.setter
- def downgrade_ops(self, downgrade_ops):
- self._downgrade_ops = util.to_list(downgrade_ops)
- for elem in self._downgrade_ops:
- assert isinstance(elem, DowngradeOps)
-
- @property
- def upgrade_ops_list(self):
- """A list of :class:`.UpgradeOps` instances.
-
- This is used in place of the :attr:`.MigrationScript.upgrade_ops`
- attribute when dealing with a revision operation that does
- multiple autogenerate passes.
-
- .. versionadded:: 0.8.1
-
- """
- return self._upgrade_ops
-
- @property
- def downgrade_ops_list(self):
- """A list of :class:`.DowngradeOps` instances.
-
- This is used in place of the :attr:`.MigrationScript.downgrade_ops`
- attribute when dealing with a revision operation that does
- multiple autogenerate passes.
-
- .. versionadded:: 0.8.1
-
- """
- return self._downgrade_ops
-
diff --git a/venv/Lib/site-packages/alembic/operations/schemaobj.py b/venv/Lib/site-packages/alembic/operations/schemaobj.py
deleted file mode 100644
index a01f5be..0000000
--- a/venv/Lib/site-packages/alembic/operations/schemaobj.py
+++ /dev/null
@@ -1,159 +0,0 @@
-from sqlalchemy import schema as sa_schema
-from sqlalchemy.types import NULLTYPE, Integer
-from ..util.compat import string_types
-from .. import util
-
-
-class SchemaObjects(object):
-
- def __init__(self, migration_context=None):
- self.migration_context = migration_context
-
- def primary_key_constraint(self, name, table_name, cols, schema=None):
- m = self.metadata()
- columns = [sa_schema.Column(n, NULLTYPE) for n in cols]
- t = sa_schema.Table(
- table_name, m,
- *columns,
- schema=schema)
- p = sa_schema.PrimaryKeyConstraint(
- *[t.c[n] for n in cols], name=name)
- t.append_constraint(p)
- return p
-
- def foreign_key_constraint(
- self, name, source, referent,
- local_cols, remote_cols,
- onupdate=None, ondelete=None,
- deferrable=None, source_schema=None,
- referent_schema=None, initially=None,
- match=None, **dialect_kw):
- m = self.metadata()
- if source == referent and source_schema == referent_schema:
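- # self-referential constraint: local and remote columns
- # live on the same (single) table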
- t1_cols = local_cols + remote_cols
- else:
- t1_cols = local_cols
- sa_schema.Table(
- referent, m,
- *[sa_schema.Column(n, NULLTYPE) for n in remote_cols],
- schema=referent_schema)
-
- t1 = sa_schema.Table(
- source, m,
- *[sa_schema.Column(n, NULLTYPE) for n in t1_cols],
- schema=source_schema)
-
- tname = "%s.%s" % (referent_schema, referent) if referent_schema \
- else referent
-
- if util.sqla_08:
- # "match" kw unsupported in 0.7
- dialect_kw['match'] = match
-
- f = sa_schema.ForeignKeyConstraint(local_cols,
- ["%s.%s" % (tname, n)
- for n in remote_cols],
- name=name,
- onupdate=onupdate,
- ondelete=ondelete,
- deferrable=deferrable,
- initially=initially,
- **dialect_kw
- )
- t1.append_constraint(f)
-
- return f
-
- def unique_constraint(self, name, source, local_cols, schema=None, **kw):
- t = sa_schema.Table(
- source, self.metadata(),
- *[sa_schema.Column(n, NULLTYPE) for n in local_cols],
- schema=schema)
- kw['name'] = name
- uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw)
- # TODO: need event tests to ensure the event
- # is fired off here
- t.append_constraint(uq)
- return uq
-
- def check_constraint(self, name, source, condition, schema=None, **kw):
- t = sa_schema.Table(source, self.metadata(),
- sa_schema.Column('x', Integer), schema=schema)
- ck = sa_schema.CheckConstraint(condition, name=name, **kw)
- t.append_constraint(ck)
- return ck
-
- def generic_constraint(self, name, table_name, type_, schema=None, **kw):
- t = self.table(table_name, schema=schema)
- types = {
- 'foreignkey': lambda name: sa_schema.ForeignKeyConstraint(
- [], [], name=name),
- 'primary': sa_schema.PrimaryKeyConstraint,
- 'unique': sa_schema.UniqueConstraint,
- 'check': lambda name: sa_schema.CheckConstraint("", name=name),
- None: sa_schema.Constraint
- }
- try:
- const = types[type_]
- except KeyError:
- raise TypeError("'type' can be one of %s" %
- ", ".join(sorted(repr(x) for x in types)))
- else:
- const = const(name=name)
- t.append_constraint(const)
- return const
-
- def metadata(self):
- kw = {}
- if self.migration_context is not None and \
- 'target_metadata' in self.migration_context.opts:
- mt = self.migration_context.opts['target_metadata']
- if hasattr(mt, 'naming_convention'):
- kw['naming_convention'] = mt.naming_convention
- return sa_schema.MetaData(**kw)
-
- def table(self, name, *columns, **kw):
- m = self.metadata()
- t = sa_schema.Table(name, m, *columns, **kw)
- for f in t.foreign_keys:
- self._ensure_table_for_fk(m, f)
- return t
-
- def column(self, name, type_, **kw):
- return sa_schema.Column(name, type_, **kw)
-
- def index(self, name, tablename, columns, schema=None, **kw):
- t = sa_schema.Table(
- tablename or 'no_table', self.metadata(),
- schema=schema
- )
- idx = sa_schema.Index(
- name,
- *[util.sqla_compat._textual_index_column(t, n) for n in columns],
- **kw)
- return idx
-
- def _parse_table_key(self, table_key):
- if '.' in table_key:
- tokens = table_key.split('.')
- sname = ".".join(tokens[0:-1])
- tname = tokens[-1]
- else:
- tname = table_key
- sname = None
- return (sname, tname)
-
- def _ensure_table_for_fk(self, metadata, fk):
- """create a placeholder Table object for the referent of a
- ForeignKey.
-
- """
- if isinstance(fk._colspec, string_types):
- table_key, cname = fk._colspec.rsplit('.', 1)
- sname, tname = self._parse_table_key(table_key)
- if table_key not in metadata.tables:
- rel_t = sa_schema.Table(tname, metadata, schema=sname)
- else:
- rel_t = metadata.tables[table_key]
- if cname not in rel_t.c:
- rel_t.append_column(sa_schema.Column(cname, NULLTYPE))
diff --git a/venv/Lib/site-packages/alembic/operations/toimpl.py b/venv/Lib/site-packages/alembic/operations/toimpl.py
deleted file mode 100644
index 1327367..0000000
--- a/venv/Lib/site-packages/alembic/operations/toimpl.py
+++ /dev/null
@@ -1,162 +0,0 @@
-from . import ops
-
-from . import Operations
-from sqlalchemy import schema as sa_schema
-
-
-@Operations.implementation_for(ops.AlterColumnOp)
-def alter_column(operations, operation):
-
- compiler = operations.impl.dialect.statement_compiler(
- operations.impl.dialect,
- None
- )
-
- existing_type = operation.existing_type
- existing_nullable = operation.existing_nullable
- existing_server_default = operation.existing_server_default
- type_ = operation.modify_type
- column_name = operation.column_name
- table_name = operation.table_name
- schema = operation.schema
- server_default = operation.modify_server_default
- new_column_name = operation.modify_name
- nullable = operation.modify_nullable
-
- def _count_constraint(constraint):
- return not isinstance(
- constraint,
- sa_schema.PrimaryKeyConstraint) and \
- (not constraint._create_rule or
- constraint._create_rule(compiler))
-
- if existing_type and type_:
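- # a type change may carry an implicit constraint, e.g. the
- # CHECK emitted for Boolean/Enum on backends without a
- # native type; drop the constraint tied to the old type
- # before altering, then re-add for the new type below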
- t = operations.schema_obj.table(
- table_name,
- sa_schema.Column(column_name, existing_type),
- schema=schema
- )
- for constraint in t.constraints:
- if _count_constraint(constraint):
- operations.impl.drop_constraint(constraint)
-
- operations.impl.alter_column(
- table_name, column_name,
- nullable=nullable,
- server_default=server_default,
- name=new_column_name,
- type_=type_,
- schema=schema,
- existing_type=existing_type,
- existing_server_default=existing_server_default,
- existing_nullable=existing_nullable,
- **operation.kw
- )
-
- if type_:
- t = operations.schema_obj.table(
- table_name,
- operations.schema_obj.column(column_name, type_),
- schema=schema
- )
- for constraint in t.constraints:
- if _count_constraint(constraint):
- operations.impl.add_constraint(constraint)
-
-
-@Operations.implementation_for(ops.DropTableOp)
-def drop_table(operations, operation):
- operations.impl.drop_table(
- operation.to_table(operations.migration_context)
- )
-
-
-@Operations.implementation_for(ops.DropColumnOp)
-def drop_column(operations, operation):
- column = operation.to_column(operations.migration_context)
- operations.impl.drop_column(
- operation.table_name,
- column,
- schema=operation.schema,
- **operation.kw
- )
-
-
-@Operations.implementation_for(ops.CreateIndexOp)
-def create_index(operations, operation):
- idx = operation.to_index(operations.migration_context)
- operations.impl.create_index(idx)
-
-
-@Operations.implementation_for(ops.DropIndexOp)
-def drop_index(operations, operation):
- operations.impl.drop_index(
- operation.to_index(operations.migration_context)
- )
-
-
-@Operations.implementation_for(ops.CreateTableOp)
-def create_table(operations, operation):
- table = operation.to_table(operations.migration_context)
- operations.impl.create_table(table)
- return table
-
-
-@Operations.implementation_for(ops.RenameTableOp)
-def rename_table(operations, operation):
- operations.impl.rename_table(
- operation.table_name,
- operation.new_table_name,
- schema=operation.schema)
-
-
-@Operations.implementation_for(ops.AddColumnOp)
-def add_column(operations, operation):
- table_name = operation.table_name
- column = operation.column
- schema = operation.schema
-
- t = operations.schema_obj.table(table_name, column, schema=schema)
- operations.impl.add_column(
- table_name,
- column,
- schema=schema
- )
- for constraint in t.constraints:
- if not isinstance(constraint, sa_schema.PrimaryKeyConstraint):
- operations.impl.add_constraint(constraint)
- for index in t.indexes:
- operations.impl.create_index(index)
-
-
-@Operations.implementation_for(ops.AddConstraintOp)
-def create_constraint(operations, operation):
- operations.impl.add_constraint(
- operation.to_constraint(operations.migration_context)
- )
-
-
-@Operations.implementation_for(ops.DropConstraintOp)
-def drop_constraint(operations, operation):
- operations.impl.drop_constraint(
- operations.schema_obj.generic_constraint(
- operation.constraint_name,
- operation.table_name,
- operation.constraint_type,
- schema=operation.schema,
- )
- )
-
-
-@Operations.implementation_for(ops.BulkInsertOp)
-def bulk_insert(operations, operation):
- operations.impl.bulk_insert(
- operation.table, operation.rows, multiinsert=operation.multiinsert)
-
-
-@Operations.implementation_for(ops.ExecuteSQLOp)
-def execute_sql(operations, operation):
- operations.migration_context.impl.execute(
- operation.sqltext,
- execution_options=operation.execution_options
- )
diff --git a/venv/Lib/site-packages/alembic/runtime/__init__.py b/venv/Lib/site-packages/alembic/runtime/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/venv/Lib/site-packages/alembic/runtime/environment.py b/venv/Lib/site-packages/alembic/runtime/environment.py
deleted file mode 100644
index ce9be63..0000000
--- a/venv/Lib/site-packages/alembic/runtime/environment.py
+++ /dev/null
@@ -1,936 +0,0 @@
-from ..operations import Operations
-from .migration import MigrationContext
-from .. import util
-
-
-class EnvironmentContext(util.ModuleClsProxy):
-
- """A configurational facade made available in an ``env.py`` script.
-
- The :class:`.EnvironmentContext` acts as a *facade* to the more
- nuts-and-bolts objects of :class:`.MigrationContext` as well as certain
- aspects of :class:`.Config`,
- within the context of the ``env.py`` script that is invoked by
- most Alembic commands.
-
- :class:`.EnvironmentContext` is normally instantiated
- when a command in :mod:`alembic.command` is run. It then makes
- itself available in the ``alembic.context`` module for the scope
- of the command. From within an ``env.py`` script, the current
- :class:`.EnvironmentContext` is available by importing this module.
-
- :class:`.EnvironmentContext` also supports programmatic usage.
- At this level, it acts as a Python context manager, that is, it is
- intended to be used with the
- ``with:`` statement. A typical use of :class:`.EnvironmentContext`::
-
- from alembic.config import Config
- from alembic.script import ScriptDirectory
-
- config = Config()
- config.set_main_option("script_location", "myapp:migrations")
- script = ScriptDirectory.from_config(config)
-
- def my_function(rev, context):
- '''do something with revision "rev", which
- will be the current database revision,
- and "context", which is the MigrationContext
- that the env.py will create'''
-
- with EnvironmentContext(
- config,
- script,
- fn = my_function,
- as_sql = False,
- starting_rev = 'base',
- destination_rev = 'head',
- tag = "sometag"
- ):
- script.run_env()
-
- The above script will invoke the ``env.py`` script
- within the migration environment. If and when ``env.py``
- calls :meth:`.MigrationContext.run_migrations`, the
- ``my_function()`` function above will be called
- by the :class:`.MigrationContext`, given the context
- itself as well as the current revision in the database.
-
- .. note::
-
- For most API usages other than full blown
- invocation of migration scripts, the :class:`.MigrationContext`
- and :class:`.ScriptDirectory` objects can be created and
- used directly. The :class:`.EnvironmentContext` object
- is *only* needed when you need to actually invoke the
- ``env.py`` module present in the migration environment.
-
- """
-
- _migration_context = None
-
- config = None
- """An instance of :class:`.Config` representing the
- configuration file contents as well as other variables
- set programmatically within it."""
-
- script = None
- """An instance of :class:`.ScriptDirectory` which provides
- programmatic access to version files within the ``versions/``
- directory.
-
- """
-
- def __init__(self, config, script, **kw):
- r"""Construct a new :class:`.EnvironmentContext`.
-
- :param config: a :class:`.Config` instance.
- :param script: a :class:`.ScriptDirectory` instance.
- :param \**kw: keyword options that will be ultimately
- passed along to the :class:`.MigrationContext` when
- :meth:`.EnvironmentContext.configure` is called.
-
- """
- self.config = config
- self.script = script
- self.context_opts = kw
-
- def __enter__(self):
- """Establish a context which provides a
- :class:`.EnvironmentContext` object to
- env.py scripts.
-
- The :class:`.EnvironmentContext` will
- be made available as ``from alembic import context``.
-
- """
- self._install_proxy()
- return self
-
- def __exit__(self, *arg, **kw):
- self._remove_proxy()
-
- def is_offline_mode(self):
- """Return True if the current migrations environment
- is running in "offline mode".
-
- This is ``True`` or ``False`` depending
- on the ``--sql`` flag passed.
-
- This function does not require that the :class:`.MigrationContext`
- has been configured.
-
- """
- return self.context_opts.get('as_sql', False)
-
- def is_transactional_ddl(self):
- """Return True if the context is configured to expect a
- transactional DDL capable backend.
-
- This defaults to the type of database in use, and
- can be overridden by the ``transactional_ddl`` argument
- to :meth:`.configure`
-
- This function requires that a :class:`.MigrationContext`
- has first been made available via :meth:`.configure`.
-
- """
- return self.get_context().impl.transactional_ddl
-
- def requires_connection(self):
- return not self.is_offline_mode()
-
- def get_head_revision(self):
- """Return the hex identifier of the 'head' script revision.
-
- If the script directory has multiple heads, this
- method raises a :class:`.CommandError`;
- :meth:`.EnvironmentContext.get_head_revisions` should be preferred.
-
- This function does not require that the :class:`.MigrationContext`
- has been configured.
-
- .. seealso:: :meth:`.EnvironmentContext.get_head_revisions`
-
- """
- return self.script.as_revision_number("head")
-
- def get_head_revisions(self):
- """Return the hex identifier of the 'heads' script revision(s).
-
- This returns a tuple containing the version number of all
- heads in the script directory.
-
- This function does not require that the :class:`.MigrationContext`
- has been configured.
-
- .. versionadded:: 0.7.0
-
- """
- return self.script.as_revision_number("heads")
-
- def get_starting_revision_argument(self):
- """Return the 'starting revision' argument,
- if the revision was passed using ``start:end``.
-
- This is only meaningful in "offline" mode.
- Returns ``None`` if no value is available
- or was configured.
-
- This function does not require that the :class:`.MigrationContext`
- has been configured.
-
- """
- if self._migration_context is not None:
- return self.script.as_revision_number(
- self.get_context()._start_from_rev)
- elif 'starting_rev' in self.context_opts:
- return self.script.as_revision_number(
- self.context_opts['starting_rev'])
- else:
- # this should raise only in the case that a command
- # is being run where the "starting rev" is never applicable;
- # this is to catch scripts which rely upon this in
- # non-sql mode or similar
- raise util.CommandError(
- "No starting revision argument is available.")
-
- def get_revision_argument(self):
- """Get the 'destination' revision argument.
-
- This is typically the argument passed to the
- ``upgrade`` or ``downgrade`` command.
-
- If it was specified as ``head``, the actual
- version number is returned; if specified
- as ``base``, ``None`` is returned.
-
- This function does not require that the :class:`.MigrationContext`
- has been configured.
-
- """
- return self.script.as_revision_number(
- self.context_opts['destination_rev'])
-
- def get_tag_argument(self):
- """Return the value passed for the ``--tag`` argument, if any.
-
- The ``--tag`` argument is not used directly by Alembic,
- but is available for custom ``env.py`` configurations that
- wish to use it; particularly for offline generation scripts
- that wish to generate tagged filenames.
-
- This function does not require that the :class:`.MigrationContext`
- has been configured.
-
- .. seealso::
-
- :meth:`.EnvironmentContext.get_x_argument` - a newer and more
- open ended system of extending ``env.py`` scripts via the command
- line.
-
- """
- return self.context_opts.get('tag', None)
-
- def get_x_argument(self, as_dictionary=False):
- """Return the value(s) passed for the ``-x`` argument, if any.
-
- The ``-x`` argument is an open ended flag that allows any user-defined
- value or values to be passed on the command line, then available
- here for consumption by a custom ``env.py`` script.
-
- The return value is a list, returned directly from the ``argparse``
- structure. If ``as_dictionary=True`` is passed, the ``x`` arguments
- are parsed using ``key=value`` format into a dictionary that is
- then returned.
-
- For example, to support passing a database URL on the command line,
- the standard ``env.py`` script can be modified like this::
-
- cmd_line_url = context.get_x_argument(
- as_dictionary=True).get('dbname')
- if cmd_line_url:
- engine = create_engine(cmd_line_url)
- else:
- engine = engine_from_config(
- config.get_section(config.config_ini_section),
- prefix='sqlalchemy.',
- poolclass=pool.NullPool)
-
- This then takes effect by running the ``alembic`` script as::
-
- alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
-
- This function does not require that the :class:`.MigrationContext`
- has been configured.
-
- .. versionadded:: 0.6.0
-
- .. seealso::
-
- :meth:`.EnvironmentContext.get_tag_argument`
-
- :attr:`.Config.cmd_opts`
-
- """
- if self.config.cmd_opts is not None:
- value = self.config.cmd_opts.x or []
- else:
- value = []
- if as_dictionary:
- value = dict(
- arg.split('=', 1) for arg in value
- )
- return value
-
- def configure(self,
- connection=None,
- url=None,
- dialect_name=None,
- transactional_ddl=None,
- transaction_per_migration=False,
- output_buffer=None,
- starting_rev=None,
- tag=None,
- template_args=None,
- render_as_batch=False,
- target_metadata=None,
- include_symbol=None,
- include_object=None,
- include_schemas=False,
- process_revision_directives=None,
- compare_type=False,
- compare_server_default=False,
- render_item=None,
- literal_binds=False,
- upgrade_token="upgrades",
- downgrade_token="downgrades",
- alembic_module_prefix="op.",
- sqlalchemy_module_prefix="sa.",
- user_module_prefix=None,
- on_version_apply=None,
- **kw
- ):
- """Configure a :class:`.MigrationContext` within this
- :class:`.EnvironmentContext` which will provide database
- connectivity and other configuration to a series of
- migration scripts.
-
- Many methods on :class:`.EnvironmentContext` require that
- this method has been called in order to function, as they
- ultimately need to have database access or at least access
- to the dialect in use. Those which do are documented as such.
-
- The important thing needed by :meth:`.configure` is a
- means to determine what kind of database dialect is in use.
- An actual connection to that database is needed only if
- the :class:`.MigrationContext` is to be used in
- "online" mode.
-
- If the :meth:`.is_offline_mode` function returns ``True``,
- then no connection is needed here. Otherwise, the
- ``connection`` parameter should be present as an
- instance of :class:`sqlalchemy.engine.Connection`.
-
- This function is typically called from the ``env.py``
- script within a migration environment. It can be called
- multiple times for an invocation. The most recent
- :class:`~sqlalchemy.engine.Connection`
- for which it was called is the one that will be operated upon
- by the next call to :meth:`.run_migrations`.
-
- General parameters:
-
- :param connection: a :class:`~sqlalchemy.engine.Connection`
- to use
- for SQL execution in "online" mode. When present, is also
- used to determine the type of dialect in use.
- :param url: a string database url, or a
- :class:`sqlalchemy.engine.url.URL` object.
- The type of dialect to be used will be derived from this if
- ``connection`` is not passed.
- :param dialect_name: string name of a dialect, such as
- "postgresql", "mssql", etc.
- The type of dialect to be used will be derived from this if
- ``connection`` and ``url`` are not passed.
- :param transactional_ddl: Force the usage of "transactional"
- DDL on or off;
- this otherwise defaults to whether or not the dialect in
- use supports it.
- :param transaction_per_migration: if True, nest each migration script
- in a transaction rather than the full series of migrations to
- run.
-
- .. versionadded:: 0.6.5
-
- :param output_buffer: a file-like object that will be used
- for textual output
- when the ``--sql`` option is used to generate SQL scripts.
- Defaults to
- ``sys.stdout`` if not passed here and also not present on
- the :class:`.Config`
- object. The value here overrides that of the :class:`.Config`
- object.
- :param output_encoding: when using ``--sql`` to generate SQL
- scripts, apply this encoding to the string output.
- :param literal_binds: when using ``--sql`` to generate SQL
- scripts, pass through the ``literal_binds`` flag to the compiler
- so that any literal values that would ordinarily be bound
- parameters are converted to plain strings.
-
- .. warning:: Dialects can typically only handle simple datatypes
- like strings and numbers for auto-literal generation. Datatypes
- like dates, intervals, and others may still require manual
- formatting, typically using :meth:`.Operations.inline_literal`.
-
- .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy
- versions prior to 0.8 where this feature is not supported.
-
- .. versionadded:: 0.7.6
-
- .. seealso::
-
- :meth:`.Operations.inline_literal`
-
- :param starting_rev: Override the "starting revision" argument
- when using ``--sql`` mode.
- :param tag: a string tag for usage by custom ``env.py`` scripts.
- Set via the ``--tag`` option, can be overridden here.
- :param template_args: dictionary of template arguments which
- will be added to the template argument environment when
- running the "revision" command. Note that the script environment
- is only run within the "revision" command if the --autogenerate
- option is used, or if the option "revision_environment=true"
- is present in the alembic.ini file.
-
- :param version_table: The name of the Alembic version table.
- The default is ``'alembic_version'``.
- :param version_table_schema: Optional schema to place version
- table within.
- :param version_table_pk: boolean, whether the Alembic version table
- should use a primary key constraint for the "value" column; this
- only takes effect when the table is first created.
- Defaults to True; setting to False should not be necessary and is
- here for backwards compatibility reasons.
-
- .. versionadded:: 0.8.10 Added the
- :paramref:`.EnvironmentContext.configure.version_table_pk`
- flag and additionally established that the Alembic version table
- has a primary key constraint by default.
-
- :param on_version_apply: a callable or collection of callables to be
- run for each migration step.
- The callables will be run in the order they are given, once for
- each migration step, after the respective operation has been
- applied but before its transaction is finalized.
- Each callable accepts no positional arguments and the following
- keyword arguments:
-
- * ``ctx``: the :class:`.MigrationContext` running the migration,
- * ``step``: a :class:`.MigrationInfo` representing the
- step currently being applied,
- * ``heads``: a collection of version strings representing the
- current heads,
- * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`.
-
- .. versionadded:: 0.9.3
-
-
- Parameters specific to the autogenerate feature, when
- ``alembic revision`` is run with the ``--autogenerate`` feature:
-
- :param target_metadata: a :class:`sqlalchemy.schema.MetaData`
- object, or a sequence of :class:`~sqlalchemy.schema.MetaData`
- objects, that will be consulted during autogeneration.
- The tables present in each :class:`~sqlalchemy.schema.MetaData`
- will be compared against
- what is locally available on the target
- :class:`~sqlalchemy.engine.Connection`
- to produce candidate upgrade/downgrade operations.
-
- .. versionchanged:: 0.9.0 the
- :paramref:`.EnvironmentContext.configure.target_metadata`
- parameter may now be passed a sequence of
- :class:`~sqlalchemy.schema.MetaData` objects to support
- autogeneration of multiple :class:`~sqlalchemy.schema.MetaData`
- collections.
-
- :param compare_type: Indicates type comparison behavior during
- an autogenerate
- operation. Defaults to ``False`` which disables type
- comparison. Set to
- ``True`` to turn on default type comparison, which has varied
- accuracy depending on backend. See :ref:`compare_types`
- for an example as well as information on other type
- comparison options.
-
- .. seealso::
-
- :ref:`compare_types`
-
- :paramref:`.EnvironmentContext.configure.compare_server_default`
-
- :param compare_server_default: Indicates server default comparison
- behavior during
- an autogenerate operation. Defaults to ``False`` which disables
- server default
- comparison. Set to ``True`` to turn on server default comparison,
- which has
- varied accuracy depending on backend.
-
- To customize server default comparison behavior, a callable may
- be specified which can filter server default comparisons during an
- autogenerate operation. The format of this callable is::
-
- def my_compare_server_default(context, inspected_column,
- metadata_column, inspected_default, metadata_default,
- rendered_metadata_default):
- # return True if the defaults are different,
- # False if not, or None to allow the default implementation
- # to compare these defaults
- return None
-
- context.configure(
- # ...
- compare_server_default = my_compare_server_default
- )
-
- ``inspected_column`` is a dictionary structure as returned by
- :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
- ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
- the local model environment.
-
- A return value of ``None`` indicates to allow default server default
- comparison
- to proceed. Note that some backends such as Postgresql actually
- execute
- the two defaults on the database side to compare for equivalence.
-
- .. seealso::
-
- :paramref:`.EnvironmentContext.configure.compare_type`
-
- :param include_object: A callable function which is given
- the chance to return ``True`` or ``False`` for any object,
- indicating if the given object should be considered in the
- autogenerate sweep.
-
- The function accepts the following positional arguments:
-
- * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
- as a :class:`~sqlalchemy.schema.Table`,
- :class:`~sqlalchemy.schema.Column`,
- :class:`~sqlalchemy.schema.Index`
- :class:`~sqlalchemy.schema.UniqueConstraint`,
- or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
- * ``name``: the name of the object. This is typically available
- via ``object.name``.
- * ``type``: a string describing the type of object; currently
- ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
- or ``"foreign_key_constraint"``
-
- .. versionadded:: 0.7.0 Support for indexes and unique constraints
- within the
- :paramref:`~.EnvironmentContext.configure.include_object` hook.
-
- .. versionadded:: 0.7.1 Support for foreign keys within the
- :paramref:`~.EnvironmentContext.configure.include_object` hook.
-
- * ``reflected``: ``True`` if the given object was produced based on
- table reflection, ``False`` if it's from a local :class:`.MetaData`
- object.
- * ``compare_to``: the object being compared against, if available,
- else ``None``.
-
- E.g.::
-
- def include_object(object, name, type_, reflected, compare_to):
- if (type_ == "column" and
- not reflected and
- object.info.get("skip_autogenerate", False)):
- return False
- else:
- return True
-
- context.configure(
- # ...
- include_object = include_object
- )
-
- :paramref:`.EnvironmentContext.configure.include_object` can also
- be used to filter on specific schemas to include or omit, when
- the :paramref:`.EnvironmentContext.configure.include_schemas`
- flag is set to ``True``. The :attr:`.Table.schema` attribute
- on each :class:`.Table` object reflected will indicate the name of the
- schema from which the :class:`.Table` originates.
-
- .. versionadded:: 0.6.0
-
- .. seealso::
-
- :paramref:`.EnvironmentContext.configure.include_schemas`
-
- :param include_symbol: A callable function which, given a table name
- and schema name (may be ``None``), returns ``True`` or ``False``,
- indicating if the given table should be considered in the
- autogenerate sweep.
-
- .. deprecated:: 0.6.0
- :paramref:`.EnvironmentContext.configure.include_symbol`
- is superseded by the more generic
- :paramref:`.EnvironmentContext.configure.include_object`
- parameter.
-
- E.g.::
-
- def include_symbol(tablename, schema):
- return tablename not in ("skip_table_one", "skip_table_two")
-
- context.configure(
- # ...
- include_symbol = include_symbol
- )
-
- .. seealso::
-
- :paramref:`.EnvironmentContext.configure.include_schemas`
-
- :paramref:`.EnvironmentContext.configure.include_object`
-
- :param render_as_batch: if True, commands which alter elements
- within a table will be placed under a ``with batch_alter_table():``
- directive, so that batch migrations will take place.
-
- .. versionadded:: 0.7.0
-
- .. seealso::
-
- :ref:`batch_migrations`
-
- :param include_schemas: If True, autogenerate will scan across
- all schemas located by the SQLAlchemy
- :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
- method, and include all differences in tables found across all
- those schemas. When using this option, you may want to also
- use the :paramref:`.EnvironmentContext.configure.include_object`
- option to specify a callable which
- can filter the tables/schemas that get included.
-
- .. seealso::
-
- :paramref:`.EnvironmentContext.configure.include_object`
-
- :param render_item: Callable that can be used to override how
- any schema item, i.e. column, constraint, type,
- etc., is rendered for autogenerate. The callable receives a
- string describing the type of object, the object, and
- the autogen context. If it returns False, the
- default rendering method will be used. If it returns None,
- the item will not be rendered in the context of a Table
- construct, that is, can be used to skip columns or constraints
- within op.create_table()::
-
- def my_render_column(type_, col, autogen_context):
- if type_ == "column" and isinstance(col, MySpecialCol):
- return repr(col)
- else:
- return False
-
- context.configure(
- # ...
- render_item = my_render_column
- )
-
- Available values for the type string include: ``"column"``,
- ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
- ``"type"``, ``"server_default"``.
-
- .. seealso::
-
- :ref:`autogen_render_types`
-
- :param upgrade_token: When autogenerate completes, the text of the
- candidate upgrade operations will be present in this template
- variable when ``script.py.mako`` is rendered. Defaults to
- ``upgrades``.
- :param downgrade_token: When autogenerate completes, the text of the
- candidate downgrade operations will be present in this
- template variable when ``script.py.mako`` is rendered. Defaults to
- ``downgrades``.
-
- :param alembic_module_prefix: When autogenerate refers to Alembic
- :mod:`alembic.operations` constructs, this prefix will be used
- (i.e. ``op.create_table``) Defaults to "``op.``".
- Can be ``None`` to indicate no prefix.
-
- :param sqlalchemy_module_prefix: When autogenerate refers to
- SQLAlchemy
- :class:`~sqlalchemy.schema.Column` or type classes, this prefix
- will be used
- (i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``".
- Can be ``None`` to indicate no prefix.
- Note that when dialect-specific types are rendered, autogenerate
- will render them using the dialect module name, i.e. ``mssql.BIT()``,
- ``postgresql.UUID()``.
-
- :param user_module_prefix: When autogenerate refers to a SQLAlchemy
- type (e.g. :class:`.TypeEngine`) where the module name is not
- under the ``sqlalchemy`` namespace, this prefix will be used
- within autogenerate. If left at its default of
- ``None``, the ``__module__`` attribute of the type is used to
- render the import module. It's a good practice to set this
- and to have all custom types be available from a fixed module space,
- in order to future-proof migration files against reorganizations
- in modules.
-
- .. versionchanged:: 0.7.0
- :paramref:`.EnvironmentContext.configure.user_module_prefix`
- no longer defaults to the value of
- :paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix`
- when left at ``None``; the ``__module__`` attribute is now used.
-
- .. versionadded:: 0.6.3 added
- :paramref:`.EnvironmentContext.configure.user_module_prefix`
-
- .. seealso::
-
- :ref:`autogen_module_prefix`
-
- :param process_revision_directives: a callable function that will
- be passed a structure representing the end result of an autogenerate
- or plain "revision" operation, which can be manipulated to affect
- how the ``alembic revision`` command ultimately outputs new
- revision scripts. The structure of the callable is::
-
- def process_revision_directives(context, revision, directives):
- pass
-
- The ``directives`` parameter is a Python list containing
- a single :class:`.MigrationScript` directive, which represents
- the revision file to be generated. This list as well as its
- contents may be freely modified to produce any set of commands.
- The section :ref:`customizing_revision` shows an example of
- doing this. The ``context`` parameter is the
- :class:`.MigrationContext` in use,
- and ``revision`` is a tuple of revision identifiers representing the
- current revision of the database.
-
- The callable is invoked at all times when the ``--autogenerate``
- option is passed to ``alembic revision``. If ``--autogenerate``
- is not passed, the callable is invoked only if the
- ``revision_environment`` variable is set to True in the Alembic
- configuration, in which case the given ``directives`` collection
- will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps`
- collections for ``.upgrade_ops`` and ``.downgrade_ops``. The
- ``--autogenerate`` option itself can be inferred by inspecting
- ``context.config.cmd_opts.autogenerate``.
-
- The callable function may optionally be an instance of
- a :class:`.Rewriter` object. This is a helper object that
- assists in the production of autogenerate-stream rewriter functions.
-
-
- .. versionadded:: 0.8.0
-
- .. versionchanged:: 0.8.1 - The
- :paramref:`.EnvironmentContext.configure.process_revision_directives`
- hook can append op directives into :class:`.UpgradeOps` and
- :class:`.DowngradeOps` which will be rendered in Python regardless
- of whether the ``--autogenerate`` option is in use or not;
- the ``revision_environment`` configuration variable should be
- set to "true" in the config to enable this.
-
-
- .. seealso::
-
- :ref:`customizing_revision`
-
- :ref:`autogen_rewriter`
-
- :paramref:`.command.revision.process_revision_directives`
-
- Parameters specific to individual backends:
-
- :param mssql_batch_separator: The "batch separator" which will
- be placed between each statement when generating offline SQL Server
- migrations. Defaults to ``GO``. Note this is in addition to the
- customary semicolon ``;`` at the end of each statement; SQL Server
- considers the "batch separator" to denote the end of an
- individual statement execution, and cannot group certain
- dependent operations in one step.
- :param oracle_batch_separator: The "batch separator" which will
- be placed between each statement when generating offline
- Oracle migrations. Defaults to ``/``. Oracle doesn't add a
- semicolon between statements like most other backends.
-
- """
- opts = self.context_opts
- if transactional_ddl is not None:
- opts["transactional_ddl"] = transactional_ddl
- if output_buffer is not None:
- opts["output_buffer"] = output_buffer
- elif self.config.output_buffer is not None:
- opts["output_buffer"] = self.config.output_buffer
- if starting_rev:
- opts['starting_rev'] = starting_rev
- if tag:
- opts['tag'] = tag
- if template_args and 'template_args' in opts:
- opts['template_args'].update(template_args)
- opts["transaction_per_migration"] = transaction_per_migration
- opts['target_metadata'] = target_metadata
- opts['include_symbol'] = include_symbol
- opts['include_object'] = include_object
- opts['include_schemas'] = include_schemas
- opts['render_as_batch'] = render_as_batch
- opts['upgrade_token'] = upgrade_token
- opts['downgrade_token'] = downgrade_token
- opts['sqlalchemy_module_prefix'] = sqlalchemy_module_prefix
- opts['alembic_module_prefix'] = alembic_module_prefix
- opts['user_module_prefix'] = user_module_prefix
- opts['literal_binds'] = literal_binds
- opts['process_revision_directives'] = process_revision_directives
- opts['on_version_apply'] = util.to_tuple(on_version_apply, default=())
-
- if render_item is not None:
- opts['render_item'] = render_item
- if compare_type is not None:
- opts['compare_type'] = compare_type
- if compare_server_default is not None:
- opts['compare_server_default'] = compare_server_default
- opts['script'] = self.script
-
- opts.update(kw)
-
- self._migration_context = MigrationContext.configure(
- connection=connection,
- url=url,
- dialect_name=dialect_name,
- environment_context=self,
- opts=opts
- )
-
- def run_migrations(self, **kw):
- """Run migrations as determined by the current command line
- configuration
- as well as versioning information present (or not) in the current
- database connection (if one is present).
-
- The function accepts optional ``**kw`` arguments. If these are
- passed, they are sent directly to the ``upgrade()`` and
- ``downgrade()``
- functions within each target revision file. By modifying the
- ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
- functions accept arguments, parameters can be passed here so that
- contextual information, usually information to identify a particular
- database in use, can be passed from a custom ``env.py`` script
- to the migration functions.
-
- This function requires that a :class:`.MigrationContext` has
- first been made available via :meth:`.configure`.
-
- """
- with Operations.context(self._migration_context):
- self.get_context().run_migrations(**kw)
-
- def execute(self, sql, execution_options=None):
- """Execute the given SQL using the current change context.
-
- The behavior of :meth:`.execute` is the same
- as that of :meth:`.Operations.execute`. Please see that
- function's documentation for full detail including
- caveats and limitations.
-
- This function requires that a :class:`.MigrationContext` has
- first been made available via :meth:`.configure`.
-
- """
- self.get_context().execute(sql,
- execution_options=execution_options)
-
- def static_output(self, text):
- """Emit text directly to the "offline" SQL stream.
-
- Typically this is for emitting comments that
- start with --. The statement is not treated
- as a SQL execution, no ; or batch separator
- is added, etc.
-
- """
- self.get_context().impl.static_output(text)
-
- def begin_transaction(self):
- """Return a context manager that will
- enclose an operation within a "transaction",
- as defined by the environment's offline
- and transactional DDL settings.
-
- e.g.::
-
- with context.begin_transaction():
- context.run_migrations()
-
- :meth:`.begin_transaction` is intended to
- "do the right thing" regardless of
- calling context:
-
- * If :meth:`.is_transactional_ddl` is ``False``,
- returns a "do nothing" context manager
- which otherwise produces no transactional
- state or directives.
- * If :meth:`.is_offline_mode` is ``True``,
- returns a context manager that will
- invoke the :meth:`.DefaultImpl.emit_begin`
- and :meth:`.DefaultImpl.emit_commit`
- methods, which will produce the string
- directives ``BEGIN`` and ``COMMIT`` on
- the output stream, as rendered by the
- target backend (e.g. SQL Server would
- emit ``BEGIN TRANSACTION``).
- * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin`
- on the current online connection, which
- returns a :class:`sqlalchemy.engine.Transaction`
- object. This object demarcates a real
- transaction and is itself a context manager,
- which will roll back if an exception
- is raised.
-
- Note that a custom ``env.py`` script which
- has more specific transactional needs can of course
- manipulate the :class:`~sqlalchemy.engine.Connection`
- directly to produce transactional state in "online"
- mode.
-
- """
-
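- # A hedged sketch of the standard "online" pattern from the stock
- # ``env.py`` template, combining configure(), begin_transaction() and
- # run_migrations() ("connectable" is assumed to be an Engine created
- # earlier in env.py):
- #
- #     with connectable.connect() as connection:
- #         context.configure(connection=connection,
- #                           target_metadata=target_metadata)
- #         with context.begin_transaction():
- #             context.run_migrations()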
- return self.get_context().begin_transaction()
-
- def get_context(self):
- """Return the current :class:`.MigrationContext` object.
-
- If :meth:`.EnvironmentContext.configure` has not been
- called yet, raises an exception.
-
- """
-
- if self._migration_context is None:
- raise Exception("No context has been configured yet.")
- return self._migration_context
-
- def get_bind(self):
- """Return the current 'bind'.
-
- In "online" mode, this is the
- :class:`sqlalchemy.engine.Connection` currently being used
- to emit SQL to the database.
-
- This function requires that a :class:`.MigrationContext`
- has first been made available via :meth:`.configure`.
-
- """
- return self.get_context().bind
-
- def get_impl(self):
- return self.get_context().impl
diff --git a/venv/Lib/site-packages/alembic/runtime/migration.py b/venv/Lib/site-packages/alembic/runtime/migration.py
deleted file mode 100644
index 17cc226..0000000
--- a/venv/Lib/site-packages/alembic/runtime/migration.py
+++ /dev/null
@@ -1,989 +0,0 @@
-import logging
-import sys
-from contextlib import contextmanager
-
-from sqlalchemy import MetaData, Table, Column, String, literal_column,\
- PrimaryKeyConstraint
-from sqlalchemy.engine.strategies import MockEngineStrategy
-from sqlalchemy.engine import url as sqla_url
-from sqlalchemy.engine import Connection
-
-from ..util.compat import callable, EncodedIO
-from .. import ddl, util
-
-log = logging.getLogger(__name__)
-
-
-class MigrationContext(object):
-
- """Represent the database state made available to a migration
- script.
-
- :class:`.MigrationContext` is the front end to an actual
- database connection, or alternatively a string output
- stream given a particular database dialect,
- from an Alembic perspective.
-
- When inside the ``env.py`` script, the :class:`.MigrationContext`
- is available via the
- :meth:`.EnvironmentContext.get_context` method,
- which is available at ``alembic.context``::
-
- # from within env.py script
- from alembic import context
- migration_context = context.get_context()
-
- For usage outside of an ``env.py`` script, such as for
- utility routines that want to check the current version
- in the database, the :meth:`.MigrationContext.configure`
- method can be used to create new :class:`.MigrationContext` objects.
- For example, to get at the current revision in the
- database using :meth:`.MigrationContext.get_current_revision`::
-
- # in any application, outside of an env.py script
- from alembic.migration import MigrationContext
- from sqlalchemy import create_engine
-
- engine = create_engine("postgresql://mydatabase")
- conn = engine.connect()
-
- context = MigrationContext.configure(conn)
- current_rev = context.get_current_revision()
-
- The above context can also be used to produce
- Alembic migration operations with an :class:`.Operations`
- instance::
-
- # in any application, outside of the normal Alembic environment
- from alembic.operations import Operations
- op = Operations(context)
- op.alter_column("mytable", "somecolumn", nullable=True)
-
- """
-
- def __init__(self, dialect, connection, opts, environment_context=None):
- self.environment_context = environment_context
- self.opts = opts
- self.dialect = dialect
- self.script = opts.get('script')
- as_sql = opts.get('as_sql', False)
- transactional_ddl = opts.get("transactional_ddl")
- self._transaction_per_migration = opts.get(
- "transaction_per_migration", False)
- self.on_version_apply_callbacks = opts.get('on_version_apply', ())
-
- if as_sql:
- self.connection = self._stdout_connection(connection)
- assert self.connection is not None
- else:
- self.connection = connection
- self._migrations_fn = opts.get('fn')
- self.as_sql = as_sql
-
- if "output_encoding" in opts:
- self.output_buffer = EncodedIO(
- opts.get("output_buffer") or sys.stdout,
- opts['output_encoding']
- )
- else:
- self.output_buffer = opts.get("output_buffer", sys.stdout)
-
- self._user_compare_type = opts.get('compare_type', False)
- self._user_compare_server_default = opts.get(
- 'compare_server_default',
- False)
- self.version_table = version_table = opts.get(
- 'version_table', 'alembic_version')
- self.version_table_schema = version_table_schema = \
- opts.get('version_table_schema', None)
- self._version = Table(
- version_table, MetaData(),
- Column('version_num', String(32), nullable=False),
- schema=version_table_schema)
- if opts.get("version_table_pk", True):
- self._version.append_constraint(
- PrimaryKeyConstraint(
- 'version_num', name="%s_pkc" % version_table
- )
- )
-
- self._start_from_rev = opts.get("starting_rev")
- self.impl = ddl.DefaultImpl.get_by_dialect(dialect)(
- dialect, self.connection, self.as_sql,
- transactional_ddl,
- self.output_buffer,
- opts
- )
- log.info("Context impl %s.", self.impl.__class__.__name__)
- if self.as_sql:
- log.info("Generating static SQL")
- log.info("Will assume %s DDL.",
- "transactional" if self.impl.transactional_ddl
- else "non-transactional")
-
- @classmethod
- def configure(cls,
- connection=None,
- url=None,
- dialect_name=None,
- dialect=None,
- environment_context=None,
- opts=None,
- ):
- """Create a new :class:`.MigrationContext`.
-
- This is a factory method usually called
- by :meth:`.EnvironmentContext.configure`.
-
- :param connection: a :class:`~sqlalchemy.engine.Connection`
- to use for SQL execution in "online" mode. When present,
- is also used to determine the type of dialect in use.
- :param url: a string database url, or a
- :class:`sqlalchemy.engine.url.URL` object.
- The type of dialect to be used will be derived from this if
- ``connection`` is not passed.
- :param dialect_name: string name of a dialect, such as
- "postgresql", "mssql", etc. The type of dialect to be used will be
- derived from this if ``connection`` and ``url`` are not passed.
- :param opts: dictionary of options. Most other options
- accepted by :meth:`.EnvironmentContext.configure` are passed via
- this dictionary.
-
- """
- if opts is None:
- opts = {}
-
- if connection:
- if not isinstance(connection, Connection):
- util.warn(
- "'connection' argument to configure() is expected "
- "to be a sqlalchemy.engine.Connection instance, "
- "got %r" % connection)
- dialect = connection.dialect
- elif url:
- url = sqla_url.make_url(url)
- dialect = url.get_dialect()()
- elif dialect_name:
- url = sqla_url.make_url("%s://" % dialect_name)
- dialect = url.get_dialect()()
- elif not dialect:
- raise Exception("Connection, url, or dialect_name is required.")
-
- return MigrationContext(dialect, connection, opts, environment_context)
-
- def begin_transaction(self, _per_migration=False):
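- # transaction_now is True when this call arrives at the level at
- # which a transaction should actually begin: the outer, whole-run
- # call (_per_migration=False) when transaction_per_migration is
- # False, or the inner per-migration call made by run_migrations()
- # (_per_migration=True) when it is True; a call at the other level
- # falls through to a no-op context manager.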
- transaction_now = _per_migration == self._transaction_per_migration
-
- if not transaction_now:
- @contextmanager
- def do_nothing():
- yield
- return do_nothing()
-
- elif not self.impl.transactional_ddl:
- @contextmanager
- def do_nothing():
- yield
- return do_nothing()
- elif self.as_sql:
- @contextmanager
- def begin_commit():
- self.impl.emit_begin()
- yield
- self.impl.emit_commit()
- return begin_commit()
- else:
- return self.bind.begin()
-
- def get_current_revision(self):
- """Return the current revision, usually that which is present
- in the ``alembic_version`` table in the database.
-
- This method is intended to be used only with a migration stream that
- does not contain unmerged branches in the target database;
- if there are multiple branches present, an exception is raised.
- The :meth:`.MigrationContext.get_current_heads` should be preferred
- over this method going forward in order to be compatible with
- branch migration support.
-
- If this :class:`.MigrationContext` was configured in "offline"
- mode, that is with ``as_sql=True``, the ``starting_rev``
- parameter is returned instead, if any.
-
- """
- heads = self.get_current_heads()
- if len(heads) == 0:
- return None
- elif len(heads) > 1:
- raise util.CommandError(
- "Version table '%s' has more than one head present; "
- "please use get_current_heads()" % self.version_table)
- else:
- return heads[0]
-
- def get_current_heads(self):
- """Return a tuple of the current 'head versions' that are represented
- in the target database.
-
- For a migration stream without branches, this will be a single
- value, synonymous with that of
- :meth:`.MigrationContext.get_current_revision`. However when multiple
- unmerged branches exist within the target database, the returned tuple
- will contain a value for each head.
-
- If this :class:`.MigrationContext` was configured in "offline"
- mode, that is with ``as_sql=True``, the ``starting_rev``
- parameter is returned in a one-length tuple.
-
- If no version table is present, or if there are no revisions
- present, an empty tuple is returned.
-
- .. versionadded:: 0.7.0
-
- """
- if self.as_sql:
- start_from_rev = self._start_from_rev
- if start_from_rev == 'base':
- start_from_rev = None
- elif start_from_rev is not None and self.script:
- start_from_rev = \
- self.script.get_revision(start_from_rev).revision
-
- return util.to_tuple(start_from_rev, default=())
- else:
- if self._start_from_rev:
- raise util.CommandError(
- "Can't specify current_rev to context "
- "when using a database connection")
- if not self._has_version_table():
- return ()
- return tuple(
- row[0] for row in self.connection.execute(self._version.select())
- )
-
- def _ensure_version_table(self):
- self._version.create(self.connection, checkfirst=True)
-
- def _has_version_table(self):
- return self.connection.dialect.has_table(
- self.connection, self.version_table, self.version_table_schema)
-
- def stamp(self, script_directory, revision):
- """Stamp the version table with a specific revision.
-
- This method calculates those branches to which the given revision
- can apply, and updates those branches as though they were migrated
- towards that revision (either up or down). If no current branches
- include the revision, it is added as a new branch head.
-
- .. versionadded:: 0.7.0
-
- """
- heads = self.get_current_heads()
- if not self.as_sql and not heads:
- self._ensure_version_table()
- head_maintainer = HeadMaintainer(self, heads)
- for step in script_directory._stamp_revs(revision, heads):
- head_maintainer.update_to_step(step)
-
- def run_migrations(self, **kw):
- r"""Run the migration scripts established for this
- :class:`.MigrationContext`, if any.
-
- The commands in :mod:`alembic.command` will set up a function
- that is ultimately passed to the :class:`.MigrationContext`
- as the ``fn`` argument. This function represents the "work"
- that will be done when :meth:`.MigrationContext.run_migrations`
- is called, typically from within the ``env.py`` script of the
- migration environment. The "work function" then provides an iterable
- of version callables and other version information which
- in the case of the ``upgrade`` or ``downgrade`` commands are the
- list of version scripts to invoke. Other commands yield nothing,
- in the case that a command wants to run some other operation
- against the database such as the ``current`` or ``stamp`` commands.
-
- :param \**kw: keyword arguments here will be passed to each
- migration callable, that is the ``upgrade()`` or ``downgrade()``
- method within revision scripts.
-
- """
- self.impl.start_migrations()
-
- heads = self.get_current_heads()
- if not self.as_sql and not heads:
- self._ensure_version_table()
-
- head_maintainer = HeadMaintainer(self, heads)
-
- starting_in_transaction = not self.as_sql and \
- self._in_connection_transaction()
-
- for step in self._migrations_fn(heads, self):
- with self.begin_transaction(_per_migration=True):
- if self.as_sql and not head_maintainer.heads:
- # for offline mode, include a CREATE TABLE from
- # the base
- self._version.create(self.connection)
- log.info("Running %s", step)
- if self.as_sql:
- self.impl.static_output("-- Running %s" % (step.short_log,))
- step.migration_fn(**kw)
-
- # previously, we wouldn't stamp per migration
- # if we were in a transaction, however given the more
- # complex model that involves any number of inserts
- # and row-targeted updates and deletes, it's simpler for now
- # just to run the operations on every version
- head_maintainer.update_to_step(step)
- for callback in self.on_version_apply_callbacks:
- callback(ctx=self,
- step=step.info,
- heads=set(head_maintainer.heads),
- run_args=kw)
-
- if not starting_in_transaction and not self.as_sql and \
- not self.impl.transactional_ddl and \
- self._in_connection_transaction():
- raise util.CommandError(
- "Migration \"%s\" has left an uncommitted "
- "transaction opened; transactional_ddl is False so "
- "Alembic is not committing transactions"
- % step)
-
- if self.as_sql and not head_maintainer.heads:
- self._version.drop(self.connection)
-
- def _in_connection_transaction(self):
- try:
- meth = self.connection.in_transaction
- except AttributeError:
- return False
- else:
- return meth()
-
- def execute(self, sql, execution_options=None):
- """Execute a SQL construct or string statement.
-
- The underlying execution mechanics are used, that is
- if this is "offline mode" the SQL is written to the
- output buffer, otherwise the SQL is emitted on
- the current SQLAlchemy connection.
-
- """
- self.impl._exec(sql, execution_options)
-
- def _stdout_connection(self, connection):
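- # This mock connection is what makes ``--sql`` offline mode work: every
- # construct "executed" on it is routed to impl._exec(), which renders
- # the statement as a string on the output buffer instead of emitting it
- # to a database.  (MockEngineStrategy is the pre-SQLAlchemy-1.4
- # facility behind ``create_engine(..., strategy="mock")``.)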
- def dump(construct, *multiparams, **params):
- self.impl._exec(construct)
-
- return MockEngineStrategy.MockConnection(self.dialect, dump)
-
- @property
- def bind(self):
- """Return the current "bind".
-
- In online mode, this is an instance of
- :class:`sqlalchemy.engine.Connection`, and is suitable
- for ad-hoc execution of any kind of usage described
- in :ref:`sqlexpression_toplevel` as well as
- for usage with the :meth:`sqlalchemy.schema.Table.create`
- and :meth:`sqlalchemy.schema.MetaData.create_all` methods
- of :class:`~sqlalchemy.schema.Table`,
- :class:`~sqlalchemy.schema.MetaData`.
-
- Note that when "standard output" mode is enabled,
- this bind will be a "mock" connection handler that cannot
- return results and is only appropriate for a very limited
- subset of commands.
-
- """
- return self.connection
-
- @property
- def config(self):
- """Return the :class:`.Config` used by the current environment, if any.
-
- .. versionadded:: 0.6.6
-
- """
- if self.environment_context:
- return self.environment_context.config
- else:
- return None
-
- def _compare_type(self, inspector_column, metadata_column):
- if self._user_compare_type is False:
- return False
-
- if callable(self._user_compare_type):
- user_value = self._user_compare_type(
- self,
- inspector_column,
- metadata_column,
- inspector_column.type,
- metadata_column.type
- )
- if user_value is not None:
- return user_value
-
- return self.impl.compare_type(
- inspector_column,
- metadata_column)
-
- def _compare_server_default(self, inspector_column,
- metadata_column,
- rendered_metadata_default,
- rendered_column_default):
-
- if self._user_compare_server_default is False:
- return False
-
- if callable(self._user_compare_server_default):
- user_value = self._user_compare_server_default(
- self,
- inspector_column,
- metadata_column,
- rendered_column_default,
- metadata_column.server_default,
- rendered_metadata_default
- )
- if user_value is not None:
- return user_value
-
- return self.impl.compare_server_default(
- inspector_column,
- metadata_column,
- rendered_metadata_default,
- rendered_column_default)
-
-
-class HeadMaintainer(object):
- def __init__(self, context, heads):
- self.context = context
- self.heads = set(heads)
-
- def _insert_version(self, version):
- assert version not in self.heads
- self.heads.add(version)
-
- self.context.impl._exec(
- self.context._version.insert().
- values(
- version_num=literal_column("'%s'" % version)
- )
- )
-
- def _delete_version(self, version):
- self.heads.remove(version)
-
- ret = self.context.impl._exec(
- self.context._version.delete().where(
- self.context._version.c.version_num ==
- literal_column("'%s'" % version)))
- if not self.context.as_sql and ret.rowcount != 1:
- raise util.CommandError(
- "Online migration expected to match one "
- "row when deleting '%s' in '%s'; "
- "%d found"
- % (version,
- self.context.version_table, ret.rowcount))
-
- def _update_version(self, from_, to_):
- assert to_ not in self.heads
- self.heads.remove(from_)
- self.heads.add(to_)
-
- ret = self.context.impl._exec(
- self.context._version.update().
- values(version_num=literal_column("'%s'" % to_)).where(
- self.context._version.c.version_num
- == literal_column("'%s'" % from_))
- )
- if not self.context.as_sql and ret.rowcount != 1:
- raise util.CommandError(
- "Online migration expected to match one "
- "row when updating '%s' to '%s' in '%s'; "
- "%d found"
- % (from_, to_, self.context.version_table, ret.rowcount))
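-
- # For reference, the three version-table statements emitted by the
- # methods above render roughly as follows (a sketch; literal quoting
- # is dialect-dependent):
- #
- #     INSERT INTO alembic_version (version_num) VALUES ('<version>')
- #     DELETE FROM alembic_version WHERE version_num = '<version>'
- #     UPDATE alembic_version SET version_num='<to>' WHERE version_num='<from>'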
-
- def update_to_step(self, step):
- if step.should_delete_branch(self.heads):
- vers = step.delete_version_num
- log.debug("branch delete %s", vers)
- self._delete_version(vers)
- elif step.should_create_branch(self.heads):
- vers = step.insert_version_num
- log.debug("new branch insert %s", vers)
- self._insert_version(vers)
- elif step.should_merge_branches(self.heads):
- # delete revs, update from rev, update to rev
- (delete_revs, update_from_rev,
- update_to_rev) = step.merge_branch_idents(self.heads)
- log.debug(
- "merge, delete %s, update %s to %s",
- delete_revs, update_from_rev, update_to_rev)
- for delrev in delete_revs:
- self._delete_version(delrev)
- self._update_version(update_from_rev, update_to_rev)
- elif step.should_unmerge_branches(self.heads):
- (update_from_rev, update_to_rev,
- insert_revs) = step.unmerge_branch_idents(self.heads)
- log.debug(
- "unmerge, insert %s, update %s to %s",
- insert_revs, update_from_rev, update_to_rev)
- for insrev in insert_revs:
- self._insert_version(insrev)
- self._update_version(update_from_rev, update_to_rev)
- else:
- from_, to_ = step.update_version_num(self.heads)
- log.debug("update %s to %s", from_, to_)
- self._update_version(from_, to_)
-
-
-class MigrationInfo(object):
- """Exposes information about a migration step to a callback listener.
-
- The :class:`.MigrationInfo` object is available exclusively for the
- benefit of the :paramref:`.EnvironmentContext.on_version_apply`
- callback hook.
-
- .. versionadded:: 0.9.3
-
- """
-
- is_upgrade = None
- """True/False: indicates whether this operation ascends or descends the
- version tree."""
-
- is_stamp = None
- """True/False: indicates whether this operation is a stamp (i.e. whether
- it results in any actual database operations)."""
-
- up_revision_id = None
- """Version string corresponding to :attr:`.Revision.revision`.
-
- In the case of a stamp operation, it is advised to use the
- :attr:`.MigrationInfo.up_revision_ids` tuple as a stamp operation can
- make a single movement from one or more branches down to a single
- branchpoint, in which case there will be multiple "up" revisions.
-
- .. seealso::
-
- :attr:`.MigrationInfo.up_revision_ids`
-
- """
-
- up_revision_ids = None
- """Tuple of version strings corresponding to :attr:`.Revision.revision`.
-
- In the majority of cases, this tuple will be a single value, synonymous
- with the scalar value of :attr:`.MigrationInfo.up_revision_id`.
- It can be multiple revision identifiers only in the case of an
- ``alembic stamp`` operation which is moving downwards from multiple
- branches down to their common branch point.
-
- .. versionadded:: 0.9.4
-
- """
-
- down_revision_ids = None
- """Tuple of strings representing the base revisions of this migration step.
-
- If empty, this represents a root revision; otherwise, the first item
- corresponds to :attr:`.Revision.down_revision`, and the rest are inferred
- from dependencies.
- """
-
- revision_map = None
- """The revision map inside of which this operation occurs."""
-
- def __init__(self, revision_map, is_upgrade, is_stamp, up_revisions,
- down_revisions):
- self.revision_map = revision_map
- self.is_upgrade = is_upgrade
- self.is_stamp = is_stamp
- self.up_revision_ids = util.to_tuple(up_revisions, default=())
- if self.up_revision_ids:
- self.up_revision_id = self.up_revision_ids[0]
- else:
- # this should never be the case with
- # "upgrade", "downgrade", or "stamp" as we are always
- # measuring movement in terms of at least one upgrade version
- self.up_revision_id = None
- self.down_revision_ids = util.to_tuple(down_revisions, default=())
-
- @property
- def is_migration(self):
- """True/False: indicates whether this operation is a migration.
-
- At present this is true if and only if the migration is not a stamp.
- If other operation types are added in the future, both this attribute
- and :attr:`~.MigrationInfo.is_stamp` will be false.
- """
- return not self.is_stamp
-
- @property
- def source_revision_ids(self):
- """Active revisions before this migration step is applied."""
- return self.down_revision_ids if self.is_upgrade \
- else self.up_revision_ids
-
- @property
- def destination_revision_ids(self):
- """Active revisions after this migration step is applied."""
- return self.up_revision_ids if self.is_upgrade \
- else self.down_revision_ids
-
- @property
- def up_revision(self):
- """Get :attr:`~.MigrationInfo.up_revision_id` as a :class:`.Revision`."""
- return self.revision_map.get_revision(self.up_revision_id)
-
- @property
- def up_revisions(self):
- """Get :attr:`~.MigrationInfo.up_revision_ids` as a :class:`.Revision`.
-
- .. versionadded:: 0.9.4
-
- """
- return self.revision_map.get_revisions(self.up_revision_ids)
-
- @property
- def down_revisions(self):
- """Get :attr:`~.MigrationInfo.down_revision_ids` as a tuple of
- :class:`Revisions <.Revision>`."""
- return self.revision_map.get_revisions(self.down_revision_ids)
-
- @property
- def source_revisions(self):
- """Get :attr:`~MigrationInfo.source_revision_ids` as a tuple of
- :class:`Revisions <.Revision>`."""
- return self.revision_map.get_revisions(self.source_revision_ids)
-
- @property
- def destination_revisions(self):
- """Get :attr:`~MigrationInfo.destination_revision_ids` as a tuple of
- :class:`Revisions <.Revision>`."""
- return self.revision_map.get_revisions(self.destination_revision_ids)
-
-
-class MigrationStep(object):
- @property
- def name(self):
- return self.migration_fn.__name__
-
- @classmethod
- def upgrade_from_script(cls, revision_map, script):
- return RevisionStep(revision_map, script, True)
-
- @classmethod
- def downgrade_from_script(cls, revision_map, script):
- return RevisionStep(revision_map, script, False)
-
- @property
- def is_downgrade(self):
- return not self.is_upgrade
-
- @property
- def short_log(self):
- return "%s %s -> %s" % (
- self.name,
- util.format_as_comma(self.from_revisions_no_deps),
- util.format_as_comma(self.to_revisions_no_deps)
- )
-
- def __str__(self):
- if self.doc:
- return "%s %s -> %s, %s" % (
- self.name,
- util.format_as_comma(self.from_revisions_no_deps),
- util.format_as_comma(self.to_revisions_no_deps),
- self.doc
- )
- else:
- return self.short_log
-
-
-class RevisionStep(MigrationStep):
- def __init__(self, revision_map, revision, is_upgrade):
- self.revision_map = revision_map
- self.revision = revision
- self.is_upgrade = is_upgrade
- if is_upgrade:
- self.migration_fn = revision.module.upgrade
- else:
- self.migration_fn = revision.module.downgrade
-
- def __repr__(self):
- return "RevisionStep(%r, is_upgrade=%r)" % (
- self.revision.revision, self.is_upgrade
- )
-
- def __eq__(self, other):
- return isinstance(other, RevisionStep) and \
- other.revision == self.revision and \
- self.is_upgrade == other.is_upgrade
-
- @property
- def doc(self):
- return self.revision.doc
-
- @property
- def from_revisions(self):
- if self.is_upgrade:
- return self.revision._all_down_revisions
- else:
- return (self.revision.revision, )
-
- @property
- def from_revisions_no_deps(self):
- if self.is_upgrade:
- return self.revision._versioned_down_revisions
- else:
- return (self.revision.revision, )
-
- @property
- def to_revisions(self):
- if self.is_upgrade:
- return (self.revision.revision, )
- else:
- return self.revision._all_down_revisions
-
- @property
- def to_revisions_no_deps(self):
- if self.is_upgrade:
- return (self.revision.revision, )
- else:
- return self.revision._versioned_down_revisions
-
- @property
- def _has_scalar_down_revision(self):
- return len(self.revision._all_down_revisions) == 1
-
- def should_delete_branch(self, heads):
- """A delete is when we are a. in a downgrade and b.
- we are going to the "base" or we are going to a version that
- is implied as a dependency on another version that is remaining.
-
- """
- if not self.is_downgrade:
- return False
-
- if self.revision.revision not in heads:
- return False
-
- downrevs = self.revision._all_down_revisions
-
- if not downrevs:
- # is a base
- return True
- else:
- # determine what the ultimate "to_revisions" for an
- # unmerge would be. If there are none, then we're a delete.
- to_revisions = self._unmerge_to_revisions(heads)
- return not to_revisions
-
- def merge_branch_idents(self, heads):
- other_heads = set(heads).difference(self.from_revisions)
-
- if other_heads:
- ancestors = set(
- r.revision for r in
- self.revision_map._get_ancestor_nodes(
- self.revision_map.get_revisions(other_heads),
- check=False
- )
- )
- from_revisions = list(
- set(self.from_revisions).difference(ancestors))
- else:
- from_revisions = list(self.from_revisions)
-
- return (
- # delete revs, update from rev, update to rev
- list(from_revisions[0:-1]), from_revisions[-1],
- self.to_revisions[0]
- )
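-
- # Worked example (an illustration, not taken from the Alembic docs):
- # with current heads {'a', 'b'} and an upgrade into a merge revision
- # 'm' whose down revisions are ('a', 'b'), from_revisions is ('a', 'b')
- # and to_revisions is ('m',), so merge_branch_idents() returns
- # (['a'], 'b', 'm'): delete the 'a' row, then update 'b' to 'm'.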
-
- def _unmerge_to_revisions(self, heads):
- other_heads = set(heads).difference([self.revision.revision])
- if other_heads:
- ancestors = set(
- r.revision for r in
- self.revision_map._get_ancestor_nodes(
- self.revision_map.get_revisions(other_heads),
- check=False
- )
- )
- return list(set(self.to_revisions).difference(ancestors))
- else:
- return self.to_revisions
-
- def unmerge_branch_idents(self, heads):
- to_revisions = self._unmerge_to_revisions(heads)
-
- return (
- # update from rev, update to rev, insert revs
- self.from_revisions[0], to_revisions[-1],
- to_revisions[0:-1]
- )
-
- def should_create_branch(self, heads):
- if not self.is_upgrade:
- return False
-
- downrevs = self.revision._all_down_revisions
-
- if not downrevs:
- # is a base
- return True
- else:
- # none of our downrevs are present, so...
- # we have to insert our version. This is true whether
- # or not there is only one downrev, or multiple (in the latter
- # case, we're a merge point.)
- if not heads.intersection(downrevs):
- return True
- else:
- return False
-
- def should_merge_branches(self, heads):
- if not self.is_upgrade:
- return False
-
- downrevs = self.revision._all_down_revisions
-
- if len(downrevs) > 1 and \
- len(heads.intersection(downrevs)) > 1:
- return True
-
- return False
-
- def should_unmerge_branches(self, heads):
- if not self.is_downgrade:
- return False
-
- downrevs = self.revision._all_down_revisions
-
- if self.revision.revision in heads and len(downrevs) > 1:
- return True
-
- return False
-
- def update_version_num(self, heads):
- if not self._has_scalar_down_revision:
- downrev = heads.intersection(self.revision._all_down_revisions)
- assert len(downrev) == 1, \
- "Can't do an UPDATE because downrevision is ambiguous"
- down_revision = list(downrev)[0]
- else:
- down_revision = self.revision._all_down_revisions[0]
-
- if self.is_upgrade:
- return down_revision, self.revision.revision
- else:
- return self.revision.revision, down_revision
-
- @property
- def delete_version_num(self):
- return self.revision.revision
-
- @property
- def insert_version_num(self):
- return self.revision.revision
-
- @property
- def info(self):
- return MigrationInfo(revision_map=self.revision_map,
- up_revisions=self.revision.revision,
- down_revisions=self.revision._all_down_revisions,
- is_upgrade=self.is_upgrade, is_stamp=False)
-
-
-class StampStep(MigrationStep):
- def __init__(self, from_, to_, is_upgrade, branch_move, revision_map=None):
- self.from_ = util.to_tuple(from_, default=())
- self.to_ = util.to_tuple(to_, default=())
- self.is_upgrade = is_upgrade
- self.branch_move = branch_move
- self.migration_fn = self.stamp_revision
- self.revision_map = revision_map
-
- doc = None
-
- def stamp_revision(self, **kw):
- return None
-
- def __eq__(self, other):
- return isinstance(other, StampStep) and \
-            other.from_revisions == self.from_revisions and \
- other.to_revisions == self.to_revisions and \
- other.branch_move == self.branch_move and \
- self.is_upgrade == other.is_upgrade
-
- @property
- def from_revisions(self):
- return self.from_
-
- @property
- def to_revisions(self):
- return self.to_
-
- @property
- def from_revisions_no_deps(self):
- return self.from_
-
- @property
- def to_revisions_no_deps(self):
- return self.to_
-
- @property
- def delete_version_num(self):
- assert len(self.from_) == 1
- return self.from_[0]
-
- @property
- def insert_version_num(self):
- assert len(self.to_) == 1
- return self.to_[0]
-
- def update_version_num(self, heads):
- assert len(self.from_) == 1
- assert len(self.to_) == 1
- return self.from_[0], self.to_[0]
-
- def merge_branch_idents(self, heads):
- return (
- # delete revs, update from rev, update to rev
- list(self.from_[0:-1]), self.from_[-1],
- self.to_[0]
- )
-
- def unmerge_branch_idents(self, heads):
- return (
- # update from rev, update to rev, insert revs
- self.from_[0], self.to_[-1],
- list(self.to_[0:-1])
- )
-
- def should_delete_branch(self, heads):
- return self.is_downgrade and self.branch_move
-
- def should_create_branch(self, heads):
- return self.is_upgrade and self.branch_move
-
- def should_merge_branches(self, heads):
- return len(self.from_) > 1
-
- def should_unmerge_branches(self, heads):
- return len(self.to_) > 1
-
- @property
- def info(self):
- up, down = (self.to_, self.from_) if self.is_upgrade \
- else (self.from_, self.to_)
- return MigrationInfo(revision_map=self.revision_map,
- up_revisions=up,
- down_revisions=down,
- is_upgrade=self.is_upgrade,
- is_stamp=True)
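
Aside on the identifier bookkeeping above: ``merge_branch_idents()`` and
``unmerge_branch_idents()`` describe how rows in the ``alembic_version``
table are rewritten when branches join or split. A minimal sketch of that
arithmetic, using made-up revision ids (not from any real project)::

    # Merge: every "from" head but the last becomes a DELETE; the last one
    # is UPDATEd to the merge-point revision (see merge_branch_idents above).
    from_ = ('a1', 'b2', 'c3')   # three heads being merged
    to_ = ('d4',)                # the merge-point revision

    delete_revs, update_from, update_to = (
        list(from_[0:-1]), from_[-1], to_[0])
    assert (delete_revs, update_from, update_to) == (['a1', 'b2'], 'c3', 'd4')

    # Unmerge is the inverse: one UPDATE plus INSERTs for the other heads.
    u_from, u_to = ('d4',), ('a1', 'b2', 'c3')
    update_from, update_to, insert_revs = u_from[0], u_to[-1], list(u_to[0:-1])
    assert (update_from, update_to, insert_revs) == ('d4', 'c3', ['a1', 'b2'])
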
diff --git a/venv/Lib/site-packages/alembic/script/__init__.py b/venv/Lib/site-packages/alembic/script/__init__.py
deleted file mode 100644
index cae294f..0000000
--- a/venv/Lib/site-packages/alembic/script/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .base import ScriptDirectory, Script # noqa
-
-__all__ = ['ScriptDirectory', 'Script']
diff --git a/venv/Lib/site-packages/alembic/script/base.py b/venv/Lib/site-packages/alembic/script/base.py
deleted file mode 100644
index 42dd469..0000000
--- a/venv/Lib/site-packages/alembic/script/base.py
+++ /dev/null
@@ -1,823 +0,0 @@
-import datetime
-from dateutil import tz
-import os
-import re
-import shutil
-from .. import util
-from ..util import compat
-from . import revision
-from ..runtime import migration
-
-from contextlib import contextmanager
-
-_sourceless_rev_file = re.compile(r'(?!\.\#|__init__)(.*\.py)(c|o)?$')
-_only_source_rev_file = re.compile(r'(?!\.\#|__init__)(.*\.py)$')
-_legacy_rev = re.compile(r'([a-f0-9]+)\.py$')
-_mod_def_re = re.compile(r'(upgrade|downgrade)_([a-z0-9]+)')
-_slug_re = re.compile(r'\w+')
-_default_file_template = "%(rev)s_%(slug)s"
-_split_on_space_comma = re.compile(r',|(?: +)')
-
-
-class ScriptDirectory(object):
-
- """Provides operations upon an Alembic script directory.
-
- This object is useful to get information as to current revisions,
- most notably being able to get at the "head" revision, for schemes
- that want to test if the current revision in the database is the most
- recent::
-
- from alembic.script import ScriptDirectory
- from alembic.config import Config
- config = Config()
- config.set_main_option("script_location", "myapp:migrations")
- script = ScriptDirectory.from_config(config)
-
- head_revision = script.get_current_head()
-
-
-
- """
-
- def __init__(self, dir, file_template=_default_file_template,
- truncate_slug_length=40,
- version_locations=None,
- sourceless=False, output_encoding="utf-8",
- timezone=None):
- self.dir = dir
- self.file_template = file_template
- self.version_locations = version_locations
- self.truncate_slug_length = truncate_slug_length or 40
- self.sourceless = sourceless
- self.output_encoding = output_encoding
- self.revision_map = revision.RevisionMap(self._load_revisions)
- self.timezone = timezone
-
- if not os.access(dir, os.F_OK):
- raise util.CommandError("Path doesn't exist: %r. Please use "
- "the 'init' command to create a new "
- "scripts folder." % dir)
-
- @property
- def versions(self):
- loc = self._version_locations
- if len(loc) > 1:
- raise util.CommandError("Multiple version_locations present")
- else:
- return loc[0]
-
- @util.memoized_property
- def _version_locations(self):
- if self.version_locations:
- return [
- os.path.abspath(util.coerce_resource_to_filename(location))
- for location in self.version_locations
- ]
- else:
- return (os.path.abspath(os.path.join(self.dir, 'versions')),)
-
- def _load_revisions(self):
- if self.version_locations:
- paths = [
- vers for vers in self._version_locations
- if os.path.exists(vers)]
- else:
- paths = [self.versions]
-
- dupes = set()
- for vers in paths:
- for file_ in Script._list_py_dir(self, vers):
- path = os.path.realpath(os.path.join(vers, file_))
- if path in dupes:
- util.warn(
-                        "File %s loaded twice! Ignoring. Please ensure "
- "version_locations is unique." % path
- )
- continue
- dupes.add(path)
- script = Script._from_filename(self, vers, file_)
- if script is None:
- continue
- yield script
-
- @classmethod
- def from_config(cls, config):
- """Produce a new :class:`.ScriptDirectory` given a :class:`.Config`
- instance.
-
- The :class:`.Config` need only have the ``script_location`` key
- present.
-
- """
- script_location = config.get_main_option('script_location')
- if script_location is None:
- raise util.CommandError("No 'script_location' key "
- "found in configuration.")
- truncate_slug_length = config.get_main_option("truncate_slug_length")
- if truncate_slug_length is not None:
- truncate_slug_length = int(truncate_slug_length)
-
- version_locations = config.get_main_option("version_locations")
- if version_locations:
- version_locations = _split_on_space_comma.split(version_locations)
-
- return ScriptDirectory(
- util.coerce_resource_to_filename(script_location),
- file_template=config.get_main_option(
- 'file_template',
- _default_file_template),
- truncate_slug_length=truncate_slug_length,
- sourceless=config.get_main_option("sourceless") == "true",
- output_encoding=config.get_main_option("output_encoding", "utf-8"),
- version_locations=version_locations,
- timezone=config.get_main_option("timezone")
- )
-
- @contextmanager
- def _catch_revision_errors(
- self,
- ancestor=None, multiple_heads=None, start=None, end=None,
- resolution=None):
- try:
- yield
- except revision.RangeNotAncestorError as rna:
- if start is None:
- start = rna.lower
- if end is None:
- end = rna.upper
- if not ancestor:
- ancestor = (
- "Requested range %(start)s:%(end)s does not refer to "
- "ancestor/descendant revisions along the same branch"
- )
- ancestor = ancestor % {"start": start, "end": end}
- compat.raise_from_cause(util.CommandError(ancestor))
- except revision.MultipleHeads as mh:
- if not multiple_heads:
- multiple_heads = (
- "Multiple head revisions are present for given "
- "argument '%(head_arg)s'; please "
- "specify a specific target revision, "
- "'@%(head_arg)s' to "
- "narrow to a specific head, or 'heads' for all heads")
- multiple_heads = multiple_heads % {
- "head_arg": end or mh.argument,
- "heads": util.format_as_comma(mh.heads)
- }
- compat.raise_from_cause(util.CommandError(multiple_heads))
- except revision.ResolutionError as re:
- if resolution is None:
- resolution = "Can't locate revision identified by '%s'" % (
- re.argument
- )
- compat.raise_from_cause(util.CommandError(resolution))
- except revision.RevisionError as err:
- compat.raise_from_cause(util.CommandError(err.args[0]))
-
- def walk_revisions(self, base="base", head="heads"):
- """Iterate through all revisions.
-
- :param base: the base revision, or "base" to start from the
- empty revision.
-
- :param head: the head revision; defaults to "heads" to indicate
- all head revisions. May also be "head" to indicate a single
- head revision.
-
- .. versionchanged:: 0.7.0 the "head" identifier now refers to
- the head of a non-branched repository only; use "heads" to
- refer to the set of all head branches simultaneously.
-
- """
- with self._catch_revision_errors(start=base, end=head):
- for rev in self.revision_map.iterate_revisions(
- head, base, inclusive=True, assert_relative_length=False):
- yield rev
-
- def get_revisions(self, id_):
- """Return the :class:`.Script` instance with the given rev identifier,
- symbolic name, or sequence of identifiers.
-
- .. versionadded:: 0.7.0
-
- """
- with self._catch_revision_errors():
- return self.revision_map.get_revisions(id_)
-
- def get_all_current(self, id_):
- with self._catch_revision_errors():
- top_revs = set(self.revision_map.get_revisions(id_))
- top_revs.update(
- self.revision_map._get_ancestor_nodes(
- list(top_revs), include_dependencies=True)
- )
- top_revs = self.revision_map._filter_into_branch_heads(top_revs)
- return top_revs
-
- def get_revision(self, id_):
- """Return the :class:`.Script` instance with the given rev id.
-
- .. seealso::
-
- :meth:`.ScriptDirectory.get_revisions`
-
- """
-
- with self._catch_revision_errors():
- return self.revision_map.get_revision(id_)
-
- def as_revision_number(self, id_):
- """Convert a symbolic revision, i.e. 'head' or 'base', into
- an actual revision number."""
-
- with self._catch_revision_errors():
- rev, branch_name = self.revision_map._resolve_revision_number(id_)
-
- if not rev:
- # convert () to None
- return None
- elif id_ == "heads":
- return rev
- else:
- return rev[0]
-
- def iterate_revisions(self, upper, lower):
- """Iterate through script revisions, starting at the given
- upper revision identifier and ending at the lower.
-
- The traversal uses strictly the `down_revision`
- marker inside each migration script, so
- it is a requirement that upper >= lower,
- else you'll get nothing back.
-
- The iterator yields :class:`.Script` objects.
-
- .. seealso::
-
- :meth:`.RevisionMap.iterate_revisions`
-
- """
- return self.revision_map.iterate_revisions(upper, lower)
-
- def get_current_head(self):
- """Return the current head revision.
-
- If the script directory has multiple heads
- due to branching, an error is raised;
- :meth:`.ScriptDirectory.get_heads` should be
- preferred.
-
- :return: a string revision number.
-
- .. seealso::
-
- :meth:`.ScriptDirectory.get_heads`
-
- """
- with self._catch_revision_errors(multiple_heads=(
-            'The script directory has multiple heads (due to branching). '
- 'Please use get_heads(), or merge the branches using '
- 'alembic merge.'
- )):
- return self.revision_map.get_current_head()
-
- def get_heads(self):
- """Return all "versioned head" revisions as strings.
-
- This is normally a list of length one,
- unless branches are present. The
- :meth:`.ScriptDirectory.get_current_head()` method
- can be used normally when a script directory
- has only one head.
-
- :return: a tuple of string revision numbers.
- """
- return list(self.revision_map.heads)
-
- def get_base(self):
- """Return the "base" revision as a string.
-
- This is the revision number of the script that
- has a ``down_revision`` of None.
-
- If the script directory has multiple bases, an error is raised;
- :meth:`.ScriptDirectory.get_bases` should be
- preferred.
-
- """
- bases = self.get_bases()
- if len(bases) > 1:
- raise util.CommandError(
- "The script directory has multiple bases. "
- "Please use get_bases().")
- elif bases:
- return bases[0]
- else:
- return None
-
- def get_bases(self):
-        """Return all "base" revisions as strings.
-
- This is the revision number of all scripts that
- have a ``down_revision`` of None.
-
- .. versionadded:: 0.7.0
-
- """
- return list(self.revision_map.bases)
-
- def _upgrade_revs(self, destination, current_rev):
- with self._catch_revision_errors(
- ancestor="Destination %(end)s is not a valid upgrade "
- "target from current head(s)", end=destination):
- revs = self.revision_map.iterate_revisions(
- destination, current_rev, implicit_base=True)
- revs = list(revs)
- return [
- migration.MigrationStep.upgrade_from_script(
- self.revision_map, script)
- for script in reversed(list(revs))
- ]
-
- def _downgrade_revs(self, destination, current_rev):
- with self._catch_revision_errors(
- ancestor="Destination %(end)s is not a valid downgrade "
- "target from current head(s)", end=destination):
- revs = self.revision_map.iterate_revisions(
- current_rev, destination, select_for_downgrade=True)
- return [
- migration.MigrationStep.downgrade_from_script(
- self.revision_map, script)
- for script in revs
- ]
-
- def _stamp_revs(self, revision, heads):
- with self._catch_revision_errors(
- multiple_heads="Multiple heads are present; please specify a "
- "single target revision"):
-
- heads = self.get_revisions(heads)
-
- # filter for lineage will resolve things like
- # branchname@base, version@base, etc.
- filtered_heads = self.revision_map.filter_for_lineage(
- heads, revision, include_dependencies=True)
-
- steps = []
-
- dests = self.get_revisions(revision) or [None]
- for dest in dests:
- if dest is None:
- # dest is 'base'. Return a "delete branch" migration
- # for all applicable heads.
- steps.extend([
- migration.StampStep(head.revision, None, False, True,
- self.revision_map)
- for head in filtered_heads
- ])
- continue
- elif dest in filtered_heads:
- # the dest is already in the version table, do nothing.
- continue
-
- # figure out if the dest is a descendant or an
- # ancestor of the selected nodes
- descendants = set(
- self.revision_map._get_descendant_nodes([dest]))
- ancestors = set(self.revision_map._get_ancestor_nodes([dest]))
-
- if descendants.intersection(filtered_heads):
- # heads are above the target, so this is a downgrade.
- # we can treat them as a "merge", single step.
- assert not ancestors.intersection(filtered_heads)
- todo_heads = [head.revision for head in filtered_heads]
- step = migration.StampStep(
- todo_heads, dest.revision, False, False,
- self.revision_map)
- steps.append(step)
- continue
- elif ancestors.intersection(filtered_heads):
- # heads are below the target, so this is an upgrade.
- # we can treat them as a "merge", single step.
- todo_heads = [head.revision for head in filtered_heads]
- step = migration.StampStep(
- todo_heads, dest.revision, True, False,
- self.revision_map)
- steps.append(step)
- continue
- else:
- # destination is in a branch not represented,
- # treat it as new branch
- step = migration.StampStep((), dest.revision, True, True,
- self.revision_map)
- steps.append(step)
- continue
- return steps
-
- def run_env(self):
- """Run the script environment.
-
- This basically runs the ``env.py`` script present
- in the migration environment. It is called exclusively
- by the command functions in :mod:`alembic.command`.
-
-
- """
- util.load_python_file(self.dir, 'env.py')
-
- @property
- def env_py_location(self):
- return os.path.abspath(os.path.join(self.dir, "env.py"))
-
- def _generate_template(self, src, dest, **kw):
- util.status("Generating %s" % os.path.abspath(dest),
- util.template_to_file,
- src,
- dest,
- self.output_encoding,
- **kw
- )
-
- def _copy_file(self, src, dest):
- util.status("Generating %s" % os.path.abspath(dest),
- shutil.copy,
- src, dest)
-
- def _ensure_directory(self, path):
- path = os.path.abspath(path)
- if not os.path.exists(path):
- util.status(
- "Creating directory %s" % path,
- os.makedirs, path)
-
- def _generate_create_date(self):
- if self.timezone is not None:
- # First, assume correct capitalization
- tzinfo = tz.gettz(self.timezone)
- if tzinfo is None:
- # Fall back to uppercase
- tzinfo = tz.gettz(self.timezone.upper())
- if tzinfo is None:
- raise util.CommandError(
- "Can't locate timezone: %s" % self.timezone)
- create_date = datetime.datetime.utcnow().replace(
- tzinfo=tz.tzutc()).astimezone(tzinfo)
- else:
- create_date = datetime.datetime.now()
- return create_date
-
- def generate_revision(
- self, revid, message, head=None,
- refresh=False, splice=False, branch_labels=None,
- version_path=None, depends_on=None, **kw):
- """Generate a new revision file.
-
- This runs the ``script.py.mako`` template, given
- template arguments, and creates a new file.
-
- :param revid: String revision id. Typically this
- comes from ``alembic.util.rev_id()``.
- :param message: the revision message, the one passed
- by the -m argument to the ``revision`` command.
- :param head: the head revision to generate against. Defaults
- to the current "head" if no branches are present, else raises
- an exception.
-
- .. versionadded:: 0.7.0
-
- :param splice: if True, allow the "head" version to not be an
- actual head; otherwise, the selected head must be a head
- (e.g. endpoint) revision.
- :param refresh: deprecated.
-
- """
- if head is None:
- head = "head"
-
- try:
- Script.verify_rev_id(revid)
- except revision.RevisionError as err:
- compat.raise_from_cause(util.CommandError(err.args[0]))
-
- with self._catch_revision_errors(multiple_heads=(
- "Multiple heads are present; please specify the head "
- "revision on which the new revision should be based, "
- "or perform a merge."
- )):
- heads = self.revision_map.get_revisions(head)
-
- if len(set(heads)) != len(heads):
- raise util.CommandError("Duplicate head revisions specified")
-
- create_date = self._generate_create_date()
-
- if version_path is None:
- if len(self._version_locations) > 1:
- for head in heads:
- if head is not None:
- version_path = os.path.dirname(head.path)
- break
- else:
- raise util.CommandError(
- "Multiple version locations present, "
- "please specify --version-path")
- else:
- version_path = self.versions
-
- norm_path = os.path.normpath(os.path.abspath(version_path))
- for vers_path in self._version_locations:
- if os.path.normpath(vers_path) == norm_path:
- break
- else:
- raise util.CommandError(
- "Path %s is not represented in current "
- "version locations" % version_path)
-
- if self.version_locations:
- self._ensure_directory(version_path)
-
- path = self._rev_path(version_path, revid, message, create_date)
-
- if not splice:
- for head in heads:
- if head is not None and not head.is_head:
- raise util.CommandError(
- "Revision %s is not a head revision; please specify "
- "--splice to create a new branch from this revision"
- % head.revision)
-
- if depends_on:
- with self._catch_revision_errors():
- depends_on = [
- dep
- if dep in rev.branch_labels # maintain branch labels
- else rev.revision # resolve partial revision identifiers
- for rev, dep in [
- (self.revision_map.get_revision(dep), dep)
- for dep in util.to_list(depends_on)
- ]
- ]
-
- self._generate_template(
- os.path.join(self.dir, "script.py.mako"),
- path,
- up_revision=str(revid),
- down_revision=revision.tuple_rev_as_scalar(
- tuple(h.revision if h is not None else None for h in heads)),
- branch_labels=util.to_tuple(branch_labels),
- depends_on=revision.tuple_rev_as_scalar(depends_on),
- create_date=create_date,
- comma=util.format_as_comma,
- message=message if message is not None else ("empty message"),
- **kw
- )
- try:
- script = Script._from_path(self, path)
- except revision.RevisionError as err:
- compat.raise_from_cause(util.CommandError(err.args[0]))
- if branch_labels and not script.branch_labels:
- raise util.CommandError(
- "Version %s specified branch_labels %s, however the "
- "migration file %s does not have them; have you upgraded "
- "your script.py.mako to include the "
- "'branch_labels' section?" % (
- script.revision, branch_labels, script.path
- ))
-
- self.revision_map.add_revision(script)
- return script
-
- def _rev_path(self, path, rev_id, message, create_date):
- slug = "_".join(_slug_re.findall(message or "")).lower()
- if len(slug) > self.truncate_slug_length:
- slug = slug[:self.truncate_slug_length].rsplit('_', 1)[0] + '_'
- filename = "%s.py" % (
- self.file_template % {
- 'rev': rev_id,
- 'slug': slug,
- 'year': create_date.year,
- 'month': create_date.month,
- 'day': create_date.day,
- 'hour': create_date.hour,
- 'minute': create_date.minute,
- 'second': create_date.second
- }
- )
- return os.path.join(path, filename)
-
-
-class Script(revision.Revision):
-
- """Represent a single revision file in a ``versions/`` directory.
-
- The :class:`.Script` instance is returned by methods
- such as :meth:`.ScriptDirectory.iterate_revisions`.
-
- """
-
- def __init__(self, module, rev_id, path):
- self.module = module
- self.path = path
- super(Script, self).__init__(
- rev_id,
- module.down_revision,
- branch_labels=util.to_tuple(
- getattr(module, 'branch_labels', None), default=()),
- dependencies=util.to_tuple(
- getattr(module, 'depends_on', None), default=())
- )
-
- module = None
- """The Python module representing the actual script itself."""
-
- path = None
- """Filesystem path of the script."""
-
- @property
- def doc(self):
-        """Return the first paragraph of the docstring given in the script."""
-
- return re.split("\n\n", self.longdoc)[0]
-
- @property
- def longdoc(self):
- """Return the docstring given in the script."""
-
- doc = self.module.__doc__
- if doc:
- if hasattr(self.module, "_alembic_source_encoding"):
- doc = doc.decode(self.module._alembic_source_encoding)
- return doc.strip()
- else:
- return ""
-
- @property
- def log_entry(self):
- entry = "Rev: %s%s%s%s\n" % (
- self.revision,
- " (head)" if self.is_head else "",
- " (branchpoint)" if self.is_branch_point else "",
- " (mergepoint)" if self.is_merge_point else "",
- )
- if self.is_merge_point:
- entry += "Merges: %s\n" % (self._format_down_revision(), )
- else:
- entry += "Parent: %s\n" % (self._format_down_revision(), )
-
- if self.dependencies:
- entry += "Also depends on: %s\n" % (
- util.format_as_comma(self.dependencies))
-
- if self.is_branch_point:
- entry += "Branches into: %s\n" % (
- util.format_as_comma(self.nextrev))
-
- if self.branch_labels:
- entry += "Branch names: %s\n" % (
- util.format_as_comma(self.branch_labels), )
-
- entry += "Path: %s\n" % (self.path,)
-
- entry += "\n%s\n" % (
- "\n".join(
- " %s" % para
- for para in self.longdoc.splitlines()
- )
- )
- return entry
-
- def __str__(self):
- return "%s -> %s%s%s%s, %s" % (
- self._format_down_revision(),
- self.revision,
- " (head)" if self.is_head else "",
- " (branchpoint)" if self.is_branch_point else "",
- " (mergepoint)" if self.is_merge_point else "",
- self.doc)
-
- def _head_only(
- self, include_branches=False, include_doc=False,
- include_parents=False, tree_indicators=True,
- head_indicators=True):
- text = self.revision
- if include_parents:
- if self.dependencies:
- text = "%s (%s) -> %s" % (
- self._format_down_revision(),
- util.format_as_comma(self.dependencies),
- text
- )
- else:
- text = "%s -> %s" % (
- self._format_down_revision(), text)
- if include_branches and self.branch_labels:
- text += " (%s)" % util.format_as_comma(self.branch_labels)
- if head_indicators or tree_indicators:
- text += "%s%s" % (
- " (head)" if self._is_real_head else "",
- " (effective head)" if self.is_head and
- not self._is_real_head else ""
- )
- if tree_indicators:
- text += "%s%s" % (
- " (branchpoint)" if self.is_branch_point else "",
- " (mergepoint)" if self.is_merge_point else ""
- )
- if include_doc:
- text += ", %s" % self.doc
- return text
-
- def cmd_format(
- self,
- verbose,
- include_branches=False, include_doc=False,
- include_parents=False, tree_indicators=True):
- if verbose:
- return self.log_entry
- else:
- return self._head_only(
- include_branches, include_doc,
- include_parents, tree_indicators)
-
- def _format_down_revision(self):
- if not self.down_revision:
- return ""
- else:
- return util.format_as_comma(self._versioned_down_revisions)
-
- @classmethod
- def _from_path(cls, scriptdir, path):
- dir_, filename = os.path.split(path)
- return cls._from_filename(scriptdir, dir_, filename)
-
- @classmethod
- def _list_py_dir(cls, scriptdir, path):
- if scriptdir.sourceless:
- # read files in version path, e.g. pyc or pyo files
- # in the immediate path
- paths = os.listdir(path)
-
- names = set(fname.split(".")[0] for fname in paths)
-
- # look for __pycache__
- if os.path.exists(os.path.join(path, '__pycache__')):
- # add all files from __pycache__ whose filename is not
- # already in the names we got from the version directory.
- # add as relative paths including __pycache__ token
- paths.extend(
- os.path.join('__pycache__', pyc)
- for pyc in os.listdir(os.path.join(path, '__pycache__'))
- if pyc.split(".")[0] not in names
- )
- return paths
- else:
- return os.listdir(path)
-
- @classmethod
- def _from_filename(cls, scriptdir, dir_, filename):
- if scriptdir.sourceless:
- py_match = _sourceless_rev_file.match(filename)
- else:
- py_match = _only_source_rev_file.match(filename)
-
- if not py_match:
- return None
-
- py_filename = py_match.group(1)
-
- if scriptdir.sourceless:
- is_c = py_match.group(2) == 'c'
- is_o = py_match.group(2) == 'o'
- else:
- is_c = is_o = False
-
- if is_o or is_c:
- py_exists = os.path.exists(os.path.join(dir_, py_filename))
- pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c"))
-
- # prefer .py over .pyc because we'd like to get the
- # source encoding; prefer .pyc over .pyo because we'd like to
- # have the docstrings which a -OO file would not have
- if py_exists or is_o and pyc_exists:
- return None
-
- module = util.load_python_file(dir_, filename)
-
- if not hasattr(module, "revision"):
-            # attempt to get the revision id from the script name;
-            # this is for legacy scripts only
- m = _legacy_rev.match(filename)
- if not m:
- raise util.CommandError(
- "Could not determine revision id from filename %s. "
- "Be sure the 'revision' variable is "
- "declared inside the script (please see 'Upgrading "
- "from Alembic 0.1 to 0.2' in the documentation)."
- % filename)
- else:
- revision = m.group(1)
- else:
- revision = module.revision
- return Script(module, revision, os.path.join(dir_, filename))
diff --git a/venv/Lib/site-packages/alembic/script/revision.py b/venv/Lib/site-packages/alembic/script/revision.py
deleted file mode 100644
index 3d9a332..0000000
--- a/venv/Lib/site-packages/alembic/script/revision.py
+++ /dev/null
@@ -1,942 +0,0 @@
-import re
-import collections
-
-from .. import util
-from sqlalchemy import util as sqlautil
-from ..util import compat
-
-_relative_destination = re.compile(r'(?:(.+?)@)?(\w+)?((?:\+|-)\d+)')
-_revision_illegal_chars = ['@', '-', '+']
-
-
-class RevisionError(Exception):
- pass
-
-
-class RangeNotAncestorError(RevisionError):
- def __init__(self, lower, upper):
- self.lower = lower
- self.upper = upper
- super(RangeNotAncestorError, self).__init__(
- "Revision %s is not an ancestor of revision %s" %
- (lower or "base", upper or "base")
- )
-
-
-class MultipleHeads(RevisionError):
- def __init__(self, heads, argument):
- self.heads = heads
- self.argument = argument
- super(MultipleHeads, self).__init__(
- "Multiple heads are present for given argument '%s'; "
- "%s" % (argument, ", ".join(heads))
- )
-
-
-class ResolutionError(RevisionError):
- def __init__(self, message, argument):
- super(ResolutionError, self).__init__(message)
- self.argument = argument
-
-
-class RevisionMap(object):
- """Maintains a map of :class:`.Revision` objects.
-
- :class:`.RevisionMap` is used by :class:`.ScriptDirectory` to maintain
- and traverse the collection of :class:`.Script` objects, which are
- themselves instances of :class:`.Revision`.
-
- """
-
- def __init__(self, generator):
- """Construct a new :class:`.RevisionMap`.
-
- :param generator: a zero-arg callable that will generate an iterable
- of :class:`.Revision` instances to be used. These are typically
- :class:`.Script` subclasses within regular Alembic use.
-
- """
- self._generator = generator
-
- @util.memoized_property
- def heads(self):
- """All "head" revisions as strings.
-
- This is normally a tuple of length one,
- unless unmerged branches are present.
-
- :return: a tuple of string revision numbers.
-
- """
-        # accessing ._revision_map initializes it; as a side effect it
-        # assigns self.heads directly, shadowing this memoized property
-        # (bases, _real_heads and _real_bases below use the same pattern)
-        self._revision_map
-        return self.heads
-
- @util.memoized_property
- def bases(self):
- """All "base" revisions as strings.
-
- These are revisions that have a ``down_revision`` of None,
- or empty tuple.
-
- :return: a tuple of string revision numbers.
-
- """
- self._revision_map
- return self.bases
-
- @util.memoized_property
- def _real_heads(self):
- """All "real" head revisions as strings.
-
- :return: a tuple of string revision numbers.
-
- """
- self._revision_map
- return self._real_heads
-
- @util.memoized_property
- def _real_bases(self):
- """All "real" base revisions as strings.
-
- :return: a tuple of string revision numbers.
-
- """
- self._revision_map
- return self._real_bases
-
- @util.memoized_property
- def _revision_map(self):
- """memoized attribute, initializes the revision map from the
- initial collection.
-
- """
- map_ = {}
-
- heads = sqlautil.OrderedSet()
- _real_heads = sqlautil.OrderedSet()
- self.bases = ()
- self._real_bases = ()
-
- has_branch_labels = set()
- has_depends_on = set()
- for revision in self._generator():
-
- if revision.revision in map_:
- util.warn("Revision %s is present more than once" %
- revision.revision)
- map_[revision.revision] = revision
- if revision.branch_labels:
- has_branch_labels.add(revision)
- if revision.dependencies:
- has_depends_on.add(revision)
- heads.add(revision.revision)
- _real_heads.add(revision.revision)
- if revision.is_base:
- self.bases += (revision.revision, )
- if revision._is_real_base:
- self._real_bases += (revision.revision, )
-
- # add the branch_labels to the map_. We'll need these
- # to resolve the dependencies.
- for revision in has_branch_labels:
- self._map_branch_labels(revision, map_)
-
- for revision in has_depends_on:
- self._add_depends_on(revision, map_)
-
- for rev in map_.values():
- for downrev in rev._all_down_revisions:
- if downrev not in map_:
- util.warn("Revision %s referenced from %s is not present"
- % (downrev, rev))
- down_revision = map_[downrev]
- down_revision.add_nextrev(rev)
- if downrev in rev._versioned_down_revisions:
- heads.discard(downrev)
- _real_heads.discard(downrev)
-
- map_[None] = map_[()] = None
- self.heads = tuple(heads)
- self._real_heads = tuple(_real_heads)
-
- for revision in has_branch_labels:
- self._add_branches(revision, map_, map_branch_labels=False)
- return map_
-
- def _map_branch_labels(self, revision, map_):
- if revision.branch_labels:
- for branch_label in revision._orig_branch_labels:
- if branch_label in map_:
- raise RevisionError(
- "Branch name '%s' in revision %s already "
- "used by revision %s" %
- (branch_label, revision.revision,
- map_[branch_label].revision)
- )
- map_[branch_label] = revision
-
- def _add_branches(self, revision, map_, map_branch_labels=True):
- if map_branch_labels:
- self._map_branch_labels(revision, map_)
-
- if revision.branch_labels:
- revision.branch_labels.update(revision.branch_labels)
- for node in self._get_descendant_nodes(
- [revision], map_, include_dependencies=False):
- node.branch_labels.update(revision.branch_labels)
-
- parent = node
- while parent and \
- not parent._is_real_branch_point and \
- not parent.is_merge_point:
-
- parent.branch_labels.update(revision.branch_labels)
- if parent.down_revision:
- parent = map_[parent.down_revision]
- else:
- break
-
- def _add_depends_on(self, revision, map_):
- if revision.dependencies:
- deps = [map_[dep] for dep in util.to_tuple(revision.dependencies)]
- revision._resolved_dependencies = tuple([d.revision for d in deps])
-
- def add_revision(self, revision, _replace=False):
-        """Add a single revision to an existing map.
-
-        This method is for single-revision use cases; it's not
-        appropriate for fully populating an entire revision map.
-
- """
- map_ = self._revision_map
- if not _replace and revision.revision in map_:
- util.warn("Revision %s is present more than once" %
- revision.revision)
- elif _replace and revision.revision not in map_:
- raise Exception("revision %s not in map" % revision.revision)
-
- map_[revision.revision] = revision
- self._add_branches(revision, map_)
- self._add_depends_on(revision, map_)
-
- if revision.is_base:
- self.bases += (revision.revision, )
- if revision._is_real_base:
- self._real_bases += (revision.revision, )
- for downrev in revision._all_down_revisions:
- if downrev not in map_:
- util.warn(
- "Revision %s referenced from %s is not present"
- % (downrev, revision)
- )
- map_[downrev].add_nextrev(revision)
- if revision._is_real_head:
- self._real_heads = tuple(
- head for head in self._real_heads
- if head not in
- set(revision._all_down_revisions).union([revision.revision])
- ) + (revision.revision,)
- if revision.is_head:
- self.heads = tuple(
- head for head in self.heads
- if head not in
- set(revision._versioned_down_revisions).union([revision.revision])
- ) + (revision.revision,)
-
- def get_current_head(self, branch_label=None):
- """Return the current head revision.
-
- If the script directory has multiple heads
- due to branching, an error is raised;
- :meth:`.ScriptDirectory.get_heads` should be
- preferred.
-
- :param branch_label: optional branch name which will limit the
- heads considered to those which include that branch_label.
-
- :return: a string revision number.
-
- .. seealso::
-
- :meth:`.ScriptDirectory.get_heads`
-
- """
- current_heads = self.heads
- if branch_label:
- current_heads = self.filter_for_lineage(current_heads, branch_label)
- if len(current_heads) > 1:
- raise MultipleHeads(
- current_heads,
- "%s@head" % branch_label if branch_label else "head")
-
- if current_heads:
- return current_heads[0]
- else:
- return None
-
- def _get_base_revisions(self, identifier):
- return self.filter_for_lineage(self.bases, identifier)
-
- def get_revisions(self, id_):
- """Return the :class:`.Revision` instances with the given rev id
- or identifiers.
-
- May be given a single identifier, a sequence of identifiers, or the
- special symbols "head" or "base". The result is a tuple of one
- or more identifiers, or an empty tuple in the case of "base".
-
- In the cases where 'head', 'heads' is requested and the
- revision map is empty, returns an empty tuple.
-
- Supports partial identifiers, where the given identifier
- is matched against all identifiers that start with the given
- characters; if there is exactly one match, that determines the
- full revision.
-
- """
- if isinstance(id_, (list, tuple, set, frozenset)):
- return sum([self.get_revisions(id_elem) for id_elem in id_], ())
- else:
- resolved_id, branch_label = self._resolve_revision_number(id_)
- return tuple(
- self._revision_for_ident(rev_id, branch_label)
- for rev_id in resolved_id)
-
- def get_revision(self, id_):
- """Return the :class:`.Revision` instance with the given rev id.
-
- If a symbolic name such as "head" or "base" is given, resolves
- the identifier into the current head or base revision. If the symbolic
- name refers to multiples, :class:`.MultipleHeads` is raised.
-
- Supports partial identifiers, where the given identifier
- is matched against all identifiers that start with the given
- characters; if there is exactly one match, that determines the
- full revision.
-
- """
-
- resolved_id, branch_label = self._resolve_revision_number(id_)
- if len(resolved_id) > 1:
- raise MultipleHeads(resolved_id, id_)
- elif resolved_id:
- resolved_id = resolved_id[0]
-
- return self._revision_for_ident(resolved_id, branch_label)
-
- def _resolve_branch(self, branch_label):
- try:
- branch_rev = self._revision_map[branch_label]
- except KeyError:
- try:
- nonbranch_rev = self._revision_for_ident(branch_label)
- except ResolutionError:
- raise ResolutionError(
- "No such branch: '%s'" % branch_label, branch_label)
- else:
- return nonbranch_rev
- else:
- return branch_rev
-
- def _revision_for_ident(self, resolved_id, check_branch=None):
- if check_branch:
- branch_rev = self._resolve_branch(check_branch)
- else:
- branch_rev = None
-
- try:
- revision = self._revision_map[resolved_id]
- except KeyError:
- # break out to avoid misleading py3k stack traces
- revision = False
- if revision is False:
- # do a partial lookup
- revs = [x for x in self._revision_map
- if x and x.startswith(resolved_id)]
- if branch_rev:
- revs = self.filter_for_lineage(revs, check_branch)
- if not revs:
- raise ResolutionError(
- "No such revision or branch '%s'" % resolved_id,
- resolved_id)
- elif len(revs) > 1:
- raise ResolutionError(
- "Multiple revisions start "
- "with '%s': %s..." % (
- resolved_id,
- ", ".join("'%s'" % r for r in revs[0:3])
- ), resolved_id)
- else:
- revision = self._revision_map[revs[0]]
-
- if check_branch and revision is not None:
- if not self._shares_lineage(
- revision.revision, branch_rev.revision):
- raise ResolutionError(
- "Revision %s is not a member of branch '%s'" %
- (revision.revision, check_branch), resolved_id)
- return revision
-
- def _filter_into_branch_heads(self, targets):
- targets = set(targets)
-
- for rev in list(targets):
- if targets.intersection(
- self._get_descendant_nodes(
- [rev], include_dependencies=False)).\
- difference([rev]):
- targets.discard(rev)
- return targets
-
- def filter_for_lineage(
- self, targets, check_against, include_dependencies=False):
- id_, branch_label = self._resolve_revision_number(check_against)
-
- shares = []
- if branch_label:
- shares.append(branch_label)
- if id_:
- shares.extend(id_)
-
- return [
- tg for tg in targets
- if self._shares_lineage(
- tg, shares, include_dependencies=include_dependencies)]
-
- def _shares_lineage(
- self, target, test_against_revs, include_dependencies=False):
- if not test_against_revs:
- return True
- if not isinstance(target, Revision):
- target = self._revision_for_ident(target)
-
- test_against_revs = [
- self._revision_for_ident(test_against_rev)
- if not isinstance(test_against_rev, Revision)
- else test_against_rev
- for test_against_rev
- in util.to_tuple(test_against_revs, default=())
- ]
-
- return bool(
- set(self._get_descendant_nodes([target],
- include_dependencies=include_dependencies))
- .union(self._get_ancestor_nodes([target],
- include_dependencies=include_dependencies))
- .intersection(test_against_revs)
- )
-
- def _resolve_revision_number(self, id_):
- if isinstance(id_, compat.string_types) and "@" in id_:
- branch_label, id_ = id_.split('@', 1)
- else:
- branch_label = None
-
- # ensure map is loaded
- self._revision_map
- if id_ == 'heads':
- if branch_label:
- return self.filter_for_lineage(
- self.heads, branch_label), branch_label
- else:
- return self._real_heads, branch_label
- elif id_ == 'head':
- current_head = self.get_current_head(branch_label)
- if current_head:
- return (current_head, ), branch_label
- else:
- return (), branch_label
- elif id_ == 'base' or id_ is None:
- return (), branch_label
- else:
- return util.to_tuple(id_, default=None), branch_label
-
- def _relative_iterate(
- self, destination, source, is_upwards,
- implicit_base, inclusive, assert_relative_length):
- if isinstance(destination, compat.string_types):
- match = _relative_destination.match(destination)
- if not match:
- return None
- else:
- return None
-
- relative = int(match.group(3))
- symbol = match.group(2)
- branch_label = match.group(1)
-
- reldelta = 1 if inclusive and not symbol else 0
-
- if is_upwards:
- if branch_label:
- from_ = "%s@head" % branch_label
- elif symbol:
- if symbol.startswith("head"):
- from_ = symbol
- else:
- from_ = "%s@head" % symbol
- else:
- from_ = "head"
- to_ = source
- else:
- if branch_label:
- to_ = "%s@base" % branch_label
- elif symbol:
- to_ = "%s@base" % symbol
- else:
- to_ = "base"
- from_ = source
-
- revs = list(
- self._iterate_revisions(
- from_, to_,
- inclusive=inclusive, implicit_base=implicit_base))
-
- if symbol:
- if branch_label:
- symbol_rev = self.get_revision(
- "%s@%s" % (branch_label, symbol))
- else:
- symbol_rev = self.get_revision(symbol)
- if symbol.startswith("head"):
- index = 0
- elif symbol == "base":
- index = len(revs) - 1
- else:
- range_ = compat.range(len(revs) - 1, 0, -1)
- for index in range_:
- if symbol_rev.revision == revs[index].revision:
- break
- else:
- index = 0
- else:
- index = 0
- if is_upwards:
- revs = revs[index - relative - reldelta:]
- if not index and assert_relative_length and \
- len(revs) < abs(relative - reldelta):
- raise RevisionError(
- "Relative revision %s didn't "
- "produce %d migrations" % (destination, abs(relative)))
- else:
- revs = revs[0:index - relative + reldelta]
- if not index and assert_relative_length and \
- len(revs) != abs(relative) + reldelta:
- raise RevisionError(
- "Relative revision %s didn't "
- "produce %d migrations" % (destination, abs(relative)))
-
- return iter(revs)
-
- def iterate_revisions(
- self, upper, lower, implicit_base=False, inclusive=False,
- assert_relative_length=True, select_for_downgrade=False):
- """Iterate through script revisions, starting at the given
- upper revision identifier and ending at the lower.
-
- The traversal uses strictly the `down_revision`
- marker inside each migration script, so
- it is a requirement that upper >= lower,
- else you'll get nothing back.
-
- The iterator yields :class:`.Revision` objects.
-
- """
-
- relative_upper = self._relative_iterate(
- upper, lower, True, implicit_base,
- inclusive, assert_relative_length
- )
- if relative_upper:
- return relative_upper
-
- relative_lower = self._relative_iterate(
- lower, upper, False, implicit_base,
- inclusive, assert_relative_length
- )
- if relative_lower:
- return relative_lower
-
- return self._iterate_revisions(
- upper, lower, inclusive=inclusive, implicit_base=implicit_base,
- select_for_downgrade=select_for_downgrade)
-
- def _get_descendant_nodes(
- self, targets, map_=None, check=False,
- omit_immediate_dependencies=False, include_dependencies=True):
-
- if omit_immediate_dependencies:
- def fn(rev):
- if rev not in targets:
- return rev._all_nextrev
- else:
- return rev.nextrev
- elif include_dependencies:
- def fn(rev):
- return rev._all_nextrev
- else:
- def fn(rev):
- return rev.nextrev
-
- return self._iterate_related_revisions(
- fn, targets, map_=map_, check=check
- )
-
- def _get_ancestor_nodes(
- self, targets, map_=None, check=False, include_dependencies=True):
-
- if include_dependencies:
- def fn(rev):
- return rev._all_down_revisions
- else:
- def fn(rev):
- return rev._versioned_down_revisions
-
- return self._iterate_related_revisions(
- fn, targets, map_=map_, check=check
- )
-
- def _iterate_related_revisions(self, fn, targets, map_, check=False):
- if map_ is None:
- map_ = self._revision_map
-
- seen = set()
- todo = collections.deque()
- for target in targets:
-
- todo.append(target)
- if check:
- per_target = set()
-
- while todo:
- rev = todo.pop()
- if check:
- per_target.add(rev)
-
- if rev in seen:
- continue
- seen.add(rev)
- todo.extend(
- map_[rev_id] for rev_id in fn(rev))
- yield rev
- if check:
- overlaps = per_target.intersection(targets).\
- difference([target])
- if overlaps:
- raise RevisionError(
- "Requested revision %s overlaps with "
- "other requested revisions %s" % (
- target.revision,
- ", ".join(r.revision for r in overlaps)
- )
- )
-
- def _iterate_revisions(
- self, upper, lower, inclusive=True, implicit_base=False,
- select_for_downgrade=False):
- """iterate revisions from upper to lower.
-
- The traversal is depth-first within branches, and breadth-first
- across branches as a whole.
-
- """
-
- requested_lowers = self.get_revisions(lower)
-
- # some complexity to accommodate an iteration where some
- # branches are starting from nothing, and others are starting
- # from a given point. Additionally, if the bottom branch
- # is specified using a branch identifier, then we limit operations
- # to just that branch.
-
- limit_to_lower_branch = \
- isinstance(lower, compat.string_types) and lower.endswith('@base')
-
- uppers = util.dedupe_tuple(self.get_revisions(upper))
-
- if not uppers and not requested_lowers:
- return
-
- upper_ancestors = set(self._get_ancestor_nodes(uppers, check=True))
-
- if limit_to_lower_branch:
- lowers = self.get_revisions(self._get_base_revisions(lower))
- elif implicit_base and requested_lowers:
- lower_ancestors = set(
- self._get_ancestor_nodes(requested_lowers)
- )
- lower_descendants = set(
- self._get_descendant_nodes(requested_lowers)
- )
- base_lowers = set()
- candidate_lowers = upper_ancestors.\
- difference(lower_ancestors).\
- difference(lower_descendants)
- for rev in candidate_lowers:
- for downrev in rev._all_down_revisions:
- if self._revision_map[downrev] in candidate_lowers:
- break
- else:
- base_lowers.add(rev)
- lowers = base_lowers.union(requested_lowers)
- elif implicit_base:
- base_lowers = set(self.get_revisions(self._real_bases))
- lowers = base_lowers.union(requested_lowers)
- elif not requested_lowers:
- lowers = set(self.get_revisions(self._real_bases))
- else:
- lowers = requested_lowers
-
- # represents all nodes we will produce
- total_space = set(
- rev.revision for rev in upper_ancestors).intersection(
- rev.revision for rev
- in self._get_descendant_nodes(
- lowers, check=True,
- omit_immediate_dependencies=(
- select_for_downgrade and requested_lowers
- )
- )
- )
-
- if not total_space:
- # no nodes. determine if this is an invalid range
- # or not.
- start_from = set(requested_lowers)
- start_from.update(
- self._get_ancestor_nodes(
- list(start_from), include_dependencies=True)
- )
-
- # determine all the current branch points represented
- # by requested_lowers
- start_from = self._filter_into_branch_heads(start_from)
-
- # if the requested start is one of those branch points,
- # then just return empty set
- if start_from.intersection(upper_ancestors):
- return
- else:
- # otherwise, they requested nodes out of
- # order
- raise RangeNotAncestorError(lower, upper)
-
- # organize branch points to be consumed separately from
- # member nodes
- branch_todo = set(
- rev for rev in
- (self._revision_map[rev] for rev in total_space)
- if rev._is_real_branch_point and
- len(total_space.intersection(rev._all_nextrev)) > 1
- )
-
- # it's not possible for any "uppers" to be in branch_todo,
- # because the ._all_nextrev of those nodes is not in total_space
- #assert not branch_todo.intersection(uppers)
-
- todo = collections.deque(
- r for r in uppers
- if r.revision in total_space
- )
-
- # iterate for total_space being emptied out
- total_space_modified = True
- while total_space:
-
- if not total_space_modified:
- raise RevisionError(
- "Dependency resolution failed; iteration can't proceed")
- total_space_modified = False
- # when everything non-branch pending is consumed,
- # add to the todo any branch nodes that have no
- # descendants left in the queue
- if not todo:
- todo.extendleft(
- sorted(
- (
- rev for rev in branch_todo
- if not rev._all_nextrev.intersection(total_space)
- ),
- # favor "revisioned" branch points before
- # dependent ones
- key=lambda rev: 0 if rev.is_branch_point else 1
- )
- )
- branch_todo.difference_update(todo)
- # iterate nodes that are in the immediate todo
- while todo:
- rev = todo.popleft()
- total_space.remove(rev.revision)
- total_space_modified = True
-
- # do depth first for elements within branches,
- # don't consume any actual branch nodes
- todo.extendleft([
- self._revision_map[downrev]
- for downrev in reversed(rev._all_down_revisions)
- if self._revision_map[downrev] not in branch_todo
- and downrev in total_space])
-
- if not inclusive and rev in requested_lowers:
- continue
- yield rev
-
- assert not branch_todo
-
-
-class Revision(object):
- """Base class for revisioned objects.
-
- The :class:`.Revision` class is the base of the more public-facing
- :class:`.Script` object, which represents a migration script.
- The mechanics of revision management and traversal are encapsulated
- within :class:`.Revision`, while :class:`.Script` applies this logic
- to Python files in a version directory.
-
- """
- nextrev = frozenset()
- """following revisions, based on down_revision only."""
-
- _all_nextrev = frozenset()
-
- revision = None
- """The string revision number."""
-
- down_revision = None
- """The ``down_revision`` identifier(s) within the migration script.
-
- Note that the total set of "down" revisions is
- down_revision + dependencies.
-
- """
-
- dependencies = None
- """Additional revisions which this revision is dependent on.
-
- From a migration standpoint, these dependencies are added to the
- down_revision to form the full iteration. However, the separation
- of down_revision from "dependencies" is to assist in navigating
- a history that contains many branches, typically a multi-root scenario.
-
- """
-
- branch_labels = None
- """Optional string/tuple of symbolic names to apply to this
- revision's branch"""
-
- @classmethod
- def verify_rev_id(cls, revision):
- illegal_chars = set(revision).intersection(_revision_illegal_chars)
- if illegal_chars:
- raise RevisionError(
- "Character(s) '%s' not allowed in revision identifier '%s'" % (
- ", ".join(sorted(illegal_chars)),
- revision
- )
- )
-
- def __init__(
- self, revision, down_revision,
- dependencies=None, branch_labels=None):
- self.verify_rev_id(revision)
- self.revision = revision
- self.down_revision = tuple_rev_as_scalar(down_revision)
- self.dependencies = tuple_rev_as_scalar(dependencies)
- self._resolved_dependencies = ()
- self._orig_branch_labels = util.to_tuple(branch_labels, default=())
- self.branch_labels = set(self._orig_branch_labels)
-
- def __repr__(self):
- args = [
- repr(self.revision),
- repr(self.down_revision)
- ]
- if self.dependencies:
- args.append("dependencies=%r" % (self.dependencies,))
- if self.branch_labels:
- args.append("branch_labels=%r" % (self.branch_labels,))
- return "%s(%s)" % (
- self.__class__.__name__,
- ", ".join(args)
- )
-
- def add_nextrev(self, revision):
- self._all_nextrev = self._all_nextrev.union([revision.revision])
- if self.revision in revision._versioned_down_revisions:
- self.nextrev = self.nextrev.union([revision.revision])
-
- @property
- def _all_down_revisions(self):
- return util.to_tuple(self.down_revision, default=()) + \
- self._resolved_dependencies
-
- @property
- def _versioned_down_revisions(self):
- return util.to_tuple(self.down_revision, default=())
-
- @property
- def is_head(self):
- """Return True if this :class:`.Revision` is a 'head' revision.
-
- This is determined based on whether any other :class:`.Script`
- within the :class:`.ScriptDirectory` refers to this
- :class:`.Script`. Multiple heads can be present.
-
- """
- return not bool(self.nextrev)
-
- @property
- def _is_real_head(self):
- return not bool(self._all_nextrev)
-
- @property
- def is_base(self):
- """Return True if this :class:`.Revision` is a 'base' revision."""
-
- return self.down_revision is None
-
- @property
- def _is_real_base(self):
- """Return True if this :class:`.Revision` is a "real" base revision,
- e.g. that it has no dependencies either."""
-
-        # we use self.dependencies here because this is called during
-        # initialization, where _resolved_dependencies isn't set up yet
- return self.down_revision is None and self.dependencies is None
-
- @property
- def is_branch_point(self):
- """Return True if this :class:`.Script` is a branch point.
-
- A branchpoint is defined as a :class:`.Script` which is referred
- to by more than one succeeding :class:`.Script`, that is more
- than one :class:`.Script` has a `down_revision` identifier pointing
- here.
-
- """
- return len(self.nextrev) > 1
-
- @property
- def _is_real_branch_point(self):
- """Return True if this :class:`.Script` is a 'real' branch point,
- taking into account dependencies as well.
-
- """
- return len(self._all_nextrev) > 1
-
- @property
- def is_merge_point(self):
- """Return True if this :class:`.Script` is a merge point."""
-
- return len(self._versioned_down_revisions) > 1
-
-
-def tuple_rev_as_scalar(rev):
- if not rev:
- return None
- elif len(rev) == 1:
- return rev[0]
- else:
- return rev
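
The ``RevisionMap``/``Revision`` machinery deleted above can be exercised
standalone. A minimal sketch with made-up revision ids, assuming the module
path shown in this diff::

    from alembic.script.revision import Revision, RevisionMap

    def scripts():  # the zero-arg generator RevisionMap expects
        yield Revision('a1', None)   # base: down_revision is None
        yield Revision('b2', 'a1')
        yield Revision('c3', 'a1')   # second branch off 'a1'

    rmap = RevisionMap(scripts)
    assert rmap.bases == ('a1',)
    assert sorted(rmap.heads) == ['b2', 'c3']   # two unmerged heads

    # traversal follows down_revision links from upper to lower
    revs = [r.revision for r in rmap.iterate_revisions('b2', 'base')]
    assert revs == ['b2', 'a1']
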
diff --git a/venv/Lib/site-packages/alembic/templates/generic/README b/venv/Lib/site-packages/alembic/templates/generic/README
deleted file mode 100644
index 98e4f9c..0000000
--- a/venv/Lib/site-packages/alembic/templates/generic/README
+++ /dev/null
@@ -1 +0,0 @@
-Generic single-database configuration.
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic/templates/generic/alembic.ini.mako b/venv/Lib/site-packages/alembic/templates/generic/alembic.ini.mako
deleted file mode 100644
index 9ee59db..0000000
--- a/venv/Lib/site-packages/alembic/templates/generic/alembic.ini.mako
+++ /dev/null
@@ -1,74 +0,0 @@
-# A generic, single database configuration.
-
-[alembic]
-# path to migration scripts
-script_location = ${script_location}
-
-# template used to generate migration files
-# file_template = %%(rev)s_%%(slug)s
-
-# timezone to use when rendering the date
-# within the migration file as well as the filename.
-# string value is passed to dateutil.tz.gettz()
-# leave blank for localtime
-# timezone =
-
-# max length of characters to apply to the
-# "slug" field
-# truncate_slug_length = 40
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# set to 'true' to allow .pyc and .pyo files without
-# a source .py file to be detected as revisions in the
-# versions/ directory
-# sourceless = false
-
-# version location specification; this defaults
-# to ${script_location}/versions. When using multiple version
-# directories, initial revisions must be specified with --version-path
-# version_locations = %(here)s/bar %(here)s/bat ${script_location}/versions
-
-# the output encoding used when revision files
-# are written from script.py.mako
-# output_encoding = utf-8
-
-sqlalchemy.url = driver://user:pass@localhost/dbname
-
-
-# Logging configuration
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = INFO
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
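
For reference, the commented-out ``file_template`` option above feeds the
``_rev_path()`` helper in script/base.py (deleted earlier in this diff). An
illustration of how the default template expands, with a made-up revision id
and message::

    file_template = "%(rev)s_%(slug)s"
    params = {'rev': '27c6a30d7c24', 'slug': 'add_account_table',
              'year': 2018, 'month': 1, 'day': 5,
              'hour': 10, 'minute': 13, 'second': 8}
    # %-formatting ignores the unused date keys, as _rev_path relies on
    filename = "%s.py" % (file_template % params)
    assert filename == "27c6a30d7c24_add_account_table.py"
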
diff --git a/venv/Lib/site-packages/alembic/templates/generic/env.py b/venv/Lib/site-packages/alembic/templates/generic/env.py
deleted file mode 100644
index 058378b..0000000
--- a/venv/Lib/site-packages/alembic/templates/generic/env.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from __future__ import with_statement
-from alembic import context
-from sqlalchemy import engine_from_config, pool
-from logging.config import fileConfig
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-
-# Interpret the config file for Python logging.
-# This sets up the loggers.
-fileConfig(config.config_file_name)
-
-# add your model's MetaData object here
-# for 'autogenerate' support
-# from myapp import mymodel
-# target_metadata = mymodel.Base.metadata
-target_metadata = None
-
-# other values from the config, defined by the needs of env.py,
-# can be acquired:
-# my_important_option = config.get_main_option("my_important_option")
-# ... etc.
-
-
-def run_migrations_offline():
- """Run migrations in 'offline' mode.
-
- This configures the context with just a URL
- and not an Engine, though an Engine is acceptable
- here as well. By skipping the Engine creation
- we don't even need a DBAPI to be available.
-
- Calls to context.execute() here emit the given string to the
- script output.
-
- """
- url = config.get_main_option("sqlalchemy.url")
- context.configure(
- url=url, target_metadata=target_metadata, literal_binds=True)
-
- with context.begin_transaction():
- context.run_migrations()
-
-
-def run_migrations_online():
- """Run migrations in 'online' mode.
-
- In this scenario we need to create an Engine
- and associate a connection with the context.
-
- """
- connectable = engine_from_config(
- config.get_section(config.config_ini_section),
- prefix='sqlalchemy.',
- poolclass=pool.NullPool)
-
- with connectable.connect() as connection:
- context.configure(
- connection=connection,
- target_metadata=target_metadata
- )
-
- with context.begin_transaction():
- context.run_migrations()
-
-if context.is_offline_mode():
- run_migrations_offline()
-else:
- run_migrations_online()
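Note: the generic env.py above dispatches on context.is_offline_mode(): offline mode renders SQL to the script output (the `--sql` use case), online mode executes against a live connection. Its commented block also shows how autogenerate is enabled; a minimal sketch following the template's own example ("myapp" and "mymodel" are placeholder names, not real modules):

    # Hypothetical autogenerate wiring, per the template's comments;
    # "myapp.mymodel" and its declarative Base are assumed names.
    from myapp import mymodel
    target_metadata = mymodel.Base.metadata

With target_metadata left as None (the template default), `alembic revision --autogenerate` has no model metadata to compare against and cannot produce a diff.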
diff --git a/venv/Lib/site-packages/alembic/templates/generic/script.py.mako b/venv/Lib/site-packages/alembic/templates/generic/script.py.mako
deleted file mode 100644
index 2c01563..0000000
--- a/venv/Lib/site-packages/alembic/templates/generic/script.py.mako
+++ /dev/null
@@ -1,24 +0,0 @@
-"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision | comma,n}
-Create Date: ${create_date}
-
-"""
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-# revision identifiers, used by Alembic.
-revision = ${repr(up_revision)}
-down_revision = ${repr(down_revision)}
-branch_labels = ${repr(branch_labels)}
-depends_on = ${repr(depends_on)}
-
-
-def upgrade():
- ${upgrades if upgrades else "pass"}
-
-
-def downgrade():
- ${downgrades if downgrades else "pass"}
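Note: rendered through Mako, the template above expands into an ordinary revision module. An illustrative render for a first revision (message, id, and date invented):

    """add users table

    Revision ID: 1975ea83b712
    Revises:
    Create Date: 2017-01-01 12:00:00.000000

    """
    from alembic import op
    import sqlalchemy as sa

    # revision identifiers, used by Alembic.
    revision = '1975ea83b712'
    down_revision = None
    branch_labels = None
    depends_on = None


    def upgrade():
        pass


    def downgrade():
        pass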
diff --git a/venv/Lib/site-packages/alembic/templates/multidb/README b/venv/Lib/site-packages/alembic/templates/multidb/README
deleted file mode 100644
index 5db219f..0000000
--- a/venv/Lib/site-packages/alembic/templates/multidb/README
+++ /dev/null
@@ -1 +0,0 @@
-Rudimentary multi-database configuration.
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic/templates/multidb/alembic.ini.mako b/venv/Lib/site-packages/alembic/templates/multidb/alembic.ini.mako
deleted file mode 100644
index a0708ff..0000000
--- a/venv/Lib/site-packages/alembic/templates/multidb/alembic.ini.mako
+++ /dev/null
@@ -1,80 +0,0 @@
-# a multi-database configuration.
-
-[alembic]
-# path to migration scripts
-script_location = ${script_location}
-
-# template used to generate migration files
-# file_template = %%(rev)s_%%(slug)s
-
-# timezone to use when rendering the date
-# within the migration file as well as the filename.
-# string value is passed to dateutil.tz.gettz()
-# leave blank for localtime
-# timezone =
-
-# max length of characters to apply to the
-# "slug" field
-#truncate_slug_length = 40
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# set to 'true' to allow .pyc and .pyo files without
-# a source .py file to be detected as revisions in the
-# versions/ directory
-# sourceless = false
-
-# version location specification; this defaults
-# to ${script_location}/versions. When using multiple version
-# directories, initial revisions must be specified with --version-path
-# version_locations = %(here)s/bar %(here)s/bat ${script_location}/versions
-
-# the output encoding used when revision files
-# are written from script.py.mako
-# output_encoding = utf-8
-
-databases = engine1, engine2
-
-[engine1]
-sqlalchemy.url = driver://user:pass@localhost/dbname
-
-[engine2]
-sqlalchemy.url = driver://user:pass@localhost/dbname2
-
-
-# Logging configuration
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = INFO
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
diff --git a/venv/Lib/site-packages/alembic/templates/multidb/env.py b/venv/Lib/site-packages/alembic/templates/multidb/env.py
deleted file mode 100644
index db24173..0000000
--- a/venv/Lib/site-packages/alembic/templates/multidb/env.py
+++ /dev/null
@@ -1,133 +0,0 @@
-from __future__ import with_statement
-from alembic import context
-from sqlalchemy import engine_from_config, pool
-from logging.config import fileConfig
-import logging
-import re
-
-USE_TWOPHASE = False
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-
-# Interpret the config file for Python logging.
-# This line essentially sets up loggers.
-fileConfig(config.config_file_name)
-logger = logging.getLogger('alembic.env')
-
-# gather section names referring to different
-# databases. These are named "engine1", "engine2"
-# in the sample .ini file.
-db_names = config.get_main_option('databases')
-
-# add your model's MetaData objects here
-# for 'autogenerate' support. These must be set
-# up to hold just those tables targeting a
-# particular database. table.tometadata() may be
-# helpful here in case a "copy" of
-# a MetaData is needed.
-# from myapp import mymodel
-# target_metadata = {
-# 'engine1':mymodel.metadata1,
-# 'engine2':mymodel.metadata2
-# }
-target_metadata = {}
-
-# other values from the config, defined by the needs of env.py,
-# can be acquired:
-# my_important_option = config.get_main_option("my_important_option")
-# ... etc.
-
-
-def run_migrations_offline():
- """Run migrations in 'offline' mode.
-
- This configures the context with just a URL
- and not an Engine, though an Engine is acceptable
- here as well. By skipping the Engine creation
- we don't even need a DBAPI to be available.
-
- Calls to context.execute() here emit the given string to the
- script output.
-
- """
- # for the --sql use case, run migrations for each URL into
- # individual files.
-
- engines = {}
- for name in re.split(r',\s*', db_names):
- engines[name] = rec = {}
- rec['url'] = context.config.get_section_option(name,
- "sqlalchemy.url")
-
- for name, rec in engines.items():
- logger.info("Migrating database %s" % name)
- file_ = "%s.sql" % name
- logger.info("Writing output to %s" % file_)
- with open(file_, 'w') as buffer:
- context.configure(url=rec['url'], output_buffer=buffer,
- target_metadata=target_metadata.get(name),
- literal_binds=True)
- with context.begin_transaction():
- context.run_migrations(engine_name=name)
-
-
-def run_migrations_online():
- """Run migrations in 'online' mode.
-
- In this scenario we need to create an Engine
- and associate a connection with the context.
-
- """
-
- # for the direct-to-DB use case, start a transaction on all
- # engines, then run all migrations, then commit all transactions.
-
- engines = {}
- for name in re.split(r',\s*', db_names):
- engines[name] = rec = {}
- rec['engine'] = engine_from_config(
- context.config.get_section(name),
- prefix='sqlalchemy.',
- poolclass=pool.NullPool)
-
- for name, rec in engines.items():
- engine = rec['engine']
- rec['connection'] = conn = engine.connect()
-
- if USE_TWOPHASE:
- rec['transaction'] = conn.begin_twophase()
- else:
- rec['transaction'] = conn.begin()
-
- try:
- for name, rec in engines.items():
- logger.info("Migrating database %s" % name)
- context.configure(
- connection=rec['connection'],
- upgrade_token="%s_upgrades" % name,
- downgrade_token="%s_downgrades" % name,
- target_metadata=target_metadata.get(name)
- )
- context.run_migrations(engine_name=name)
-
- if USE_TWOPHASE:
- for rec in engines.values():
- rec['transaction'].prepare()
-
- for rec in engines.values():
- rec['transaction'].commit()
- except:
- for rec in engines.values():
- rec['transaction'].rollback()
- raise
- finally:
- for rec in engines.values():
- rec['connection'].close()
-
-
-if context.is_offline_mode():
- run_migrations_offline()
-else:
- run_migrations_online()
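Note: the multidb env.py splits the comma-separated `databases` option into engine section names, then either writes one .sql file per engine (offline) or opens a transaction on every engine and commits them together, rolling all back on any failure (online, optionally two-phase via USE_TWOPHASE). Following the template's own commented example, autogenerate wiring becomes a dict keyed by engine name ("myapp" and the metadata names are placeholders):

    # Hypothetical per-engine metadata for autogenerate, per the comments above.
    from myapp import mymodel
    target_metadata = {
        'engine1': mymodel.metadata1,
        'engine2': mymodel.metadata2,
    }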
diff --git a/venv/Lib/site-packages/alembic/templates/multidb/script.py.mako b/venv/Lib/site-packages/alembic/templates/multidb/script.py.mako
deleted file mode 100644
index c3970a5..0000000
--- a/venv/Lib/site-packages/alembic/templates/multidb/script.py.mako
+++ /dev/null
@@ -1,45 +0,0 @@
-<%!
-import re
-
-%>"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision | comma,n}
-Create Date: ${create_date}
-
-"""
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-# revision identifiers, used by Alembic.
-revision = ${repr(up_revision)}
-down_revision = ${repr(down_revision)}
-branch_labels = ${repr(branch_labels)}
-depends_on = ${repr(depends_on)}
-
-
-def upgrade(engine_name):
- globals()["upgrade_%s" % engine_name]()
-
-
-def downgrade(engine_name):
- globals()["downgrade_%s" % engine_name]()
-
-<%
- db_names = config.get_main_option("databases")
-%>
-
-## generate an "upgrade_() / downgrade_()" function
-## for each database name in the ini file.
-
-% for db_name in re.split(r',\s*', db_names):
-
-def upgrade_${db_name}():
- ${context.get("%s_upgrades" % db_name, "pass")}
-
-
-def downgrade_${db_name}():
- ${context.get("%s_downgrades" % db_name, "pass")}
-
-% endfor
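Note: `context.run_migrations(engine_name=name)` from the multidb env.py lands in the generated `upgrade(engine_name)` / `downgrade(engine_name)` dispatchers above, which resolve a per-database function through globals(). With `databases = engine1, engine2`, the rendered script therefore ends with one pair of functions per engine (bodies shown as the template's default):

    def upgrade_engine1():
        pass

    def downgrade_engine1():
        pass

    def upgrade_engine2():
        pass

    def downgrade_engine2():
        pass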
diff --git a/venv/Lib/site-packages/alembic/templates/pylons/README b/venv/Lib/site-packages/alembic/templates/pylons/README
deleted file mode 100644
index ed3c28e..0000000
--- a/venv/Lib/site-packages/alembic/templates/pylons/README
+++ /dev/null
@@ -1 +0,0 @@
-Configuration that reads from a Pylons project environment.
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic/templates/pylons/alembic.ini.mako b/venv/Lib/site-packages/alembic/templates/pylons/alembic.ini.mako
deleted file mode 100644
index c5cc413..0000000
--- a/venv/Lib/site-packages/alembic/templates/pylons/alembic.ini.mako
+++ /dev/null
@@ -1,40 +0,0 @@
-# a Pylons configuration.
-
-[alembic]
-# path to migration scripts
-script_location = ${script_location}
-
-# template used to generate migration files
-# file_template = %%(rev)s_%%(slug)s
-
-# timezone to use when rendering the date
-# within the migration file as well as the filename.
-# string value is passed to dateutil.tz.gettz()
-# leave blank for localtime
-# timezone =
-
-# max length of characters to apply to the
-# "slug" field
-#truncate_slug_length = 40
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-# set to 'true' to allow .pyc and .pyo files without
-# a source .py file to be detected as revisions in the
-# versions/ directory
-# sourceless = false
-
-# version location specification; this defaults
-# to ${script_location}/versions. When using multiple version
-# directories, initial revisions must be specified with --version-path
-# version_locations = %(here)s/bar %(here)s/bat ${script_location}/versions
-
-# the output encoding used when revision files
-# are written from script.py.mako
-# output_encoding = utf-8
-
-pylons_config_file = ./development.ini
-
-# that's it!
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic/templates/pylons/env.py b/venv/Lib/site-packages/alembic/templates/pylons/env.py
deleted file mode 100644
index 5ad9fd5..0000000
--- a/venv/Lib/site-packages/alembic/templates/pylons/env.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""Pylons bootstrap environment.
-
-Place 'pylons_config_file' into alembic.ini, and the application will
-be loaded from there.
-
-"""
-from alembic import context
-from paste.deploy import loadapp
-from logging.config import fileConfig
-from sqlalchemy.engine.base import Engine
-
-
-try:
-    # if a Pylons app is already loaded, don't create a new one
- from pylons import config as pylons_config
- pylons_config['__file__']
-except:
- config = context.config
- # can use config['__file__'] here, i.e. the Pylons
- # ini file, instead of alembic.ini
- config_file = config.get_main_option('pylons_config_file')
- fileConfig(config_file)
- wsgi_app = loadapp('config:%s' % config_file, relative_to='.')
-
-
-# customize this section for non-standard engine configurations.
-meta = __import__("%s.model.meta" % wsgi_app.config['pylons.package']).model.meta
-
-# add your model's MetaData object here
-# for 'autogenerate' support
-# from myapp import mymodel
-# target_metadata = mymodel.Base.metadata
-target_metadata = None
-
-
-def run_migrations_offline():
- """Run migrations in 'offline' mode.
-
- This configures the context with just a URL
- and not an Engine, though an Engine is acceptable
- here as well. By skipping the Engine creation
- we don't even need a DBAPI to be available.
-
- Calls to context.execute() here emit the given string to the
- script output.
-
- """
- context.configure(
- url=meta.engine.url, target_metadata=target_metadata,
- literal_binds=True)
- with context.begin_transaction():
- context.run_migrations()
-
-
-def run_migrations_online():
- """Run migrations in 'online' mode.
-
- In this scenario we need to create an Engine
- and associate a connection with the context.
-
- """
- # specify here how the engine is acquired
- # engine = meta.engine
- raise NotImplementedError("Please specify engine connectivity here")
-
- with engine.connect() as connection:
- context.configure(
- connection=connection,
- target_metadata=target_metadata
- )
-
- with context.begin_transaction():
- context.run_migrations()
-
-if context.is_offline_mode():
- run_migrations_offline()
-else:
- run_migrations_online()
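Note: run_migrations_online() in the Pylons template deliberately raises NotImplementedError, leaving the connection block after it unreachable until the user supplies engine acquisition; `engine` is otherwise undefined there. Per the template's own commented hint, the usual completion (assuming the `meta` module bound earlier in this file) is to drop the raise and assign:

    # Replace the NotImplementedError with real engine acquisition, e.g.:
    engine = meta.engine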
diff --git a/venv/Lib/site-packages/alembic/templates/pylons/script.py.mako b/venv/Lib/site-packages/alembic/templates/pylons/script.py.mako
deleted file mode 100644
index 2c01563..0000000
--- a/venv/Lib/site-packages/alembic/templates/pylons/script.py.mako
+++ /dev/null
@@ -1,24 +0,0 @@
-"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision | comma,n}
-Create Date: ${create_date}
-
-"""
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-# revision identifiers, used by Alembic.
-revision = ${repr(up_revision)}
-down_revision = ${repr(down_revision)}
-branch_labels = ${repr(branch_labels)}
-depends_on = ${repr(depends_on)}
-
-
-def upgrade():
- ${upgrades if upgrades else "pass"}
-
-
-def downgrade():
- ${downgrades if downgrades else "pass"}
diff --git a/venv/Lib/site-packages/alembic/testing/__init__.py b/venv/Lib/site-packages/alembic/testing/__init__.py
deleted file mode 100644
index 553f501..0000000
--- a/venv/Lib/site-packages/alembic/testing/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from .fixtures import TestBase
-from .assertions import eq_, ne_, is_, is_not_, assert_raises_message, \
- eq_ignore_whitespace, assert_raises
-
-from .util import provide_metadata
-
-from alembic import util
-
-
-from .config import requirements as requires
diff --git a/venv/Lib/site-packages/alembic/testing/assertions.py b/venv/Lib/site-packages/alembic/testing/assertions.py
deleted file mode 100644
index 4fb43a4..0000000
--- a/venv/Lib/site-packages/alembic/testing/assertions.py
+++ /dev/null
@@ -1,208 +0,0 @@
-from __future__ import absolute_import
-
-
-import re
-from .. import util
-from sqlalchemy.engine import default
-from ..util.compat import text_type, py3k
-import contextlib
-from sqlalchemy.util import decorator
-from sqlalchemy import exc as sa_exc
-import warnings
-from . import mock
-
-
-if not util.sqla_094:
- def eq_(a, b, msg=None):
- """Assert a == b, with repr messaging on failure."""
- assert a == b, msg or "%r != %r" % (a, b)
-
- def ne_(a, b, msg=None):
- """Assert a != b, with repr messaging on failure."""
- assert a != b, msg or "%r == %r" % (a, b)
-
- def is_(a, b, msg=None):
- """Assert a is b, with repr messaging on failure."""
- assert a is b, msg or "%r is not %r" % (a, b)
-
- def is_not_(a, b, msg=None):
- """Assert a is not b, with repr messaging on failure."""
- assert a is not b, msg or "%r is %r" % (a, b)
-
- def assert_raises(except_cls, callable_, *args, **kw):
- try:
- callable_(*args, **kw)
- success = False
- except except_cls:
- success = True
-
-        # assert outside the block so it works for AssertionError too!
- assert success, "Callable did not raise an exception"
-
- def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
- try:
- callable_(*args, **kwargs)
- assert False, "Callable did not raise an exception"
- except except_cls as e:
- assert re.search(
- msg, text_type(e), re.UNICODE), "%r !~ %s" % (msg, e)
- print(text_type(e).encode('utf-8'))
-
-else:
- from sqlalchemy.testing.assertions import eq_, ne_, is_, is_not_, \
- assert_raises_message, assert_raises
-
-
-def eq_ignore_whitespace(a, b, msg=None):
- a = re.sub(r'^\s+?|\n', "", a)
- a = re.sub(r' {2,}', " ", a)
- b = re.sub(r'^\s+?|\n', "", b)
- b = re.sub(r' {2,}', " ", b)
-
- # convert for unicode string rendering,
- # using special escape character "!U"
- if py3k:
- b = re.sub(r'!U', '', b)
- else:
- b = re.sub(r'!U', 'u', b)
-
- assert a == b, msg or "%r != %r" % (a, b)
-
-
-def assert_compiled(element, assert_string, dialect=None):
- dialect = _get_dialect(dialect)
- eq_(
- text_type(element.compile(dialect=dialect)).
- replace("\n", "").replace("\t", ""),
- assert_string.replace("\n", "").replace("\t", "")
- )
-
-
-_dialect_mods = {}
-
-
-def _get_dialect(name):
- if name is None or name == 'default':
- return default.DefaultDialect()
- else:
- try:
- dialect_mod = _dialect_mods[name]
- except KeyError:
- dialect_mod = getattr(
- __import__('sqlalchemy.dialects.%s' % name).dialects, name)
- _dialect_mods[name] = dialect_mod
- d = dialect_mod.dialect()
- if name == 'postgresql':
- d.implicit_returning = True
- elif name == 'mssql':
- d.legacy_schema_aliasing = False
- return d
-
-
-def expect_warnings(*messages, **kw):
- """Context manager which expects one or more warnings.
-
- With no arguments, squelches all SAWarnings emitted via
- sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise
- pass string expressions that will match selected warnings via regex;
- all non-matching warnings are sent through.
-
- The expect version **asserts** that the warnings were in fact seen.
-
- Note that the test suite sets SAWarning warnings to raise exceptions.
-
- """
- return _expect_warnings(sa_exc.SAWarning, messages, **kw)
-
-
-@contextlib.contextmanager
-def expect_warnings_on(db, *messages, **kw):
- """Context manager which expects one or more warnings on specific
- dialects.
-
- The expect version **asserts** that the warnings were in fact seen.
-
- """
- spec = db_spec(db)
-
- if isinstance(db, util.string_types) and not spec(config._current):
- yield
- elif not _is_excluded(*db):
- yield
- else:
- with expect_warnings(*messages, **kw):
- yield
-
-
-def emits_warning(*messages):
- """Decorator form of expect_warnings().
-
- Note that emits_warning does **not** assert that the warnings
- were in fact seen.
-
- """
-
- @decorator
- def decorate(fn, *args, **kw):
- with expect_warnings(assert_=False, *messages):
- return fn(*args, **kw)
-
- return decorate
-
-
-def emits_warning_on(db, *messages):
- """Mark a test as emitting a warning on a specific dialect.
-
- With no arguments, squelches all SAWarning failures. Or pass one or more
- strings; these will be matched to the root of the warning description by
- warnings.filterwarnings().
-
- Note that emits_warning_on does **not** assert that the warnings
- were in fact seen.
-
- """
- @decorator
- def decorate(fn, *args, **kw):
- with expect_warnings_on(db, *messages):
- return fn(*args, **kw)
-
- return decorate
-
-
-@contextlib.contextmanager
-def _expect_warnings(exc_cls, messages, regex=True, assert_=True):
-
- if regex:
- filters = [re.compile(msg, re.I) for msg in messages]
- else:
- filters = messages
-
- seen = set(filters)
-
- real_warn = warnings.warn
-
- def our_warn(msg, exception=None, *arg, **kw):
- if exception and not issubclass(exception, exc_cls):
- return real_warn(msg, exception, *arg, **kw)
-
- if not filters:
- return
-
- for filter_ in filters:
- if (regex and filter_.match(msg)) or \
- (not regex and filter_ == msg):
- seen.discard(filter_)
- break
- else:
- if exception is None:
- real_warn(msg, *arg, **kw)
- else:
- real_warn(msg, exception, *arg, **kw)
-
- with mock.patch("warnings.warn", our_warn):
- yield
-
- if assert_:
- assert not seen, "Warnings were not seen: %s" % \
- ", ".join("%r" % (s.pattern if regex else s) for s in seen)
-
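Note: this module is a compatibility shim: on SQLAlchemy 0.9.4 and later the basic assertions are re-exported from sqlalchemy.testing instead of the local definitions. (expect_warnings_on appears to reference `config` and `db_spec` without importing them, so that path would raise NameError if exercised.) A minimal sketch of the plain helpers:

    # Sketch: the assertion helpers in isolation.
    eq_(1 + 1, 2)                                    # repr-messaged equality
    assert_raises(ZeroDivisionError, lambda: 1 / 0)  # passes: exception raised
    eq_ignore_whitespace("SELECT  x \nFROM t", "SELECT x FROM t")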
diff --git a/venv/Lib/site-packages/alembic/testing/compat.py b/venv/Lib/site-packages/alembic/testing/compat.py
deleted file mode 100644
index e0af6a2..0000000
--- a/venv/Lib/site-packages/alembic/testing/compat.py
+++ /dev/null
@@ -1,13 +0,0 @@
-def get_url_driver_name(url):
- if '+' not in url.drivername:
- return url.get_dialect().driver
- else:
- return url.drivername.split('+')[1]
-
-
-def get_url_backend_name(url):
- if '+' not in url.drivername:
- return url.drivername
- else:
- return url.drivername.split('+')[0]
-
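Note: the two helpers split a SQLAlchemy URL's drivername into its DBAPI driver and backend parts. A quick check (sketch; requires SQLAlchemy):

    from sqlalchemy.engine.url import make_url

    url = make_url("postgresql+psycopg2://user:pass@localhost/dbname")
    # get_url_driver_name(url)  -> "psycopg2"
    # get_url_backend_name(url) -> "postgresql"
    # For a bare URL such as "sqlite://", the driver falls back to the
    # dialect's default (e.g. "pysqlite").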
diff --git a/venv/Lib/site-packages/alembic/testing/config.py b/venv/Lib/site-packages/alembic/testing/config.py
deleted file mode 100644
index ca28c6b..0000000
--- a/venv/Lib/site-packages/alembic/testing/config.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# testing/config.py
-# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
-#
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
- this should be removable when Alembic targets SQLAlchemy 1.0.0
-"""
-
-import collections
-
-requirements = None
-db = None
-db_url = None
-db_opts = None
-file_config = None
-test_schema = None
-test_schema_2 = None
-_current = None
-
-
-class Config(object):
- def __init__(self, db, db_opts, options, file_config):
- self._set_name(db)
- self.db = db
- self.db_opts = db_opts
- self.options = options
- self.file_config = file_config
- self.test_schema = "test_schema"
- self.test_schema_2 = "test_schema_2"
-
- _stack = collections.deque()
- _configs = set()
-
- def _set_name(self, db):
- if db.dialect.server_version_info:
- svi = ".".join(str(tok) for tok in db.dialect.server_version_info)
- self.name = "%s+%s_[%s]" % (db.name, db.driver, svi)
- else:
- self.name = "%s+%s" % (db.name, db.driver)
-
- @classmethod
- def register(cls, db, db_opts, options, file_config):
- """add a config as one of the global configs.
-
- If there are no configs set up yet, this config also
- gets set as the "_current".
- """
- cfg = Config(db, db_opts, options, file_config)
- cls._configs.add(cfg)
- return cfg
-
- @classmethod
- def set_as_current(cls, config):
- global db, _current, db_url, test_schema, test_schema_2, db_opts
- _current = config
- db_url = config.db.url
- db_opts = config.db_opts
- test_schema = config.test_schema
- test_schema_2 = config.test_schema_2
- db = config.db
-
- @classmethod
- def push_engine(cls, db):
- assert _current, "Can't push without a default Config set up"
- cls.push(
- Config(
- db, _current.db_opts, _current.options, _current.file_config)
- )
-
- @classmethod
- def push(cls, config):
- cls._stack.append(_current)
- cls.set_as_current(config)
-
- @classmethod
- def reset(cls):
- if cls._stack:
- cls.set_as_current(cls._stack[0])
- cls._stack.clear()
-
- @classmethod
- def all_configs(cls):
- return cls._configs
-
- @classmethod
- def all_dbs(cls):
- for cfg in cls.all_configs():
- yield cfg.db
-
diff --git a/venv/Lib/site-packages/alembic/testing/engines.py b/venv/Lib/site-packages/alembic/testing/engines.py
deleted file mode 100644
index dadabc8..0000000
--- a/venv/Lib/site-packages/alembic/testing/engines.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# testing/engines.py
-# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
-#
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
- this should be removable when Alembic targets SQLAlchemy 1.0.0.
-"""
-
-from __future__ import absolute_import
-
-from . import config
-
-
-def testing_engine(url=None, options=None):
- """Produce an engine configured by --options with optional overrides."""
-
- from sqlalchemy import create_engine
-
- url = url or config.db.url
- if options is None:
- options = config.db_opts
-
- engine = create_engine(url, **options)
-
- return engine
-
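Note: testing_engine defaults both the URL and the engine options from the active test config; outside the harness those globals are unset (config.db_opts is None), so a standalone call should pass options explicitly. Sketch:

    from alembic.testing.engines import testing_engine

    eng = testing_engine("sqlite://", options={})  # in-memory SQLite engine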
diff --git a/venv/Lib/site-packages/alembic/testing/env.py b/venv/Lib/site-packages/alembic/testing/env.py
deleted file mode 100644
index 792db22..0000000
--- a/venv/Lib/site-packages/alembic/testing/env.py
+++ /dev/null
@@ -1,436 +0,0 @@
-#!coding: utf-8
-
-import os
-import shutil
-import textwrap
-
-from ..util.compat import u, has_pep3147, get_current_bytecode_suffixes
-from ..script import Script, ScriptDirectory
-from .. import util
-from . import engines
-from . import provision
-
-
-def _get_staging_directory():
- if provision.FOLLOWER_IDENT:
- return "scratch_%s" % provision.FOLLOWER_IDENT
- else:
- return 'scratch'
-
-
-def staging_env(create=True, template="generic", sourceless=False):
- from alembic import command, script
- cfg = _testing_config()
- if create:
- path = os.path.join(_get_staging_directory(), 'scripts')
- if os.path.exists(path):
- shutil.rmtree(path)
- command.init(cfg, path, template=template)
- if sourceless:
- try:
- # do an import so that a .pyc/.pyo is generated.
- util.load_python_file(path, 'env.py')
- except AttributeError:
- # we don't have the migration context set up yet
-            # so running env.py raises this exception.
- # theoretically we could be using py_compiler here to
- # generate .pyc/.pyo without importing but not really
- # worth it.
- pass
- assert sourceless in (
- "pep3147_envonly", "simple", "pep3147_everything"), sourceless
- make_sourceless(
- os.path.join(path, "env.py"),
- "pep3147" if "pep3147" in sourceless else "simple"
- )
-
- sc = script.ScriptDirectory.from_config(cfg)
- return sc
-
-
-def clear_staging_env():
- shutil.rmtree(_get_staging_directory(), True)
-
-
-def script_file_fixture(txt):
- dir_ = os.path.join(_get_staging_directory(), 'scripts')
- path = os.path.join(dir_, "script.py.mako")
- with open(path, 'w') as f:
- f.write(txt)
-
-
-def env_file_fixture(txt):
- dir_ = os.path.join(_get_staging_directory(), 'scripts')
- txt = """
-from alembic import context
-
-config = context.config
-""" + txt
-
- path = os.path.join(dir_, "env.py")
- pyc_path = util.pyc_file_from_path(path)
- if pyc_path:
- os.unlink(pyc_path)
-
- with open(path, 'w') as f:
- f.write(txt)
-
-
-def _sqlite_file_db(tempname="foo.db"):
- dir_ = os.path.join(_get_staging_directory(), 'scripts')
- url = "sqlite:///%s/%s" % (dir_, tempname)
- return engines.testing_engine(url=url)
-
-
-def _sqlite_testing_config(sourceless=False):
- dir_ = os.path.join(_get_staging_directory(), 'scripts')
- url = "sqlite:///%s/foo.db" % dir_
-
- return _write_config_file("""
-[alembic]
-script_location = %s
-sqlalchemy.url = %s
-sourceless = %s
-
-[loggers]
-keys = root
-
-[handlers]
-keys = console
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatters]
-keys = generic
-
-[formatter_generic]
-format = %%(levelname)-5.5s [%%(name)s] %%(message)s
-datefmt = %%H:%%M:%%S
- """ % (dir_, url, "true" if sourceless else "false"))
-
-
-def _multi_dir_testing_config(sourceless=False, extra_version_location=''):
- dir_ = os.path.join(_get_staging_directory(), 'scripts')
- url = "sqlite:///%s/foo.db" % dir_
-
- return _write_config_file("""
-[alembic]
-script_location = %s
-sqlalchemy.url = %s
-sourceless = %s
-version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/ %s
-
-[loggers]
-keys = root
-
-[handlers]
-keys = console
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatters]
-keys = generic
-
-[formatter_generic]
-format = %%(levelname)-5.5s [%%(name)s] %%(message)s
-datefmt = %%H:%%M:%%S
- """ % (dir_, url, "true" if sourceless else "false",
- extra_version_location))
-
-
-def _no_sql_testing_config(dialect="postgresql", directives=""):
- """use a postgresql url with no host so that
-    connections are guaranteed to fail"""
- dir_ = os.path.join(_get_staging_directory(), 'scripts')
- return _write_config_file("""
-[alembic]
-script_location = %s
-sqlalchemy.url = %s://
-%s
-
-[loggers]
-keys = root
-
-[handlers]
-keys = console
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatters]
-keys = generic
-
-[formatter_generic]
-format = %%(levelname)-5.5s [%%(name)s] %%(message)s
-datefmt = %%H:%%M:%%S
-
-""" % (dir_, dialect, directives))
-
-
-def _write_config_file(text):
- cfg = _testing_config()
- with open(cfg.config_file_name, 'w') as f:
- f.write(text)
- return cfg
-
-
-def _testing_config():
- from alembic.config import Config
- if not os.access(_get_staging_directory(), os.F_OK):
- os.mkdir(_get_staging_directory())
- return Config(os.path.join(_get_staging_directory(), 'test_alembic.ini'))
-
-
-def write_script(
- scriptdir, rev_id, content, encoding='ascii', sourceless=False):
- old = scriptdir.revision_map.get_revision(rev_id)
- path = old.path
-
- content = textwrap.dedent(content)
- if encoding:
- content = content.encode(encoding)
- with open(path, 'wb') as fp:
- fp.write(content)
- pyc_path = util.pyc_file_from_path(path)
- if pyc_path:
- os.unlink(pyc_path)
- script = Script._from_path(scriptdir, path)
- old = scriptdir.revision_map.get_revision(script.revision)
- if old.down_revision != script.down_revision:
- raise Exception("Can't change down_revision "
- "on a refresh operation.")
- scriptdir.revision_map.add_revision(script, _replace=True)
-
- if sourceless:
- make_sourceless(
- path,
- "pep3147" if sourceless == "pep3147_everything" else "simple"
- )
-
-
-def make_sourceless(path, style):
-
- import py_compile
- py_compile.compile(path)
-
- if style == "simple" and has_pep3147():
- pyc_path = util.pyc_file_from_path(path)
- suffix = get_current_bytecode_suffixes()[0]
- filepath, ext = os.path.splitext(path)
- simple_pyc_path = filepath + suffix
- shutil.move(pyc_path, simple_pyc_path)
- pyc_path = simple_pyc_path
- elif style == "pep3147" and not has_pep3147():
- raise NotImplementedError()
- else:
- assert style in ("pep3147", "simple")
- pyc_path = util.pyc_file_from_path(path)
-
- assert os.access(pyc_path, os.F_OK)
-
- os.unlink(path)
-
-
-def three_rev_fixture(cfg):
- a = util.rev_id()
- b = util.rev_id()
- c = util.rev_id()
-
- script = ScriptDirectory.from_config(cfg)
- script.generate_revision(a, "revision a", refresh=True)
- write_script(script, a, """\
-"Rev A"
-revision = '%s'
-down_revision = None
-
-from alembic import op
-
-
-def upgrade():
- op.execute("CREATE STEP 1")
-
-
-def downgrade():
- op.execute("DROP STEP 1")
-
-""" % a)
-
- script.generate_revision(b, "revision b", refresh=True)
- write_script(script, b, u("""# coding: utf-8
-"Rev B, méil"
-revision = '%s'
-down_revision = '%s'
-
-from alembic import op
-
-
-def upgrade():
- op.execute("CREATE STEP 2")
-
-
-def downgrade():
- op.execute("DROP STEP 2")
-
-""") % (b, a), encoding="utf-8")
-
- script.generate_revision(c, "revision c", refresh=True)
- write_script(script, c, """\
-"Rev C"
-revision = '%s'
-down_revision = '%s'
-
-from alembic import op
-
-
-def upgrade():
- op.execute("CREATE STEP 3")
-
-
-def downgrade():
- op.execute("DROP STEP 3")
-
-""" % (c, b))
- return a, b, c
-
-
-def multi_heads_fixture(cfg, a, b, c):
- """Create a multiple head fixture from the three-revs fixture"""
-
- d = util.rev_id()
- e = util.rev_id()
- f = util.rev_id()
-
- script = ScriptDirectory.from_config(cfg)
- script.generate_revision(
- d, "revision d from b", head=b, splice=True, refresh=True)
- write_script(script, d, """\
-"Rev D"
-revision = '%s'
-down_revision = '%s'
-
-from alembic import op
-
-
-def upgrade():
- op.execute("CREATE STEP 4")
-
-
-def downgrade():
- op.execute("DROP STEP 4")
-
-""" % (d, b))
-
- script.generate_revision(
- e, "revision e from d", head=d, splice=True, refresh=True)
- write_script(script, e, """\
-"Rev E"
-revision = '%s'
-down_revision = '%s'
-
-from alembic import op
-
-
-def upgrade():
- op.execute("CREATE STEP 5")
-
-
-def downgrade():
- op.execute("DROP STEP 5")
-
-""" % (e, d))
-
- script.generate_revision(
- f, "revision f from b", head=b, splice=True, refresh=True)
- write_script(script, f, """\
-"Rev F"
-revision = '%s'
-down_revision = '%s'
-
-from alembic import op
-
-
-def upgrade():
- op.execute("CREATE STEP 6")
-
-
-def downgrade():
- op.execute("DROP STEP 6")
-
-""" % (f, b))
-
- return d, e, f
-
-
-def _multidb_testing_config(engines):
- """alembic.ini fixture to work exactly with the 'multidb' template"""
-
- dir_ = os.path.join(_get_staging_directory(), 'scripts')
-
- databases = ", ".join(
- engines.keys()
- )
- engines = "\n\n".join(
- "[%s]\n"
- "sqlalchemy.url = %s" % (key, value.url)
- for key, value in engines.items()
- )
-
- return _write_config_file("""
-[alembic]
-script_location = %s
-sourceless = false
-
-databases = %s
-
-%s
-[loggers]
-keys = root
-
-[handlers]
-keys = console
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatters]
-keys = generic
-
-[formatter_generic]
-format = %%(levelname)-5.5s [%%(name)s] %%(message)s
-datefmt = %%H:%%M:%%S
- """ % (dir_, databases, engines)
- )
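Note: taken together, this module builds throwaway Alembic environments under a scratch directory. A typical flow, mirroring how the test suite uses it (sketch; the leading-underscore helpers are internal):

    from alembic.testing import env as test_env

    script_dir = test_env.staging_env()        # init scripts from the generic template
    cfg = test_env._sqlite_testing_config()    # alembic.ini -> scratch dir + sqlite
    a, b, c = test_env.three_rev_fixture(cfg)  # three chained revisions a <- b <- c
    # ... exercise alembic commands against cfg ...
    test_env.clear_staging_env()               # remove the scratch directory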
diff --git a/venv/Lib/site-packages/alembic/testing/exclusions.py b/venv/Lib/site-packages/alembic/testing/exclusions.py
deleted file mode 100644
index 7d33a5b..0000000
--- a/venv/Lib/site-packages/alembic/testing/exclusions.py
+++ /dev/null
@@ -1,447 +0,0 @@
-# testing/exclusions.py
-# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
-#
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
- this should be removable when Alembic targets SQLAlchemy 1.0.0
-"""
-
-
-import operator
-from .plugin.plugin_base import SkipTest
-from sqlalchemy.util import decorator
-from . import config
-from sqlalchemy import util
-from ..util import compat
-import inspect
-import contextlib
-from .compat import get_url_driver_name, get_url_backend_name
-
-
-def skip_if(predicate, reason=None):
- rule = compound()
- pred = _as_predicate(predicate, reason)
- rule.skips.add(pred)
- return rule
-
-
-def fails_if(predicate, reason=None):
- rule = compound()
- pred = _as_predicate(predicate, reason)
- rule.fails.add(pred)
- return rule
-
-
-class compound(object):
- def __init__(self):
- self.fails = set()
- self.skips = set()
- self.tags = set()
-
- def __add__(self, other):
- return self.add(other)
-
- def add(self, *others):
- copy = compound()
- copy.fails.update(self.fails)
- copy.skips.update(self.skips)
- copy.tags.update(self.tags)
- for other in others:
- copy.fails.update(other.fails)
- copy.skips.update(other.skips)
- copy.tags.update(other.tags)
- return copy
-
- def not_(self):
- copy = compound()
- copy.fails.update(NotPredicate(fail) for fail in self.fails)
- copy.skips.update(NotPredicate(skip) for skip in self.skips)
- copy.tags.update(self.tags)
- return copy
-
- @property
- def enabled(self):
- return self.enabled_for_config(config._current)
-
- def enabled_for_config(self, config):
- for predicate in self.skips.union(self.fails):
- if predicate(config):
- return False
- else:
- return True
-
- def matching_config_reasons(self, config):
- return [
- predicate._as_string(config) for predicate
- in self.skips.union(self.fails)
- if predicate(config)
- ]
-
- def include_test(self, include_tags, exclude_tags):
- return bool(
- not self.tags.intersection(exclude_tags) and
- (not include_tags or self.tags.intersection(include_tags))
- )
-
- def _extend(self, other):
- self.skips.update(other.skips)
- self.fails.update(other.fails)
- self.tags.update(other.tags)
-
- def __call__(self, fn):
- if hasattr(fn, '_sa_exclusion_extend'):
- fn._sa_exclusion_extend._extend(self)
- return fn
-
- @decorator
- def decorate(fn, *args, **kw):
- return self._do(config._current, fn, *args, **kw)
- decorated = decorate(fn)
- decorated._sa_exclusion_extend = self
- return decorated
-
- @contextlib.contextmanager
- def fail_if(self):
- all_fails = compound()
- all_fails.fails.update(self.skips.union(self.fails))
-
- try:
- yield
- except Exception as ex:
- all_fails._expect_failure(config._current, ex)
- else:
- all_fails._expect_success(config._current)
-
- def _do(self, config, fn, *args, **kw):
- for skip in self.skips:
- if skip(config):
- msg = "'%s' : %s" % (
- fn.__name__,
- skip._as_string(config)
- )
- raise SkipTest(msg)
-
- try:
- return_value = fn(*args, **kw)
- except Exception as ex:
- self._expect_failure(config, ex, name=fn.__name__)
- else:
- self._expect_success(config, name=fn.__name__)
- return return_value
-
- def _expect_failure(self, config, ex, name='block'):
- for fail in self.fails:
- if fail(config):
- print(("%s failed as expected (%s): %s " % (
- name, fail._as_string(config), str(ex))))
- break
- else:
- compat.raise_from_cause(ex)
-
- def _expect_success(self, config, name='block'):
- if not self.fails:
- return
- for fail in self.fails:
- if not fail(config):
- break
- else:
- raise AssertionError(
- "Unexpected success for '%s' (%s)" %
- (
- name,
- " and ".join(
- fail._as_string(config)
- for fail in self.fails
- )
- )
- )
-
-
-def requires_tag(tagname):
- return tags([tagname])
-
-
-def tags(tagnames):
- comp = compound()
- comp.tags.update(tagnames)
- return comp
-
-
-def only_if(predicate, reason=None):
- predicate = _as_predicate(predicate)
- return skip_if(NotPredicate(predicate), reason)
-
-
-def succeeds_if(predicate, reason=None):
- predicate = _as_predicate(predicate)
- return fails_if(NotPredicate(predicate), reason)
-
-
-class Predicate(object):
- @classmethod
- def as_predicate(cls, predicate, description=None):
- if isinstance(predicate, compound):
- return cls.as_predicate(predicate.fails.union(predicate.skips))
-
- elif isinstance(predicate, Predicate):
- if description and predicate.description is None:
- predicate.description = description
- return predicate
- elif isinstance(predicate, (list, set)):
- return OrPredicate(
- [cls.as_predicate(pred) for pred in predicate],
- description)
- elif isinstance(predicate, tuple):
- return SpecPredicate(*predicate)
- elif isinstance(predicate, compat.string_types):
- tokens = predicate.split(" ", 2)
- op = spec = None
- db = tokens.pop(0)
- if tokens:
- op = tokens.pop(0)
- if tokens:
- spec = tuple(int(d) for d in tokens.pop(0).split("."))
- return SpecPredicate(db, op, spec, description=description)
- elif util.callable(predicate):
- return LambdaPredicate(predicate, description)
- else:
- assert False, "unknown predicate type: %s" % predicate
-
- def _format_description(self, config, negate=False):
- bool_ = self(config)
- if negate:
- bool_ = not negate
- return self.description % {
- "driver": get_url_driver_name(config.db.url),
- "database": get_url_backend_name(config.db.url),
- "doesnt_support": "doesn't support" if bool_ else "does support",
- "does_support": "does support" if bool_ else "doesn't support"
- }
-
- def _as_string(self, config=None, negate=False):
- raise NotImplementedError()
-
-
-class BooleanPredicate(Predicate):
- def __init__(self, value, description=None):
- self.value = value
- self.description = description or "boolean %s" % value
-
- def __call__(self, config):
- return self.value
-
- def _as_string(self, config, negate=False):
- return self._format_description(config, negate=negate)
-
-
-class SpecPredicate(Predicate):
- def __init__(self, db, op=None, spec=None, description=None):
- self.db = db
- self.op = op
- self.spec = spec
- self.description = description
-
- _ops = {
- '<': operator.lt,
- '>': operator.gt,
- '==': operator.eq,
- '!=': operator.ne,
- '<=': operator.le,
- '>=': operator.ge,
- 'in': operator.contains,
- 'between': lambda val, pair: val >= pair[0] and val <= pair[1],
- }
-
- def __call__(self, config):
- engine = config.db
-
- if "+" in self.db:
- dialect, driver = self.db.split('+')
- else:
- dialect, driver = self.db, None
-
- if dialect and engine.name != dialect:
- return False
- if driver is not None and engine.driver != driver:
- return False
-
- if self.op is not None:
- assert driver is None, "DBAPI version specs not supported yet"
-
- version = _server_version(engine)
- oper = hasattr(self.op, '__call__') and self.op \
- or self._ops[self.op]
- return oper(version, self.spec)
- else:
- return True
-
- def _as_string(self, config, negate=False):
- if self.description is not None:
- return self._format_description(config)
- elif self.op is None:
- if negate:
- return "not %s" % self.db
- else:
- return "%s" % self.db
- else:
- if negate:
- return "not %s %s %s" % (
- self.db,
- self.op,
- self.spec
- )
- else:
- return "%s %s %s" % (
- self.db,
- self.op,
- self.spec
- )
-
-
-class LambdaPredicate(Predicate):
- def __init__(self, lambda_, description=None, args=None, kw=None):
- spec = compat.inspect_getargspec(lambda_)
- if not spec[0]:
- self.lambda_ = lambda db: lambda_()
- else:
- self.lambda_ = lambda_
- self.args = args or ()
- self.kw = kw or {}
- if description:
- self.description = description
- elif lambda_.__doc__:
- self.description = lambda_.__doc__
- else:
- self.description = "custom function"
-
- def __call__(self, config):
- return self.lambda_(config)
-
- def _as_string(self, config, negate=False):
- return self._format_description(config)
-
-
-class NotPredicate(Predicate):
- def __init__(self, predicate, description=None):
- self.predicate = predicate
- self.description = description
-
- def __call__(self, config):
- return not self.predicate(config)
-
- def _as_string(self, config, negate=False):
- if self.description:
- return self._format_description(config, not negate)
- else:
- return self.predicate._as_string(config, not negate)
-
-
-class OrPredicate(Predicate):
- def __init__(self, predicates, description=None):
- self.predicates = predicates
- self.description = description
-
- def __call__(self, config):
- for pred in self.predicates:
- if pred(config):
- return True
- return False
-
- def _eval_str(self, config, negate=False):
- if negate:
- conjunction = " and "
- else:
- conjunction = " or "
- return conjunction.join(p._as_string(config, negate=negate)
- for p in self.predicates)
-
- def _negation_str(self, config):
- if self.description is not None:
- return "Not " + self._format_description(config)
- else:
- return self._eval_str(config, negate=True)
-
- def _as_string(self, config, negate=False):
- if negate:
- return self._negation_str(config)
- else:
- if self.description is not None:
- return self._format_description(config)
- else:
- return self._eval_str(config)
-
-
-_as_predicate = Predicate.as_predicate
-
-
-def _is_excluded(db, op, spec):
- return SpecPredicate(db, op, spec)(config._current)
-
-
-def _server_version(engine):
- """Return a server_version_info tuple."""
-
- # force metadata to be retrieved
- conn = engine.connect()
- version = getattr(engine.dialect, 'server_version_info', ())
- conn.close()
- return version
-
-
-def db_spec(*dbs):
- return OrPredicate(
- [Predicate.as_predicate(db) for db in dbs]
- )
-
-
-def open():
- return skip_if(BooleanPredicate(False, "mark as execute"))
-
-
-def closed():
- return skip_if(BooleanPredicate(True, "marked as skip"))
-
-
-def fails(msg=None):
- return fails_if(BooleanPredicate(True, msg or "expected to fail"))
-
-
-@decorator
-def future(fn, *arg):
- return fails_if(LambdaPredicate(fn), "Future feature")
-
-
-def fails_on(db, reason=None):
- return fails_if(SpecPredicate(db), reason)
-
-
-def fails_on_everything_except(*dbs):
- return succeeds_if(
- OrPredicate([
- Predicate.as_predicate(db) for db in dbs
- ])
- )
-
-
-def skip(db, reason=None):
- return skip_if(SpecPredicate(db), reason)
-
-
-def only_on(dbs, reason=None):
- return only_if(
- OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)])
- )
-
-
-def exclude(db, op, spec, reason=None):
- return skip_if(SpecPredicate(db, op, spec), reason)
-
-
-def against(config, *queries):
- assert queries, "no queries sent!"
- return OrPredicate([
- Predicate.as_predicate(query)
- for query in queries
- ])(config)
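Note: Predicate.as_predicate gives string predicates a compact grammar: "db", "db+driver", or "db op version", so "mysql < 5.6" parses to SpecPredicate("mysql", "<", (5, 6)) and is compared against the live server version. Decorator use (sketch; the reason strings are illustrative):

    from alembic.testing import exclusions

    @exclusions.skip_if("sqlite", "not supported on SQLite")
    @exclusions.fails_if("mysql < 5.6", "needs fractional-second support")
    def test_timestamps():
        ...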
diff --git a/venv/Lib/site-packages/alembic/testing/fixtures.py b/venv/Lib/site-packages/alembic/testing/fixtures.py
deleted file mode 100644
index e6c16dd..0000000
--- a/venv/Lib/site-packages/alembic/testing/fixtures.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# coding: utf-8
-import io
-import re
-
-from sqlalchemy import create_engine, text, MetaData
-
-import alembic
-from ..util.compat import configparser
-from .. import util
-from ..util.compat import string_types, text_type
-from ..migration import MigrationContext
-from ..environment import EnvironmentContext
-from ..operations import Operations
-from contextlib import contextmanager
-from .plugin.plugin_base import SkipTest
-from .assertions import _get_dialect, eq_
-from . import mock
-
-testing_config = configparser.ConfigParser()
-testing_config.read(['test.cfg'])
-
-
-if not util.sqla_094:
- class TestBase(object):
- # A sequence of database names to always run, regardless of the
- # constraints below.
- __whitelist__ = ()
-
- # A sequence of requirement names matching testing.requires decorators
- __requires__ = ()
-
- # A sequence of dialect names to exclude from the test class.
- __unsupported_on__ = ()
-
- # If present, test class is only runnable for the *single* specified
- # dialect. If you need multiple, use __unsupported_on__ and invert.
- __only_on__ = None
-
- # A sequence of no-arg callables. If any are True, the entire testcase is
- # skipped.
- __skip_if__ = None
-
- def assert_(self, val, msg=None):
- assert val, msg
-
- # apparently a handful of tests are doing this....OK
- def setup(self):
- if hasattr(self, "setUp"):
- self.setUp()
-
- def teardown(self):
- if hasattr(self, "tearDown"):
- self.tearDown()
-else:
- from sqlalchemy.testing.fixtures import TestBase
-
-
-def capture_db():
- buf = []
-
- def dump(sql, *multiparams, **params):
- buf.append(str(sql.compile(dialect=engine.dialect)))
- engine = create_engine("postgresql://", strategy="mock", executor=dump)
- return engine, buf
-
-_engs = {}
-
-
-@contextmanager
-def capture_context_buffer(**kw):
- if kw.pop('bytes_io', False):
- buf = io.BytesIO()
- else:
- buf = io.StringIO()
-
- kw.update({
- 'dialect_name': "sqlite",
- 'output_buffer': buf
- })
- conf = EnvironmentContext.configure
-
- def configure(*arg, **opt):
- opt.update(**kw)
- return conf(*arg, **opt)
-
- with mock.patch.object(EnvironmentContext, "configure", configure):
- yield buf
-
-
-def op_fixture(
- dialect='default', as_sql=False,
- naming_convention=None, literal_binds=False,
- native_boolean=None):
-
- opts = {}
- if naming_convention:
- if not util.sqla_092:
- raise SkipTest(
- "naming_convention feature requires "
- "sqla 0.9.2 or greater")
- opts['target_metadata'] = MetaData(naming_convention=naming_convention)
-
- class buffer_(object):
- def __init__(self):
- self.lines = []
-
- def write(self, msg):
- msg = msg.strip()
- msg = re.sub(r'[\n\t]', '', msg)
- if as_sql:
- # the impl produces soft tabs,
- # so search for blocks of 4 spaces
-                msg = re.sub(r'    ', '', msg)
- msg = re.sub(r'\;\n*$', '', msg)
-
- self.lines.append(msg)
-
- def flush(self):
- pass
-
- buf = buffer_()
-
- class ctx(MigrationContext):
- def clear_assertions(self):
- buf.lines[:] = []
-
- def assert_(self, *sql):
- # TODO: make this more flexible about
- # whitespace and such
- eq_(buf.lines, list(sql))
-
- def assert_contains(self, sql):
- for stmt in buf.lines:
- if sql in stmt:
- return
- else:
- assert False, "Could not locate fragment %r in %r" % (
- sql,
- buf.lines
- )
-
- if as_sql:
- opts['as_sql'] = as_sql
- if literal_binds:
- opts['literal_binds'] = literal_binds
- ctx_dialect = _get_dialect(dialect)
- if native_boolean is not None:
- ctx_dialect.supports_native_boolean = native_boolean
- if not as_sql:
- def execute(stmt, *multiparam, **param):
- if isinstance(stmt, string_types):
- stmt = text(stmt)
- assert stmt.supports_execution
- sql = text_type(stmt.compile(dialect=ctx_dialect))
-
- buf.write(sql)
-
- connection = mock.Mock(dialect=ctx_dialect, execute=execute)
- else:
- opts['output_buffer'] = buf
- connection = None
- context = ctx(
- ctx_dialect,
- connection,
- opts)
-
- alembic.op._proxy = Operations(context)
- return context
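Note: op_fixture is the core unit-test harness here: it installs a MigrationContext whose "connection" merely captures compiled SQL into a buffer, so op.* calls can be asserted as strings. Sketch (the expected DDL string is illustrative):

    import sqlalchemy as sa
    from alembic import op
    from alembic.testing.fixtures import op_fixture

    def test_add_column():
        context = op_fixture("postgresql")
        op.add_column("t", sa.Column("x", sa.Integer()))
        context.assert_("ALTER TABLE t ADD COLUMN x INTEGER")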
diff --git a/venv/Lib/site-packages/alembic/testing/mock.py b/venv/Lib/site-packages/alembic/testing/mock.py
deleted file mode 100644
index db8a673..0000000
--- a/venv/Lib/site-packages/alembic/testing/mock.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# testing/mock.py
-# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
-#
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Import stub for mock library.
-
- NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
- this should be removable when Alembic targets SQLAlchemy 1.0.0
-
-"""
-from __future__ import absolute_import
-from ..util.compat import py33
-
-if py33:
- from unittest.mock import MagicMock, Mock, call, patch, ANY
-else:
- try:
- from mock import MagicMock, Mock, call, patch, ANY # noqa
- except ImportError:
- raise ImportError(
- "SQLAlchemy's test suite requires the "
- "'mock' library as of 0.8.2.")
diff --git a/venv/Lib/site-packages/alembic/testing/plugin/__init__.py b/venv/Lib/site-packages/alembic/testing/plugin/__init__.py
deleted file mode 100644
index 98616f4..0000000
--- a/venv/Lib/site-packages/alembic/testing/plugin/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
- this should be removable when Alembic targets SQLAlchemy 1.0.0
-"""
diff --git a/venv/Lib/site-packages/alembic/testing/plugin/bootstrap.py b/venv/Lib/site-packages/alembic/testing/plugin/bootstrap.py
deleted file mode 100644
index 1560b03..0000000
--- a/venv/Lib/site-packages/alembic/testing/plugin/bootstrap.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-Bootstrapper for nose/pytest plugins.
-
-The entire rationale for this system is to get the modules in plugin/
-imported without importing all of the supporting library, so that we can
-set up things for testing before coverage starts.
-
-The rationale for all of plugin/ being *in* the supporting library in the
-first place is so that the testing and plugin suite is available to other
-libraries, mainly external SQLAlchemy and Alembic dialects, to make use
-of the same test environment and standard suites available to
-SQLAlchemy/Alembic themselves without the need to ship/install a separate
-package outside of SQLAlchemy.
-
-NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
-this should be removable when Alembic targets SQLAlchemy 1.0.0.
-
-"""
-
-import os
-import sys
-
-bootstrap_file = locals()['bootstrap_file']
-to_bootstrap = locals()['to_bootstrap']
-
-
-def load_file_as_module(name):
- path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name)
- if sys.version_info >= (3, 3):
- from importlib import machinery
- mod = machinery.SourceFileLoader(name, path).load_module()
- else:
- import imp
- mod = imp.load_source(name, path)
- return mod
-
-if to_bootstrap == "pytest":
- sys.modules["alembic_plugin_base"] = load_file_as_module("plugin_base")
- sys.modules["alembic_pytestplugin"] = load_file_as_module("pytestplugin")
-elif to_bootstrap == "nose":
- sys.modules["alembic_plugin_base"] = load_file_as_module("plugin_base")
- sys.modules["alembic_noseplugin"] = load_file_as_module("noseplugin")
-else:
- raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa
diff --git a/venv/Lib/site-packages/alembic/testing/plugin/noseplugin.py b/venv/Lib/site-packages/alembic/testing/plugin/noseplugin.py
deleted file mode 100644
index 1adfccb..0000000
--- a/venv/Lib/site-packages/alembic/testing/plugin/noseplugin.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# plugin/noseplugin.py
-# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
-#
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-Enhance nose with extra options and behaviors for running SQLAlchemy tests.
-
-
-NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
-this should be removable when Alembic targets SQLAlchemy 1.0.0.
-
-"""
-
-try:
- # installed by bootstrap.py
- import alembic_plugin_base as plugin_base
-except ImportError:
- # assume we're a package, use traditional import
- from . import plugin_base
-
-import os
-import sys
-
-from nose.plugins import Plugin
-fixtures = None
-
-py3k = sys.version_info >= (3, 0)
-
-
-class NoseSQLAlchemy(Plugin):
- enabled = True
-
- name = 'sqla_testing'
- score = 100
-
- def options(self, parser, env=os.environ):
- Plugin.options(self, parser, env)
- opt = parser.add_option
-
- def make_option(name, **kw):
- callback_ = kw.pop("callback", None)
- if callback_:
- def wrap_(option, opt_str, value, parser):
- callback_(opt_str, value, parser)
- kw["callback"] = wrap_
- opt(name, **kw)
-
- plugin_base.setup_options(make_option)
- plugin_base.read_config()
-
- def configure(self, options, conf):
- super(NoseSQLAlchemy, self).configure(options, conf)
- plugin_base.pre_begin(options)
-
- plugin_base.set_coverage_flag(options.enable_plugin_coverage)
-
- def begin(self):
- global fixtures
- from alembic.testing import fixtures # noqa
-
- plugin_base.post_begin()
-
- def describeTest(self, test):
- return ""
-
- def wantFunction(self, fn):
- return False
-
- def wantMethod(self, fn):
- if py3k:
- if not hasattr(fn.__self__, 'cls'):
- return False
- cls = fn.__self__.cls
- else:
- cls = fn.im_class
- return plugin_base.want_method(cls, fn)
-
- def wantClass(self, cls):
- return plugin_base.want_class(cls)
-
- def beforeTest(self, test):
- plugin_base.before_test(
- test,
- test.test.cls.__module__,
- test.test.cls, test.test.method.__name__)
-
- def afterTest(self, test):
- plugin_base.after_test(test)
-
- def startContext(self, ctx):
- if not isinstance(ctx, type) \
- or not issubclass(ctx, fixtures.TestBase):
- return
- plugin_base.start_test_class(ctx)
-
- def stopContext(self, ctx):
- if not isinstance(ctx, type) \
- or not issubclass(ctx, fixtures.TestBase):
- return
- plugin_base.stop_test_class(ctx)
diff --git a/venv/Lib/site-packages/alembic/testing/plugin/plugin_base.py b/venv/Lib/site-packages/alembic/testing/plugin/plugin_base.py
deleted file mode 100644
index 83b30e3..0000000
--- a/venv/Lib/site-packages/alembic/testing/plugin/plugin_base.py
+++ /dev/null
@@ -1,561 +0,0 @@
-# plugin/plugin_base.py
-# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
-#
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""Testing extensions.
-
-this module is designed to work as a testing-framework-agnostic library,
-so that we can continue to support nose and also begin adding new
-functionality via py.test.
-
-NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
-this should be removable when Alembic targets SQLAlchemy 1.0.0
-
-
-"""
-
-from __future__ import absolute_import
-try:
-    # unittest has a SkipTest also but pytest doesn't
- # honor it unless nose is imported too...
- from nose import SkipTest
-except ImportError:
- from pytest import skip
- SkipTest = skip.Exception
-
-import sys
-import re
-
-py3k = sys.version_info >= (3, 0)
-
-if py3k:
- import configparser
-else:
- import ConfigParser as configparser
-
-# late imports
-fixtures = None
-engines = None
-provision = None
-exclusions = None
-warnings = None
-assertions = None
-requirements = None
-config = None
-util = None
-file_config = None
-
-
-logging = None
-include_tags = set()
-exclude_tags = set()
-options = None
-
-
-def setup_options(make_option):
- make_option("--log-info", action="callback", type="string", callback=_log,
- help="turn on info logging for (multiple OK)")
- make_option("--log-debug", action="callback",
- type="string", callback=_log,
- help="turn on debug logging for (multiple OK)")
- make_option("--db", action="append", type="string", dest="db",
- help="Use prefab database uri. Multiple OK, "
- "first one is run by default.")
- make_option('--dbs', action='callback', zeroarg_callback=_list_dbs,
- help="List available prefab dbs")
- make_option("--dburi", action="append", type="string", dest="dburi",
- help="Database uri. Multiple OK, "
- "first one is run by default.")
- make_option("--dropfirst", action="store_true", dest="dropfirst",
- help="Drop all tables in the target database first")
- make_option("--backend-only", action="store_true", dest="backend_only",
- help="Run only tests marked with __backend__")
- make_option("--postgresql-templatedb", type="string",
- help="name of template database to use for Postgresql "
- "CREATE DATABASE (defaults to current database)")
- make_option("--low-connections", action="store_true",
- dest="low_connections",
- help="Use a low number of distinct connections - "
- "i.e. for Oracle TNS")
- make_option("--write-idents", type="string", dest="write_idents",
- help="write out generated follower idents to , "
- "when -n is used")
- make_option("--reversetop", action="store_true",
- dest="reversetop", default=False,
- help="Use a random-ordering set implementation in the ORM "
- "(helps reveal dependency issues)")
- make_option("--requirements", action="callback", type="string",
- callback=_requirements_opt,
- help="requirements class for testing, overrides setup.cfg")
- make_option("--with-cdecimal", action="store_true",
- dest="cdecimal", default=False,
- help="Monkeypatch the cdecimal library into Python 'decimal' "
- "for all tests")
- make_option("--include-tag", action="callback", callback=_include_tag,
- type="string",
- help="Include tests with tag ")
- make_option("--exclude-tag", action="callback", callback=_exclude_tag,
- type="string",
- help="Exclude tests with tag ")
- make_option("--mysql-engine", action="store",
- dest="mysql_engine", default=None,
- help="Use the specified MySQL storage engine for all tables, "
- "default is a db-default/InnoDB combo.")
-
-
-def configure_follower(follower_ident):
- """Configure required state for a follower.
-
- This invokes in the parent process and typically includes
- database creation.
-
- """
- from alembic.testing import provision
- provision.FOLLOWER_IDENT = follower_ident
-
-
-def memoize_important_follower_config(dict_):
- """Store important configuration we will need to send to a follower.
-
- This invokes in the parent process after normal config is set up.
-
- This is necessary as py.test seems to not be using forking, so we
- start with nothing in memory, *but* it isn't running our argparse
- callables, so we have to just copy all of that over.
-
- """
- dict_['memoized_config'] = {
- 'include_tags': include_tags,
- 'exclude_tags': exclude_tags
- }
-
-
-def restore_important_follower_config(dict_):
- """Restore important configuration needed by a follower.
-
- This invokes in the follower process.
-
- """
- include_tags.update(dict_['memoized_config']['include_tags'])
- exclude_tags.update(dict_['memoized_config']['exclude_tags'])
-
-
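`memoize_important_follower_config` and `restore_important_follower_config` exist because pytest-xdist workers are spawned rather than forked: module globals such as `include_tags` come up empty in each worker, so the master stashes them in the `slaveinput` dictionary that xdist ships across. A toy round trip under that assumption (names are illustrative):

    include_tags = {"backend"}   # set by --include-tag in the master
    exclude_tags = set()

    def memoize(dict_):
        # master side: stash tag state into the payload
        dict_["memoized_config"] = {
            "include_tags": set(include_tags),
            "exclude_tags": set(exclude_tags),
        }

    def restore(dict_, inc, exc):
        # worker side: rehydrate the freshly imported, empty globals
        inc.update(dict_["memoized_config"]["include_tags"])
        exc.update(dict_["memoized_config"]["exclude_tags"])

    slaveinput = {}
    memoize(slaveinput)
    worker_inc, worker_exc = set(), set()
    restore(slaveinput, worker_inc, worker_exc)
    assert worker_inc == {"backend"}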
-def read_config():
- global file_config
- file_config = configparser.ConfigParser()
- file_config.read(['setup.cfg', 'test.cfg'])
-
-
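`read_config` feeds several consumers: `_list_dbs` and `_engine_uri` read the `[db]` alias section, while `_requirements` reads `[sqla_testing]`. A sketch of parsing a config of that shape — the URIs and class path below are made-up values, not the project's actual configuration:

    import configparser   # py3k spelling; the compat branch above aliases py2k's ConfigParser
    import textwrap

    file_config = configparser.ConfigParser()
    file_config.read_string(textwrap.dedent("""\
        [db]
        default = sqlite:///:memory:
        postgresql = postgresql://scott:tiger@127.0.0.1:5432/test

        [sqla_testing]
        requirement_cls = tests.requirements:DefaultRequirements
        """))
    print(file_config.get("db", "default"))
    print(file_config.get("sqla_testing", "requirement_cls"))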
-def pre_begin(opt):
- """things to set up early, before coverage might be setup."""
- global options
- options = opt
- for fn in pre_configure:
- fn(options, file_config)
-
-
-def set_coverage_flag(value):
- options.has_coverage = value
-
-
-def post_begin():
- """things to set up later, once we know coverage is running."""
-
- # Lazy setup of other options (post coverage)
- for fn in post_configure:
- fn(options, file_config)
-
- # late imports, has to happen after config as well
- # as nose plugins like coverage
- global util, fixtures, engines, exclusions, \
- assertions, warnings, profiling,\
- config, testing
- from alembic.testing import config, warnings, exclusions # noqa
- from alembic.testing import engines, fixtures # noqa
- from sqlalchemy import util # noqa
- warnings.setup_filters()
-
-
-def _log(opt_str, value, parser):
- global logging
- if not logging:
- import logging
- logging.basicConfig()
-
- if opt_str.endswith('-info'):
- logging.getLogger(value).setLevel(logging.INFO)
- elif opt_str.endswith('-debug'):
- logging.getLogger(value).setLevel(logging.DEBUG)
-
-
-def _list_dbs(*args):
- print("Available --db options (use --dburi to override)")
- for macro in sorted(file_config.options('db')):
- print("%20s\t%s" % (macro, file_config.get('db', macro)))
- sys.exit(0)
-
-
-def _requirements_opt(opt_str, value, parser):
- _setup_requirements(value)
-
-
-def _exclude_tag(opt_str, value, parser):
- exclude_tags.add(value.replace('-', '_'))
-
-
-def _include_tag(opt_str, value, parser):
- include_tags.add(value.replace('-', '_'))
-
-pre_configure = []
-post_configure = []
-
-
-def pre(fn):
- pre_configure.append(fn)
- return fn
-
-
-def post(fn):
- post_configure.append(fn)
- return fn
-
-
-@pre
-def _setup_options(opt, file_config):
- global options
- options = opt
-
-
-@pre
-def _monkeypatch_cdecimal(options, file_config):
- if options.cdecimal:
- import cdecimal
- sys.modules['decimal'] = cdecimal
-
-
-@post
-def _engine_uri(options, file_config):
- from alembic.testing import config
- from alembic.testing import provision
-
- if options.dburi:
- db_urls = list(options.dburi)
- else:
- db_urls = []
-
- if options.db:
- for db_token in options.db:
- for db in re.split(r'[,\s]+', db_token):
- if db not in file_config.options('db'):
- raise RuntimeError(
- "Unknown URI specifier '%s'. "
- "Specify --dbs for known uris."
- % db)
- else:
- db_urls.append(file_config.get('db', db))
-
- if not db_urls:
- db_urls.append(file_config.get('db', 'default'))
-
- for db_url in db_urls:
-
- if options.write_idents and provision.FOLLOWER_IDENT: # != 'master':
- with open(options.write_idents, "a") as file_:
- file_.write(provision.FOLLOWER_IDENT + " " + db_url + "\n")
-
- cfg = provision.setup_config(
- db_url, options, file_config, provision.FOLLOWER_IDENT)
-
- if not config._current:
- cfg.set_as_current(cfg)
-
-
-@post
-def _requirements(options, file_config):
-
- requirement_cls = file_config.get('sqla_testing', "requirement_cls")
- _setup_requirements(requirement_cls)
-
-
-def _setup_requirements(argument):
- from alembic.testing import config
-
- if config.requirements is not None:
- return
-
- modname, clsname = argument.split(":")
-
- # importlib.import_module() only introduced in 2.7, a little
- # late
- mod = __import__(modname)
- for component in modname.split(".")[1:]:
- mod = getattr(mod, component)
- req_cls = getattr(mod, clsname)
-
- config.requirements = req_cls()
-
-
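`_setup_requirements` resolves a `"package.module:Class"` spec by hand, since `importlib.import_module` (2.7+) couldn't be assumed: `__import__` returns the top-level package, so the code walks attribute-wise down to the leaf module. The same walk in isolation, pointed at a stdlib class for demonstration:

    def resolve(argument):
        modname, clsname = argument.split(":")
        mod = __import__(modname)            # top-level package comes back
        for component in modname.split(".")[1:]:
            mod = getattr(mod, component)    # descend to the leaf module
        return getattr(mod, clsname)

    print(resolve("collections.abc:Iterable"))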
-@post
-def _prep_testing_database(options, file_config):
- from alembic.testing import config
- from alembic.testing.exclusions import against
- from sqlalchemy import schema
- from alembic import util
-
- if util.sqla_08:
- from sqlalchemy import inspect
- else:
- from sqlalchemy.engine.reflection import Inspector
- inspect = Inspector.from_engine
-
- if options.dropfirst:
- for cfg in config.Config.all_configs():
- e = cfg.db
- inspector = inspect(e)
- try:
- view_names = inspector.get_view_names()
- except NotImplementedError:
- pass
- else:
- for vname in view_names:
- e.execute(schema._DropView(
- schema.Table(vname, schema.MetaData())
- ))
-
- if config.requirements.schemas.enabled_for_config(cfg):
- try:
- view_names = inspector.get_view_names(
- schema="test_schema")
- except NotImplementedError:
- pass
- else:
- for vname in view_names:
- e.execute(schema._DropView(
- schema.Table(vname, schema.MetaData(),
- schema="test_schema")
- ))
-
- for tname in reversed(inspector.get_table_names(
- order_by="foreign_key")):
- e.execute(schema.DropTable(
- schema.Table(tname, schema.MetaData())
- ))
-
- if config.requirements.schemas.enabled_for_config(cfg):
- for tname in reversed(inspector.get_table_names(
- order_by="foreign_key", schema="test_schema")):
- e.execute(schema.DropTable(
- schema.Table(tname, schema.MetaData(),
- schema="test_schema")
- ))
-
- if against(cfg, "postgresql") and util.sqla_100:
- from sqlalchemy.dialects import postgresql
- for enum in inspector.get_enums("*"):
- e.execute(postgresql.DropEnumType(
- postgresql.ENUM(
- name=enum['name'],
- schema=enum['schema'])))
-
-
-@post
-def _reverse_topological(options, file_config):
- if options.reversetop:
- from sqlalchemy.orm.util import randomize_unitofwork
- randomize_unitofwork()
-
-
-@post
-def _post_setup_options(opt, file_config):
- from alembic.testing import config
- config.options = options
- config.file_config = file_config
-
-
-def want_class(cls):
- if not issubclass(cls, fixtures.TestBase):
- return False
- elif cls.__name__.startswith('_'):
- return False
- elif config.options.backend_only and not getattr(cls, '__backend__',
- False):
- return False
- else:
- return True
-
-
-def want_method(cls, fn):
- if not fn.__name__.startswith("test_"):
- return False
- elif fn.__module__ is None:
- return False
- elif include_tags:
- return (
- hasattr(cls, '__tags__') and
- exclusions.tags(cls.__tags__).include_test(
- include_tags, exclude_tags)
- ) or (
- hasattr(fn, '_sa_exclusion_extend') and
- fn._sa_exclusion_extend.include_test(
- include_tags, exclude_tags)
- )
- elif exclude_tags and hasattr(cls, '__tags__'):
- return exclusions.tags(cls.__tags__).include_test(
- include_tags, exclude_tags)
- elif exclude_tags and hasattr(fn, '_sa_exclusion_extend'):
- return fn._sa_exclusion_extend.include_test(include_tags, exclude_tags)
- else:
- return True
-
-
-def generate_sub_tests(cls, module):
- if getattr(cls, '__backend__', False):
- for cfg in _possible_configs_for_cls(cls):
- orig_name = cls.__name__
-
- # we can have special chars in these names except for the
- # pytest junit plugin, which is tripped up by the brackets
- # and periods, so sanitize
-
- alpha_name = re.sub(r'[_\[\]\.]+', '_', cfg.name)
- alpha_name = re.sub('_+$', '', alpha_name)
- name = "%s_%s" % (cls.__name__, alpha_name)
-
- subcls = type(
- name,
- (cls, ),
- {
- "_sa_orig_cls_name": orig_name,
- "__only_on_config__": cfg
- }
- )
- setattr(module, name, subcls)
- yield subcls
- else:
- yield cls
-
-
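`generate_sub_tests` fans a `__backend__` test class out into one subclass per database config, sanitizing the config name because the pytest junit plugin chokes on brackets and periods. A simplified sketch with plain strings standing in for config objects:

    import re
    import types

    def sub_tests(cls, module, config_names):
        for cfg_name in config_names:
            alpha = re.sub(r'[_\[\]\.]+', '_', cfg_name)
            alpha = re.sub('_+$', '', alpha)
            name = "%s_%s" % (cls.__name__, alpha)
            subcls = type(name, (cls,), {"__only_on_config__": cfg_name})
            setattr(module, name, subcls)
            yield subcls

    class RoundTripTest(object):
        __backend__ = True

    mod = types.ModuleType("demo_tests")
    for sub in sub_tests(RoundTripTest, mod, ["sqlite", "mysql[innodb]"]):
        print(sub.__name__)   # RoundTripTest_sqlite, RoundTripTest_mysql_innodb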
-def start_test_class(cls):
- _do_skips(cls)
- _setup_engine(cls)
-
-
-def stop_test_class(cls):
- #from sqlalchemy import inspect
- #assert not inspect(testing.db).get_table_names()
- _restore_engine()
-
-
-def _restore_engine():
- config._current.reset()
-
-
-def _setup_engine(cls):
- if getattr(cls, '__engine_options__', None):
- eng = engines.testing_engine(options=cls.__engine_options__)
- config._current.push_engine(eng)
-
-
-def before_test(test, test_module_name, test_class, test_name):
- pass
-
-
-def after_test(test):
- pass
-
-
-def _possible_configs_for_cls(cls, reasons=None):
- all_configs = set(config.Config.all_configs())
-
- if cls.__unsupported_on__:
- spec = exclusions.db_spec(*cls.__unsupported_on__)
- for config_obj in list(all_configs):
- if spec(config_obj):
- all_configs.remove(config_obj)
-
- if getattr(cls, '__only_on__', None):
- spec = exclusions.db_spec(*util.to_list(cls.__only_on__))
- for config_obj in list(all_configs):
- if not spec(config_obj):
- all_configs.remove(config_obj)
-
- if getattr(cls, '__only_on_config__', None):
- all_configs.intersection_update([cls.__only_on_config__])
-
- if hasattr(cls, '__requires__'):
- requirements = config.requirements
- for config_obj in list(all_configs):
- for requirement in cls.__requires__:
- check = getattr(requirements, requirement)
-
- skip_reasons = check.matching_config_reasons(config_obj)
- if skip_reasons:
- all_configs.remove(config_obj)
- if reasons is not None:
- reasons.extend(skip_reasons)
- break
-
- if hasattr(cls, '__prefer_requires__'):
- non_preferred = set()
- requirements = config.requirements
- for config_obj in list(all_configs):
- for requirement in cls.__prefer_requires__:
- check = getattr(requirements, requirement)
-
- if not check.enabled_for_config(config_obj):
- non_preferred.add(config_obj)
- if all_configs.difference(non_preferred):
- all_configs.difference_update(non_preferred)
-
- return all_configs
-
-
-def _do_skips(cls):
- reasons = []
- all_configs = _possible_configs_for_cls(cls, reasons)
-
- if getattr(cls, '__skip_if__', False):
- for c in getattr(cls, '__skip_if__'):
- if c():
- raise SkipTest("'%s' skipped by %s" % (
- cls.__name__, c.__name__)
- )
-
- if not all_configs:
- if getattr(cls, '__backend__', False):
- msg = "'%s' unsupported for implementation '%s'" % (
- cls.__name__, cls.__only_on__)
- else:
- msg = "'%s' unsupported on any DB implementation %s%s" % (
- cls.__name__,
- ", ".join(
- "'%s(%s)+%s'" % (
- config_obj.db.name,
- ".".join(
- str(dig) for dig in
- config_obj.db.dialect.server_version_info),
- config_obj.db.driver
- )
- for config_obj in config.Config.all_configs()
- ),
- ", ".join(reasons)
- )
- raise SkipTest(msg)
- elif hasattr(cls, '__prefer_backends__'):
- non_preferred = set()
- spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__))
- for config_obj in all_configs:
- if not spec(config_obj):
- non_preferred.add(config_obj)
- if all_configs.difference(non_preferred):
- all_configs.difference_update(non_preferred)
-
- if config._current not in all_configs:
- _setup_config(all_configs.pop(), cls)
-
-
-def _setup_config(config_obj, ctx):
- config._current.push(config_obj)
diff --git a/venv/Lib/site-packages/alembic/testing/plugin/pytestplugin.py b/venv/Lib/site-packages/alembic/testing/plugin/pytestplugin.py
deleted file mode 100644
index 15c7e07..0000000
--- a/venv/Lib/site-packages/alembic/testing/plugin/pytestplugin.py
+++ /dev/null
@@ -1,210 +0,0 @@
-"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
- this should be removable when Alembic targets SQLAlchemy 1.0.0.
-"""
-
-try:
- # installed by bootstrap.py
- import alembic_plugin_base as plugin_base
-except ImportError:
- # assume we're a package, use traditional import
- from . import plugin_base
-
-import sys
-
-py3k = sys.version_info >= (3, 0)
-
-import pytest
-import argparse
-import inspect
-import collections
-import os
-
-try:
- import xdist # noqa
- has_xdist = True
-except ImportError:
- has_xdist = False
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("sqlalchemy")
-
- def make_option(name, **kw):
- callback_ = kw.pop("callback", None)
- if callback_:
- class CallableAction(argparse.Action):
- def __call__(self, parser, namespace,
- values, option_string=None):
- callback_(option_string, values, parser)
- kw["action"] = CallableAction
-
- zeroarg_callback = kw.pop("zeroarg_callback", None)
- if zeroarg_callback:
- class CallableAction(argparse.Action):
- def __init__(self, option_strings,
- dest, default=False,
- required=False, help=None):
- super(CallableAction, self).__init__(
- option_strings=option_strings,
- dest=dest,
- nargs=0,
- const=True,
- default=default,
- required=required,
- help=help)
-
- def __call__(self, parser, namespace,
- values, option_string=None):
- zeroarg_callback(option_string, values, parser)
- kw["action"] = CallableAction
-
- group.addoption(name, **kw)
-
- plugin_base.setup_options(make_option)
- plugin_base.read_config()
-
-
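The two `CallableAction` classes above adapt the optparse-style callbacks that `plugin_base.setup_options` hands over into argparse's `Action` protocol, which pytest's option parser speaks. The one-argument case in isolation (option name and callback are illustrative):

    import argparse

    def legacy_callback(opt_str, value, parser):
        print("saw %s=%s" % (opt_str, value))

    def action_for(callback_):
        class CallableAction(argparse.Action):
            # argparse invokes (parser, namespace, values, option_string)
            def __call__(self, parser, namespace, values, option_string=None):
                callback_(option_string, values, parser)
        return CallableAction

    p = argparse.ArgumentParser()
    p.add_argument("--log-info", action=action_for(legacy_callback))
    p.parse_args(["--log-info", "sqlalchemy.engine"])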
-def pytest_configure(config):
- if hasattr(config, "slaveinput"):
- plugin_base.restore_important_follower_config(config.slaveinput)
- plugin_base.configure_follower(
- config.slaveinput["follower_ident"]
- )
- else:
- if config.option.write_idents and \
- os.path.exists(config.option.write_idents):
- os.remove(config.option.write_idents)
-
- plugin_base.pre_begin(config.option)
-
- plugin_base.set_coverage_flag(bool(getattr(config.option,
- "cov_source", False)))
-
-
-def pytest_sessionstart(session):
- plugin_base.post_begin()
-
-if has_xdist:
- import uuid
-
- def pytest_configure_node(node):
- # the master for each node fills slaveinput dictionary
- # which pytest-xdist will transfer to the subprocess
-
- plugin_base.memoize_important_follower_config(node.slaveinput)
-
- node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]
- from alembic.testing import provision
- provision.create_follower_db(node.slaveinput["follower_ident"])
-
- def pytest_testnodedown(node, error):
- from alembic.testing import provision
- provision.drop_follower_db(node.slaveinput["follower_ident"])
-
-
-def pytest_collection_modifyitems(session, config, items):
- # look for all those classes that specify __backend__ and
- # expand them out into per-database test cases.
-
- # this is much easier to do within pytest_pycollect_makeitem, however
- # pytest is iterating through cls.__dict__ as makeitem is
- # called which causes a "dictionary changed size" error on py3k.
- # I'd submit a pullreq for them to turn it into a list first, but
- # it's to suit the rather odd use case here which is that we are adding
- # new classes to a module on the fly.
-
- rebuilt_items = collections.defaultdict(list)
- items[:] = [
- item for item in
- items if isinstance(item.parent, pytest.Instance)]
- test_classes = set(item.parent for item in items)
- for test_class in test_classes:
- for sub_cls in plugin_base.generate_sub_tests(
- test_class.cls, test_class.parent.module):
- if sub_cls is not test_class.cls:
- list_ = rebuilt_items[test_class.cls]
-
- for inst in pytest.Class(
- sub_cls.__name__,
- parent=test_class.parent.parent).collect():
- list_.extend(inst.collect())
-
- newitems = []
- for item in items:
- if item.parent.cls in rebuilt_items:
- newitems.extend(rebuilt_items[item.parent.cls])
- rebuilt_items[item.parent.cls][:] = []
- else:
- newitems.append(item)
-
- # seems like the functions attached to a test class aren't sorted already?
- # is that true and why's that? (when using unittest, they're sorted)
- items[:] = sorted(newitems, key=lambda item: (
- item.parent.parent.parent.name,
- item.parent.parent.name,
- item.name
- ))
-
-
-def pytest_pycollect_makeitem(collector, name, obj):
- if inspect.isclass(obj) and plugin_base.want_class(obj):
- return pytest.Class(name, parent=collector)
- elif inspect.isfunction(obj) and \
- isinstance(collector, pytest.Instance) and \
- plugin_base.want_method(collector.cls, obj):
- return pytest.Function(name, parent=collector)
- else:
- return []
-
-_current_class = None
-
-
-def pytest_runtest_setup(item):
- # here we seem to get called only based on what we collected
- # in pytest_collection_modifyitems. So to do class-based stuff
- # we have to tear that out.
- global _current_class
-
- if not isinstance(item, pytest.Function):
- return
-
- # ... so we're doing a little dance here to figure it out...
- if _current_class is None:
- class_setup(item.parent.parent)
- _current_class = item.parent.parent
-
- # this is needed for the class-level, to ensure that the
- # teardown runs after the class is completed with its own
- # class-level teardown...
- def finalize():
- global _current_class
- class_teardown(item.parent.parent)
- _current_class = None
- item.parent.parent.addfinalizer(finalize)
-
- test_setup(item)
-
-
-def pytest_runtest_teardown(item):
- # ...but this works better as the hook here rather than
- # using a finalizer, as the finalizer seems to get in the way
- # of the test reporting failures correctly (you get a bunch of
- # py.test assertion stuff instead)
- test_teardown(item)
-
-
-def test_setup(item):
- plugin_base.before_test(item, item.parent.module.__name__,
- item.parent.cls, item.name)
-
-
-def test_teardown(item):
- plugin_base.after_test(item)
-
-
-def class_setup(item):
- plugin_base.start_test_class(item.cls)
-
-
-def class_teardown(item):
- plugin_base.stop_test_class(item.cls)
diff --git a/venv/Lib/site-packages/alembic/testing/provision.py b/venv/Lib/site-packages/alembic/testing/provision.py
deleted file mode 100644
index 546ef23..0000000
--- a/venv/Lib/site-packages/alembic/testing/provision.py
+++ /dev/null
@@ -1,340 +0,0 @@
-"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
- this should be removable when Alembic targets SQLAlchemy 1.0.0
-"""
-from sqlalchemy.engine import url as sa_url
-from sqlalchemy import create_engine
-from sqlalchemy import text
-from sqlalchemy import exc
-from ..util import compat
-from . import config, engines
-from .compat import get_url_backend_name
-import collections
-import os
-import time
-import logging
-
-log = logging.getLogger(__name__)
-
-FOLLOWER_IDENT = None
-
-
-class register(object):
- def __init__(self):
- self.fns = {}
-
- @classmethod
- def init(cls, fn):
- return register().for_db("*")(fn)
-
- def for_db(self, dbname):
- def decorate(fn):
- self.fns[dbname] = fn
- return self
- return decorate
-
- def __call__(self, cfg, *arg):
- if isinstance(cfg, compat.string_types):
- url = sa_url.make_url(cfg)
- elif isinstance(cfg, sa_url.URL):
- url = cfg
- else:
- url = cfg.db.url
- backend = get_url_backend_name(url)
- if backend in self.fns:
- return self.fns[backend](cfg, *arg)
- else:
- return self.fns['*'](cfg, *arg)
-
-
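The `register` class gives each provisioning step per-backend dispatch: `@register.init` installs a `"*"` default, and `@<step>.for_db("sqlite")` overrides it for one backend — note that `for_db`'s decorator returns the registry itself, so the module-level name keeps pointing at the dispatcher rather than at the new function. A condensed sketch keyed on a plain backend string instead of a config/URL:

    class register(object):
        def __init__(self):
            self.fns = {}

        @classmethod
        def init(cls, fn):
            return register().for_db("*")(fn)

        def for_db(self, dbname):
            def decorate(fn):
                self.fns[dbname] = fn
                return self
            return decorate

        def __call__(self, backend, *arg):
            fn = self.fns.get(backend, self.fns["*"])
            return fn(backend, *arg)

    @register.init
    def _create_db(backend, ident):
        raise NotImplementedError("no DB creation routine for %s" % backend)

    @_create_db.for_db("sqlite")
    def _sqlite_create_db(backend, ident):
        print("sqlite: nothing to create for %s" % ident)

    _create_db("sqlite", "test_abc")    # dispatches to the sqlite routine
    # _create_db("oracle", "x")         # would hit the "*" default and raise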
-def create_follower_db(follower_ident):
-
- for cfg in _configs_for_db_operation():
- _create_db(cfg, cfg.db, follower_ident)
-
-
-def configure_follower(follower_ident):
- for cfg in config.Config.all_configs():
- _configure_follower(cfg, follower_ident)
-
-
-def setup_config(db_url, options, file_config, follower_ident):
- if follower_ident:
- db_url = _follower_url_from_main(db_url, follower_ident)
- db_opts = {}
- _update_db_opts(db_url, db_opts)
- eng = engines.testing_engine(db_url, db_opts)
- _post_configure_engine(db_url, eng, follower_ident)
- eng.connect().close()
-
- cfg = config.Config.register(eng, db_opts, options, file_config)
- if follower_ident:
- _configure_follower(cfg, follower_ident)
- return cfg
-
-
-def drop_follower_db(follower_ident):
- for cfg in _configs_for_db_operation():
- _drop_db(cfg, cfg.db, follower_ident)
-
-
-def _configs_for_db_operation():
- hosts = set()
-
- for cfg in config.Config.all_configs():
- cfg.db.dispose()
-
- for cfg in config.Config.all_configs():
- url = cfg.db.url
- backend = get_url_backend_name(url)
- host_conf = (
- backend,
- url.username, url.host, url.database)
-
- if host_conf not in hosts:
- yield cfg
- hosts.add(host_conf)
-
- for cfg in config.Config.all_configs():
- cfg.db.dispose()
-
-
-@register.init
-def _create_db(cfg, eng, ident):
- raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url)
-
-
-@register.init
-def _drop_db(cfg, eng, ident):
- raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url)
-
-
-@register.init
-def _update_db_opts(db_url, db_opts):
- pass
-
-
-@register.init
-def _configure_follower(cfg, ident):
- pass
-
-
-@register.init
-def _post_configure_engine(url, engine, follower_ident):
- pass
-
-
-@register.init
-def _follower_url_from_main(url, ident):
- url = sa_url.make_url(url)
- url.database = ident
- return url
-
-
-@_update_db_opts.for_db("mssql")
-def _mssql_update_db_opts(db_url, db_opts):
- db_opts['legacy_schema_aliasing'] = False
-
-
-@_follower_url_from_main.for_db("sqlite")
-def _sqlite_follower_url_from_main(url, ident):
- url = sa_url.make_url(url)
- if not url.database or url.database == ':memory:':
- return url
- else:
- return sa_url.make_url("sqlite:///%s.db" % ident)
-
-
-@_post_configure_engine.for_db("sqlite")
-def _sqlite_post_configure_engine(url, engine, follower_ident):
- from sqlalchemy import event
-
- @event.listens_for(engine, "connect")
- def connect(dbapi_connection, connection_record):
- # use file DBs in all cases, memory acts kind of strangely
- # as an attached
- if not follower_ident:
- dbapi_connection.execute(
- 'ATTACH DATABASE "test_schema.db" AS test_schema')
- else:
- dbapi_connection.execute(
- 'ATTACH DATABASE "%s_test_schema.db" AS test_schema'
- % follower_ident)
-
-
-@_create_db.for_db("postgresql")
-def _pg_create_db(cfg, eng, ident):
- template_db = cfg.options.postgresql_templatedb
-
- with eng.connect().execution_options(
- isolation_level="AUTOCOMMIT") as conn:
- try:
- _pg_drop_db(cfg, conn, ident)
- except Exception:
- pass
- if not template_db:
- template_db = conn.scalar("select current_database()")
- for attempt in range(3):
- try:
- conn.execute(
- "CREATE DATABASE %s TEMPLATE %s" % (ident, template_db))
- except exc.OperationalError as err:
- if "accessed by other users" in str(err):
- log.info(
- "Waiting to create %s, URI %r, "
- "template DB %s is in use sleeping for .5",
- ident, eng.url, template_db)
- time.sleep(.5)
- else:
- raise
- else:
- break
- else:
- raise err
-
-
-@_create_db.for_db("mysql")
-def _mysql_create_db(cfg, eng, ident):
- with eng.connect() as conn:
- try:
- _mysql_drop_db(cfg, conn, ident)
- except Exception:
- pass
- conn.execute("CREATE DATABASE %s" % ident)
- conn.execute("CREATE DATABASE %s_test_schema" % ident)
- conn.execute("CREATE DATABASE %s_test_schema_2" % ident)
-
-
-@_configure_follower.for_db("mysql")
-def _mysql_configure_follower(config, ident):
- config.test_schema = "%s_test_schema" % ident
- config.test_schema_2 = "%s_test_schema_2" % ident
-
-
-@_create_db.for_db("sqlite")
-def _sqlite_create_db(cfg, eng, ident):
- pass
-
-
-@_drop_db.for_db("postgresql")
-def _pg_drop_db(cfg, eng, ident):
- with eng.connect().execution_options(
- isolation_level="AUTOCOMMIT") as conn:
- conn.execute(
- text(
- "select pg_terminate_backend(pid) from pg_stat_activity "
- "where usename=current_user and pid != pg_backend_pid() "
- "and datname=:dname"
- ), dname=ident)
- conn.execute("DROP DATABASE %s" % ident)
-
-
-@_drop_db.for_db("sqlite")
-def _sqlite_drop_db(cfg, eng, ident):
- if ident:
- os.remove("%s_test_schema.db" % ident)
- else:
- os.remove("%s.db" % ident)
-
-
-@_drop_db.for_db("mysql")
-def _mysql_drop_db(cfg, eng, ident):
- with eng.connect() as conn:
- conn.execute("DROP DATABASE %s_test_schema" % ident)
- conn.execute("DROP DATABASE %s_test_schema_2" % ident)
- conn.execute("DROP DATABASE %s" % ident)
-
-
-@_create_db.for_db("oracle")
-def _oracle_create_db(cfg, eng, ident):
- # NOTE: make sure you've run "ALTER DATABASE default tablespace users" or
- # similar, so that the default tablespace is not "system"; reflection will
- # fail otherwise
- with eng.connect() as conn:
- conn.execute("create user %s identified by xe" % ident)
- conn.execute("create user %s_ts1 identified by xe" % ident)
- conn.execute("create user %s_ts2 identified by xe" % ident)
- conn.execute("grant dba to %s" % (ident, ))
- conn.execute("grant unlimited tablespace to %s" % ident)
- conn.execute("grant unlimited tablespace to %s_ts1" % ident)
- conn.execute("grant unlimited tablespace to %s_ts2" % ident)
-
-@_configure_follower.for_db("oracle")
-def _oracle_configure_follower(config, ident):
- config.test_schema = "%s_ts1" % ident
- config.test_schema_2 = "%s_ts2" % ident
-
-
-def _ora_drop_ignore(conn, dbname):
- try:
- conn.execute("drop user %s cascade" % dbname)
- log.info("Reaped db: %s" % dbname)
- return True
- except exc.DatabaseError as err:
- log.warn("couldn't drop db: %s" % err)
- return False
-
-
-@_drop_db.for_db("oracle")
-def _oracle_drop_db(cfg, eng, ident):
- with eng.connect() as conn:
- # cx_Oracle seems to occasionally leak open connections when a large
- # suite it run, even if we confirm we have zero references to
- # connection objects.
- # while there is a "kill session" command in Oracle,
- # it unfortunately does not release the connection sufficiently.
- _ora_drop_ignore(conn, ident)
- _ora_drop_ignore(conn, "%s_ts1" % ident)
- _ora_drop_ignore(conn, "%s_ts2" % ident)
-
-
-def reap_oracle_dbs(idents_file):
- log.info("Reaping Oracle dbs...")
-
- urls = collections.defaultdict(list)
- with open(idents_file) as file_:
- for line in file_:
- line = line.strip()
- db_name, db_url = line.split(" ")
- urls[db_url].append(db_name)
-
- for url in urls:
- if not url.startswith("oracle"):
- continue
- idents = urls[url]
- log.info("db reaper connecting to %r", url)
- eng = create_engine(url)
- with eng.connect() as conn:
-
- log.info("identifiers in file: %s", ", ".join(idents))
-
- to_reap = conn.execute(
- "select u.username from all_users u where username "
- "like 'TEST_%' and not exists (select username "
- "from v$session where username=u.username)")
- all_names = set(username.lower() for (username, ) in to_reap)
- to_drop = set()
- for name in all_names:
- if name.endswith("_ts1") or name.endswith("_ts2"):
- continue
- elif name in idents:
- to_drop.add(name)
- if "%s_ts1" % name in all_names:
- to_drop.add("%s_ts1" % name)
- if "%s_ts2" % name in all_names:
- to_drop.add("%s_ts2" % name)
-
- dropped = total = 0
- for total, username in enumerate(to_drop, 1):
- if _ora_drop_ignore(conn, username):
- dropped += 1
- log.info(
- "Dropped %d out of %d stale databases detected",
- dropped, total)
-
-
-@_follower_url_from_main.for_db("oracle")
-def _oracle_follower_url_from_main(url, ident):
- url = sa_url.make_url(url)
- url.username = ident
- url.password = 'xe'
- return url
-
-
diff --git a/venv/Lib/site-packages/alembic/testing/requirements.py b/venv/Lib/site-packages/alembic/testing/requirements.py
deleted file mode 100644
index 32645ed..0000000
--- a/venv/Lib/site-packages/alembic/testing/requirements.py
+++ /dev/null
@@ -1,195 +0,0 @@
-from alembic import util
-
-from . import exclusions
-
-if util.sqla_094:
- from sqlalchemy.testing.requirements import Requirements
-else:
- class Requirements(object):
- pass
-
-
-class SuiteRequirements(Requirements):
- @property
- def schemas(self):
- """Target database must support external schemas, and have one
- named 'test_schema'."""
-
- return exclusions.open()
-
- @property
- def unique_constraint_reflection(self):
- def doesnt_have_check_uq_constraints(config):
- if not util.sqla_084:
- return True
- from sqlalchemy import inspect
-
- # temporary
- if config.db.name == "oracle":
- return True
-
- insp = inspect(config.db)
- try:
- insp.get_unique_constraints('x')
- except NotImplementedError:
- return True
- except TypeError:
- return True
- except Exception:
- pass
- return False
-
- return exclusions.skip_if(
- lambda config: not util.sqla_084,
- "SQLAlchemy 0.8.4 or greater required"
- ) + exclusions.skip_if(doesnt_have_check_uq_constraints)
-
- @property
- def foreign_key_match(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_08,
- "MATCH for foreign keys added in SQLAlchemy 0.8.0"
- )
-
- @property
- def check_constraints_w_enforcement(self):
- """Target database must support check constraints
- and also enforce them."""
-
- return exclusions.open()
-
- @property
- def reflects_pk_names(self):
- return exclusions.closed()
-
- @property
- def reflects_fk_options(self):
- return exclusions.closed()
-
- @property
- def fail_before_sqla_079(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_079,
- "SQLAlchemy 0.7.9 or greater required"
- )
-
- @property
- def fail_before_sqla_080(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_08,
- "SQLAlchemy 0.8.0 or greater required"
- )
-
- @property
- def fail_before_sqla_083(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_083,
- "SQLAlchemy 0.8.3 or greater required"
- )
-
- @property
- def fail_before_sqla_084(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_084,
- "SQLAlchemy 0.8.4 or greater required"
- )
-
- @property
- def fail_before_sqla_09(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_09,
- "SQLAlchemy 0.9.0 or greater required"
- )
-
- @property
- def fail_before_sqla_100(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_100,
- "SQLAlchemy 1.0.0 or greater required"
- )
-
- @property
- def fail_before_sqla_1010(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_1010,
- "SQLAlchemy 1.0.10 or greater required"
- )
-
- @property
- def fail_before_sqla_099(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_099,
- "SQLAlchemy 0.9.9 or greater required"
- )
-
- @property
- def fail_before_sqla_110(self):
- return exclusions.fails_if(
- lambda config: not util.sqla_110,
- "SQLAlchemy 1.1.0 or greater required"
- )
-
- @property
- def sqlalchemy_08(self):
-
- return exclusions.skip_if(
- lambda config: not util.sqla_08,
- "SQLAlchemy 0.8.0b2 or greater required"
- )
-
- @property
- def sqlalchemy_09(self):
- return exclusions.skip_if(
- lambda config: not util.sqla_09,
- "SQLAlchemy 0.9.0 or greater required"
- )
-
- @property
- def sqlalchemy_092(self):
- return exclusions.skip_if(
- lambda config: not util.sqla_092,
- "SQLAlchemy 0.9.2 or greater required"
- )
-
- @property
- def sqlalchemy_094(self):
- return exclusions.skip_if(
- lambda config: not util.sqla_094,
- "SQLAlchemy 0.9.4 or greater required"
- )
-
- @property
- def sqlalchemy_100(self):
- return exclusions.skip_if(
- lambda config: not util.sqla_100,
- "SQLAlchemy 1.0.0 or greater required"
- )
-
- @property
- def sqlalchemy_1014(self):
- return exclusions.skip_if(
- lambda config: not util.sqla_1014,
- "SQLAlchemy 1.0.14 or greater required"
- )
-
- @property
- def sqlalchemy_1115(self):
- return exclusions.skip_if(
- lambda config: not util.sqla_1115,
- "SQLAlchemy 1.1.15 or greater required"
- )
-
- @property
- def sqlalchemy_110(self):
- return exclusions.skip_if(
- lambda config: not util.sqla_110,
- "SQLAlchemy 1.1.0 or greater required"
- )
-
- @property
- def pep3147(self):
-
- return exclusions.only_if(
- lambda config: util.compat.has_pep3147()
- )
-
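A downstream project consumes `SuiteRequirements` by subclassing it and pointing the `requirement_cls` key in setup.cfg at the subclass; test classes then gate themselves with `__requires__ = ('<name>',)`, which `_possible_configs_for_cls` checks per config. An illustrative subclass — the module path, backend name, and reason string are assumptions, and `skip_if` is the exclusions helper already used above:

    # tests/requirements.py, referenced from setup.cfg as
    #   requirement_cls = tests.requirements:DefaultRequirements
    from alembic.testing.requirements import SuiteRequirements
    from alembic.testing import exclusions

    class DefaultRequirements(SuiteRequirements):
        @property
        def reflects_pk_names(self):
            # enable everywhere except a backend known not to support it
            return exclusions.skip_if(
                lambda config: config.db.name == "oracle",
                "primary key names not reflected here")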
diff --git a/venv/Lib/site-packages/alembic/testing/runner.py b/venv/Lib/site-packages/alembic/testing/runner.py
deleted file mode 100644
index d4adbcf..0000000
--- a/venv/Lib/site-packages/alembic/testing/runner.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-# testing/runner.py
-# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""
-Nose test runner module.
-
-This script is a front-end to "nosetests" which
-installs SQLAlchemy's testing plugin into the local environment.
-
-The script is intended to be used by third-party dialects and extensions
-that run within SQLAlchemy's testing framework. The runner can
-be invoked via::
-
- python -m alembic.testing.runner
-
-The script is then essentially the same as the "nosetests" script, including
-all of the usual Nose options. The test environment requires that a
-setup.cfg is locally present including various required options.
-
-Note that when using this runner, Nose's "coverage" plugin will not be
-able to provide coverage for SQLAlchemy itself, since SQLAlchemy is
-imported into sys.modules before coverage is started. The special
-script sqla_nose.py is provided as a top-level script which loads the
-plugin in a special (somewhat hacky) way so that coverage against
-SQLAlchemy itself is possible.
-
-"""
-from .plugin.noseplugin import NoseSQLAlchemy
-import nose
-
-
-def main():
- nose.main(addplugins=[NoseSQLAlchemy()])
-
-
-def setup_py_test():
- """Runner to use for the 'test_suite' entry of your setup.py.
-
- Prevents any name clash shenanigans from the command line
- argument "test" that the "setup.py test" command sends
- to nose.
-
- """
- nose.main(addplugins=[NoseSQLAlchemy()], argv=['runner'])
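As the docstring above describes, a third-party dialect can wire `setup_py_test` into setuptools so that `setup.py test` runs the suite without the "test" argument clash; a minimal, illustrative setup.py:

    from setuptools import setup

    setup(
        name="mydialect",        # hypothetical third-party dialect
        packages=["mydialect"],
        test_suite="alembic.testing.runner.setup_py_test",
    )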
diff --git a/venv/Lib/site-packages/alembic/testing/util.py b/venv/Lib/site-packages/alembic/testing/util.py
deleted file mode 100644
index 466dea3..0000000
--- a/venv/Lib/site-packages/alembic/testing/util.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from sqlalchemy.util import decorator
-
-
-@decorator
-def provide_metadata(fn, *args, **kw):
- """Provide bound MetaData for a single test, dropping afterwards."""
-
- from . import config
- from sqlalchemy import schema
-
- metadata = schema.MetaData(config.db)
- self = args[0]
- prev_meta = getattr(self, 'metadata', None)
- self.metadata = metadata
- try:
- return fn(*args, **kw)
- finally:
- metadata.drop_all()
- self.metadata = prev_meta
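Usage sketch for `provide_metadata`: it binds a fresh `MetaData` to the configured test database, exposes it as `self.metadata` for the duration of one test, and drops whatever was created afterwards. This assumes a configured testing database; the test class and table are illustrative:

    from alembic.testing.util import provide_metadata
    from sqlalchemy import Table, Column, Integer

    class RoundTripTest(object):
        @provide_metadata
        def test_make_table(self):
            t = Table('t', self.metadata,
                      Column('id', Integer, primary_key=True))
            t.create()    # bound MetaData: no explicit engine needed
            # table is dropped automatically when the test returns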
diff --git a/venv/Lib/site-packages/alembic/testing/warnings.py b/venv/Lib/site-packages/alembic/testing/warnings.py
deleted file mode 100644
index 397938f..0000000
--- a/venv/Lib/site-packages/alembic/testing/warnings.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# testing/warnings.py
-# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
- this should be removable when Alembic targets SQLAlchemy 0.9.4.
-"""
-
-from __future__ import absolute_import
-
-import warnings
-from sqlalchemy import exc as sa_exc
-import re
-
-
-def setup_filters():
- """Set global warning behavior for the test suite."""
-
- warnings.filterwarnings('ignore',
- category=sa_exc.SAPendingDeprecationWarning)
- warnings.filterwarnings('error', category=sa_exc.SADeprecationWarning)
- warnings.filterwarnings('error', category=sa_exc.SAWarning)
-
-
-def assert_warnings(fn, warning_msgs, regex=False):
- """Assert that each of the given warnings are emitted by fn."""
-
- from .assertions import eq_
-
- with warnings.catch_warnings(record=True) as log:
- # ensure that nothing is going into __warningregistry__
- warnings.filterwarnings("always")
-
- result = fn()
- for warning in log:
- popwarn = warning_msgs.pop(0)
- if regex:
- assert re.match(popwarn, str(warning.message))
- else:
- eq_(popwarn, str(warning.message))
- return result
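Usage sketch for `assert_warnings`, reusing the function above — the expected messages are consumed in emission order, and `regex=True` switches the comparison to `re.match`:

    import warnings

    def fn():
        warnings.warn("deprecated thing")
        warnings.warn("other thing")
        return 42

    result = assert_warnings(fn, ["deprecated thing", "other thing"])
    assert result == 42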
diff --git a/venv/Lib/site-packages/alembic/util/__init__.py b/venv/Lib/site-packages/alembic/util/__init__.py
deleted file mode 100644
index 8b5d8c7..0000000
--- a/venv/Lib/site-packages/alembic/util/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from .langhelpers import ( # noqa
- asbool, rev_id, to_tuple, to_list, memoized_property, dedupe_tuple,
- immutabledict, _with_legacy_names, Dispatcher, ModuleClsProxy)
-from .messaging import ( # noqa
- write_outstream, status, err, obfuscate_url_pw, warn, msg, format_as_comma)
-from .pyfiles import ( # noqa
- template_to_file, coerce_resource_to_filename,
- pyc_file_from_path, load_python_file, edit)
-from .sqla_compat import ( # noqa
- sqla_07, sqla_079, sqla_08, sqla_083, sqla_084, sqla_09, sqla_092,
- sqla_094, sqla_099, sqla_100, sqla_105, sqla_110, sqla_1010, sqla_1014,
- sqla_1115)
-from .exc import CommandError
-
-
-if not sqla_07:
- raise CommandError(
- "SQLAlchemy 0.7.3 or greater is required. ")
diff --git a/venv/Lib/site-packages/alembic/util/compat.py b/venv/Lib/site-packages/alembic/util/compat.py
deleted file mode 100644
index a754f2a..0000000
--- a/venv/Lib/site-packages/alembic/util/compat.py
+++ /dev/null
@@ -1,252 +0,0 @@
-import io
-import sys
-
-if sys.version_info < (2, 6):
- raise NotImplementedError("Python 2.6 or greater is required.")
-
-py27 = sys.version_info >= (2, 7)
-py2k = sys.version_info < (3, 0)
-py3k = sys.version_info >= (3, 0)
-py33 = sys.version_info >= (3, 3)
-py35 = sys.version_info >= (3, 5)
-py36 = sys.version_info >= (3, 6)
-
-if py3k:
- from io import StringIO
-else:
- # accepts strings
- from StringIO import StringIO # noqa
-
-if py3k:
- import builtins as compat_builtins
- string_types = str,
- binary_type = bytes
- text_type = str
-
- def callable(fn):
- return hasattr(fn, '__call__')
-
- def u(s):
- return s
-
- def ue(s):
- return s
-
- range = range
-else:
- import __builtin__ as compat_builtins
- string_types = basestring,
- binary_type = str
- text_type = unicode
- callable = callable
-
- def u(s):
- return unicode(s, "utf-8")
-
- def ue(s):
- return unicode(s, "unicode_escape")
-
- range = xrange
-
-if py3k:
- import collections
- ArgSpec = collections.namedtuple(
- "ArgSpec",
- ["args", "varargs", "keywords", "defaults"])
-
- from inspect import getfullargspec as inspect_getfullargspec
-
- def inspect_getargspec(func):
- return ArgSpec(
- *inspect_getfullargspec(func)[0:4]
- )
-else:
- from inspect import getargspec as inspect_getargspec # noqa
-
-if py3k:
- from configparser import ConfigParser as SafeConfigParser
- import configparser
-else:
- from ConfigParser import SafeConfigParser # noqa
- import ConfigParser as configparser # noqa
-
-if py2k:
- from mako.util import parse_encoding
-
-if py35:
- import importlib.util
- import importlib.machinery
-
- def load_module_py(module_id, path):
- spec = importlib.util.spec_from_file_location(module_id, path)
- module = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(module)
- return module
-
- def load_module_pyc(module_id, path):
- spec = importlib.util.spec_from_file_location(module_id, path)
- module = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(module)
- return module
-
-elif py33:
- import importlib.machinery
-
- def load_module_py(module_id, path):
- module = importlib.machinery.SourceFileLoader(
- module_id, path).load_module(module_id)
- del sys.modules[module_id]
- return module
-
- def load_module_pyc(module_id, path):
- module = importlib.machinery.SourcelessFileLoader(
- module_id, path).load_module(module_id)
- del sys.modules[module_id]
- return module
-
-if py33:
- def get_bytecode_suffixes():
- try:
- return importlib.machinery.BYTECODE_SUFFIXES
- except AttributeError:
- return importlib.machinery.DEBUG_BYTECODE_SUFFIXES
-
- def get_current_bytecode_suffixes():
- if py35:
- suffixes = importlib.machinery.BYTECODE_SUFFIXES
- elif py33:
- if sys.flags.optimize:
- suffixes = importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES
- else:
- suffixes = importlib.machinery.BYTECODE_SUFFIXES
- else:
- if sys.flags.optimize:
- suffixes = [".pyo"]
- else:
- suffixes = [".pyc"]
-
- return suffixes
-
- def has_pep3147():
- # http://www.python.org/dev/peps/pep-3147/#detecting-pep-3147-availability
-
- import imp
- return hasattr(imp, 'get_tag')
-
-else:
- import imp
-
- def load_module_py(module_id, path): # noqa
- with open(path, 'rb') as fp:
- mod = imp.load_source(module_id, path, fp)
- if py2k:
- source_encoding = parse_encoding(fp)
- if source_encoding:
- mod._alembic_source_encoding = source_encoding
- del sys.modules[module_id]
- return mod
-
- def load_module_pyc(module_id, path): # noqa
- with open(path, 'rb') as fp:
- mod = imp.load_compiled(module_id, path, fp)
- # no source encoding here
- del sys.modules[module_id]
- return mod
-
- def get_current_bytecode_suffixes():
- if sys.flags.optimize:
- return [".pyo"] # e.g. .pyo
- else:
- return [".pyc"] # e.g. .pyc
-
- def has_pep3147():
- return False
-
-try:
- exec_ = getattr(compat_builtins, 'exec')
-except AttributeError:
- # Python 2
- def exec_(func_text, globals_, lcl):
- exec('exec func_text in globals_, lcl')
-
-################################################
-# cross-compatible metaclass implementation
-# Copyright (c) 2010-2012 Benjamin Peterson
-
-
-def with_metaclass(meta, base=object):
- """Create a base class with a metaclass."""
- return meta("%sBase" % meta.__name__, (base,), {})
-################################################
-
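`with_metaclass` sidesteps the incompatible py2k (`__metaclass__`) and py3k (`metaclass=`) spellings by synthesizing an intermediate base class whose type *is* the metaclass. Self-contained demo:

    def with_metaclass(meta, base=object):
        return meta("%sBase" % meta.__name__, (base,), {})

    class Meta(type):
        def __new__(mcs, name, bases, dct):
            dct["tag"] = name.lower()   # stamp every class we create
            return super(Meta, mcs).__new__(mcs, name, bases, dct)

    class Widget(with_metaclass(Meta)):
        pass

    assert Widget.tag == "widget"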
-if py3k:
- def reraise(tp, value, tb=None, cause=None):
- if cause is not None:
- value.__cause__ = cause
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
- def raise_from_cause(exception, exc_info=None):
- if exc_info is None:
- exc_info = sys.exc_info()
- exc_type, exc_value, exc_tb = exc_info
- reraise(type(exception), exception, tb=exc_tb, cause=exc_value)
-else:
- exec("def reraise(tp, value, tb=None, cause=None):\n"
- " raise tp, value, tb\n")
-
- def raise_from_cause(exception, exc_info=None):
- # not as nice as that of Py3K, but at least preserves
- # the code line where the issue occurred
- if exc_info is None:
- exc_info = sys.exc_info()
- exc_type, exc_value, exc_tb = exc_info
- reraise(type(exception), exception, tb=exc_tb)
-
-# produce a wrapper that allows encoded text to stream
-# into a given buffer, but doesn't close it.
-# not sure of a more idiomatic approach to this.
-class EncodedIO(io.TextIOWrapper):
-
- def close(self):
- pass
-
-if py2k:
- # in Py2K, the io.* package is awkward because it does not
- # easily wrap the file type (e.g. sys.stdout) and I can't
- # figure out at all how to wrap StringIO.StringIO (used by nosetests)
- # and also might be user specified too. So create a full
- # adapter.
-
- class ActLikePy3kIO(object):
-
- """Produce an object capable of wrapping either
- sys.stdout (e.g. file) *or* StringIO.StringIO().
-
- """
-
- def _false(self):
- return False
-
- def _true(self):
- return True
-
- readable = seekable = _false
- writable = _true
- closed = False
-
- def __init__(self, file_):
- self.file_ = file_
-
- def write(self, text):
- return self.file_.write(text)
-
- def flush(self):
- return self.file_.flush()
-
- class EncodedIO(EncodedIO):
-
- def __init__(self, file_, encoding):
- super(EncodedIO, self).__init__(
- ActLikePy3kIO(file_), encoding=encoding)
diff --git a/venv/Lib/site-packages/alembic/util/exc.py b/venv/Lib/site-packages/alembic/util/exc.py
deleted file mode 100644
index f7ad021..0000000
--- a/venv/Lib/site-packages/alembic/util/exc.py
+++ /dev/null
@@ -1,2 +0,0 @@
-class CommandError(Exception):
- pass
diff --git a/venv/Lib/site-packages/alembic/util/langhelpers.py b/venv/Lib/site-packages/alembic/util/langhelpers.py
deleted file mode 100644
index aa016f0..0000000
--- a/venv/Lib/site-packages/alembic/util/langhelpers.py
+++ /dev/null
@@ -1,330 +0,0 @@
-import textwrap
-import warnings
-import inspect
-import uuid
-import collections
-
-from .compat import callable, exec_, string_types, with_metaclass
-
-from .compat import inspect_getargspec
-
-
-class _ModuleClsMeta(type):
- def __setattr__(cls, key, value):
- super(_ModuleClsMeta, cls).__setattr__(key, value)
- cls._update_module_proxies(key)
-
-
-class ModuleClsProxy(with_metaclass(_ModuleClsMeta)):
- """Create module level proxy functions for the
- methods on a given class.
-
- The functions will have a compatible signature
- as the methods.
-
- """
-
- _setups = collections.defaultdict(lambda: (set(), []))
-
- @classmethod
- def _update_module_proxies(cls, name):
- attr_names, modules = cls._setups[cls]
- for globals_, locals_ in modules:
- cls._add_proxied_attribute(name, globals_, locals_, attr_names)
-
- def _install_proxy(self):
- attr_names, modules = self._setups[self.__class__]
- for globals_, locals_ in modules:
- globals_['_proxy'] = self
- for attr_name in attr_names:
- globals_[attr_name] = getattr(self, attr_name)
-
- def _remove_proxy(self):
- attr_names, modules = self._setups[self.__class__]
- for globals_, locals_ in modules:
- globals_['_proxy'] = None
- for attr_name in attr_names:
- del globals_[attr_name]
-
- @classmethod
- def create_module_class_proxy(cls, globals_, locals_):
- attr_names, modules = cls._setups[cls]
- modules.append(
- (globals_, locals_)
- )
- cls._setup_proxy(globals_, locals_, attr_names)
-
- @classmethod
- def _setup_proxy(cls, globals_, locals_, attr_names):
- for methname in dir(cls):
- cls._add_proxied_attribute(methname, globals_, locals_, attr_names)
-
- @classmethod
- def _add_proxied_attribute(cls, methname, globals_, locals_, attr_names):
- if not methname.startswith('_'):
- meth = getattr(cls, methname)
- if callable(meth):
- locals_[methname] = cls._create_method_proxy(
- methname, globals_, locals_)
- else:
- attr_names.add(methname)
-
- @classmethod
- def _create_method_proxy(cls, name, globals_, locals_):
- fn = getattr(cls, name)
- spec = inspect_getargspec(fn)
- if spec[0] and spec[0][0] == 'self':
- spec[0].pop(0)
- args = inspect.formatargspec(*spec)
- num_defaults = 0
- if spec[3]:
- num_defaults += len(spec[3])
- name_args = spec[0]
- if num_defaults:
- defaulted_vals = name_args[0 - num_defaults:]
- else:
- defaulted_vals = ()
-
- apply_kw = inspect.formatargspec(
- name_args, spec[1], spec[2],
- defaulted_vals,
- formatvalue=lambda x: '=' + x)
-
- def _name_error(name):
- raise NameError(
- "Can't invoke function '%s', as the proxy object has "
- "not yet been "
- "established for the Alembic '%s' class. "
- "Try placing this code inside a callable." % (
- name, cls.__name__
- ))
- globals_['_name_error'] = _name_error
-
- translations = getattr(fn, "_legacy_translations", [])
- if translations:
- outer_args = inner_args = "*args, **kw"
- translate_str = "args, kw = _translate(%r, %r, %r, args, kw)" % (
- fn.__name__,
- tuple(spec),
- translations
- )
-
- def translate(fn_name, spec, translations, args, kw):
- return_kw = {}
- return_args = []
-
- for oldname, newname in translations:
- if oldname in kw:
- warnings.warn(
- "Argument %r is now named %r "
- "for method %s()." % (
- oldname, newname, fn_name
- ))
- return_kw[newname] = kw.pop(oldname)
- return_kw.update(kw)
-
- args = list(args)
- if spec[3]:
- pos_only = spec[0][:-len(spec[3])]
- else:
- pos_only = spec[0]
- for arg in pos_only:
- if arg not in return_kw:
- try:
- return_args.append(args.pop(0))
- except IndexError:
- raise TypeError(
- "missing required positional argument: %s"
- % arg)
- return_args.extend(args)
-
- return return_args, return_kw
- globals_['_translate'] = translate
- else:
- outer_args = args[1:-1]
- inner_args = apply_kw[1:-1]
- translate_str = ""
-
- func_text = textwrap.dedent("""\
- def %(name)s(%(args)s):
- %(doc)r
- %(translate)s
- try:
- p = _proxy
- except NameError:
- _name_error('%(name)s')
- return _proxy.%(name)s(%(apply_kw)s)
- """ % {
- 'name': name,
- 'translate': translate_str,
- 'args': outer_args,
- 'apply_kw': inner_args,
- 'doc': fn.__doc__,
- })
- lcl = {}
- exec_(func_text, globals_, lcl)
- return lcl[name]
-
-
-def _with_legacy_names(translations):
- def decorate(fn):
- fn._legacy_translations = translations
- return fn
-
- return decorate
-
-
-def asbool(value):
- return value is not None and \
- value.lower() == 'true'
-
-
-def rev_id():
- return uuid.uuid4().hex[-12:]
-
-
-def to_list(x, default=None):
- if x is None:
- return default
- elif isinstance(x, string_types):
- return [x]
- elif isinstance(x, collections.Iterable):
- return list(x)
- else:
- return [x]
-
-
-def to_tuple(x, default=None):
- if x is None:
- return default
- elif isinstance(x, string_types):
- return (x, )
- elif isinstance(x, collections.Iterable):
- return tuple(x)
- else:
- return (x, )
-
-
-def unique_list(seq, hashfunc=None):
- seen = set()
- seen_add = seen.add
- if not hashfunc:
- return [x for x in seq
- if x not in seen
- and not seen_add(x)]
- else:
- return [x for x in seq
- if hashfunc(x) not in seen
- and not seen_add(hashfunc(x))]
-
-
-def dedupe_tuple(tup):
- return tuple(unique_list(tup))
-
-
-class memoized_property(object):
-
- """A read-only @property that is only evaluated once."""
-
- def __init__(self, fget, doc=None):
- self.fget = fget
- self.__doc__ = doc or fget.__doc__
- self.__name__ = fget.__name__
-
- def __get__(self, obj, cls):
- if obj is None:
- return self
- obj.__dict__[self.__name__] = result = self.fget(obj)
- return result
-
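The trick in `memoized_property.__get__` is non-data descriptor precedence: the first access computes the value and plants it in the instance `__dict__`, which shadows the descriptor on every later lookup. Usage sketch, reusing the class defined above:

    class Expensive(object):
        @memoized_property
        def value(self):
            print("computing...")   # printed only on first access
            return 42

    e = Expensive()
    e.value   # computes, caches under e.__dict__['value']
    e.value   # instance attribute wins; fget never runs again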
-
-class immutabledict(dict):
-
- def _immutable(self, *arg, **kw):
- raise TypeError("%s object is immutable" % self.__class__.__name__)
-
- __delitem__ = __setitem__ = __setattr__ = \
- clear = pop = popitem = setdefault = \
- update = _immutable
-
- def __new__(cls, *args):
- new = dict.__new__(cls)
- dict.__init__(new, *args)
- return new
-
- def __init__(self, *args):
- pass
-
- def __reduce__(self):
- return immutabledict, (dict(self), )
-
- def union(self, d):
- if not self:
- return immutabledict(d)
- else:
- d2 = immutabledict(self)
- dict.update(d2, d)
- return d2
-
- def __repr__(self):
- return "immutabledict(%s)" % dict.__repr__(self)
-
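Usage sketch for `immutabledict`, again reusing the class above — every mutator raises, and `union` is the only way to derive an extended copy:

    d = immutabledict({"x": 1})
    d2 = d.union({"y": 2})       # new immutabledict; d is untouched
    assert d2["y"] == 2
    try:
        d["x"] = 5
    except TypeError as err:
        print(err)               # "immutabledict object is immutable"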
-
-class Dispatcher(object):
- def __init__(self, uselist=False):
- self._registry = {}
- self.uselist = uselist
-
- def dispatch_for(self, target, qualifier='default'):
- def decorate(fn):
- if self.uselist:
- self._registry.setdefault((target, qualifier), []).append(fn)
- else:
- assert (target, qualifier) not in self._registry
- self._registry[(target, qualifier)] = fn
- return fn
- return decorate
-
- def dispatch(self, obj, qualifier='default'):
-
- if isinstance(obj, string_types):
- targets = [obj]
- elif isinstance(obj, type):
- targets = obj.__mro__
- else:
- targets = type(obj).__mro__
-
- for spcls in targets:
- if qualifier != 'default' and (spcls, qualifier) in self._registry:
- return self._fn_or_list(
- self._registry[(spcls, qualifier)])
- elif (spcls, 'default') in self._registry:
- return self._fn_or_list(
- self._registry[(spcls, 'default')])
- else:
- raise ValueError("no dispatch function for object: %s" % obj)
-
- def _fn_or_list(self, fn_or_list):
- if self.uselist:
- def go(*arg, **kw):
- for fn in fn_or_list:
- fn(*arg, **kw)
- return go
- else:
- return fn_or_list
-
- def branch(self):
- """Return a copy of this dispatcher that is independently
- writable."""
-
- d = Dispatcher()
- if self.uselist:
- d._registry.update(
- (k, [fn for fn in self._registry[k]])
- for k in self._registry
- )
- else:
- d._registry.update(self._registry)
- return d
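`Dispatcher` resolves handlers along the target's MRO, so a function registered for a base class catches all subclasses, with string targets and non-default qualifiers layered on top. Sketch, reusing the class above:

    dispatcher = Dispatcher()

    @dispatcher.dispatch_for(int)
    def handle_int(obj):
        return "int: %d" % obj

    @dispatcher.dispatch_for(object)
    def handle_default(obj):
        return "fallback: %r" % obj

    print(dispatcher.dispatch(5)(5))      # "int: 5" -- direct MRO hit
    print(dispatcher.dispatch(5.0)(5.0))  # "fallback: 5.0" via object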
diff --git a/venv/Lib/site-packages/alembic/util/messaging.py b/venv/Lib/site-packages/alembic/util/messaging.py
deleted file mode 100644
index c202e96..0000000
--- a/venv/Lib/site-packages/alembic/util/messaging.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from .compat import py27, binary_type, string_types
-import sys
-from sqlalchemy.engine import url
-import warnings
-import textwrap
-import collections
-import logging
-
-log = logging.getLogger(__name__)
-
-if py27:
- # disable "no handler found" errors
- logging.getLogger('alembic').addHandler(logging.NullHandler())
-
-
-try:
- import fcntl
- import termios
- import struct
- ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ,
- struct.pack('HHHH', 0, 0, 0, 0))
- _h, TERMWIDTH, _hp, _wp = struct.unpack('HHHH', ioctl)
- if TERMWIDTH <= 0: # can occur if running in emacs pseudo-tty
- TERMWIDTH = None
-except (ImportError, IOError):
- TERMWIDTH = None
-
-
-def write_outstream(stream, *text):
- encoding = getattr(stream, 'encoding', 'ascii') or 'ascii'
- for t in text:
- if not isinstance(t, binary_type):
- t = t.encode(encoding, 'replace')
- t = t.decode(encoding)
- try:
- stream.write(t)
- except IOError:
- # suppress "broken pipe" errors.
- # no known way to handle this on Python 3 however
- # as the exception is "ignored" (noisily) in TextIOWrapper.
- break
-
-
-def status(_statmsg, fn, *arg, **kw):
- msg(_statmsg + " ...", False)
- try:
- ret = fn(*arg, **kw)
- write_outstream(sys.stdout, " done\n")
- return ret
- except:
- write_outstream(sys.stdout, " FAILED\n")
- raise
-
-
-def err(message):
- log.error(message)
- msg("FAILED: %s" % message)
- sys.exit(-1)
-
-
-def obfuscate_url_pw(u):
- u = url.make_url(u)
- if u.password:
- u.password = 'XXXXX'
- return str(u)
-
-
-def warn(msg):
- warnings.warn(msg)
-
-
-def msg(msg, newline=True):
- if TERMWIDTH is None:
- write_outstream(sys.stdout, msg)
- if newline:
- write_outstream(sys.stdout, "\n")
- else:
- # left indent output lines
- lines = textwrap.wrap(msg, TERMWIDTH)
- if len(lines) > 1:
- for line in lines[0:-1]:
- write_outstream(sys.stdout, " ", line, "\n")
- write_outstream(sys.stdout, " ", lines[-1], ("\n" if newline else ""))
-
-
-def format_as_comma(value):
- if value is None:
- return ""
- elif isinstance(value, string_types):
- return value
- elif isinstance(value, collections.Iterable):
- return ", ".join(value)
- else:
- raise ValueError("Don't know how to comma-format %r" % value)
diff --git a/venv/Lib/site-packages/alembic/util/pyfiles.py b/venv/Lib/site-packages/alembic/util/pyfiles.py
deleted file mode 100644
index 0e52133..0000000
--- a/venv/Lib/site-packages/alembic/util/pyfiles.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import sys
-import os
-import re
-from .compat import load_module_py, load_module_pyc, \
- get_current_bytecode_suffixes, has_pep3147
-from mako.template import Template
-from mako import exceptions
-import tempfile
-from .exc import CommandError
-
-
-def template_to_file(template_file, dest, output_encoding, **kw):
- template = Template(filename=template_file)
- try:
- output = template.render_unicode(**kw).encode(output_encoding)
- except:
- with tempfile.NamedTemporaryFile(suffix='.txt', delete=False) as ntf:
- ntf.write(
- exceptions.text_error_template().
- render_unicode().encode(output_encoding))
- fname = ntf.name
- raise CommandError(
- "Template rendering failed; see %s for a "
- "template-oriented traceback." % fname)
- else:
- with open(dest, 'wb') as f:
- f.write(output)
-
-
-def coerce_resource_to_filename(fname):
- """Interpret a filename as either a filesystem location or as a package
- resource.
-
- Names that are non absolute paths and contain a colon
- are interpreted as resources and coerced to a file location.
-
- """
- if not os.path.isabs(fname) and ":" in fname:
- import pkg_resources
- fname = pkg_resources.resource_filename(*fname.split(':'))
- return fname
-
-
-def pyc_file_from_path(path):
- """Given a python source path, locate the .pyc.
-
- """
-
- if has_pep3147():
- import imp
- candidate = imp.cache_from_source(path)
- if os.path.exists(candidate):
- return candidate
-
- filepath, ext = os.path.splitext(path)
- for ext in get_current_bytecode_suffixes():
- if os.path.exists(filepath + ext):
- return filepath + ext
- else:
- return None
-
-
-def edit(path):
- """Given a source path, run the EDITOR for it"""
-
- import editor
- try:
- editor.edit(path)
- except Exception as exc:
- raise CommandError('Error executing editor (%s)' % (exc,))
-
-
-def load_python_file(dir_, filename):
- """Load a file from the given path as a Python module."""
-
- module_id = re.sub(r'\W', "_", filename)
- path = os.path.join(dir_, filename)
- _, ext = os.path.splitext(filename)
- if ext == ".py":
- if os.path.exists(path):
- module = load_module_py(module_id, path)
- else:
- pyc_path = pyc_file_from_path(path)
- if pyc_path is None:
- raise ImportError("Can't find Python file %s" % path)
- else:
- module = load_module_pyc(module_id, pyc_path)
- elif ext in (".pyc", ".pyo"):
- module = load_module_pyc(module_id, path)
- return module
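For reference, load_python_file above leans on the load_module_py/load_module_pyc shims from alembic's compat module; on Python 3 the .py branch reduces to plain importlib. A rough stdlib-only equivalent of that branch (a sketch under that assumption, not alembic's API):

    import importlib.util
    import os
    import re

    def load_python_file(dir_, filename):
        """Load dir_/filename as a module under a sanitized module name."""
        module_id = re.sub(r'\W', '_', filename)
        path = os.path.join(dir_, filename)
        spec = importlib.util.spec_from_file_location(module_id, path)
        if spec is None:
            raise ImportError("Can't find Python file %s" % path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module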
diff --git a/venv/Lib/site-packages/alembic/util/sqla_compat.py b/venv/Lib/site-packages/alembic/util/sqla_compat.py
deleted file mode 100644
index a099149..0000000
--- a/venv/Lib/site-packages/alembic/util/sqla_compat.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import re
-from sqlalchemy import __version__
-from sqlalchemy.schema import ForeignKeyConstraint, CheckConstraint, Column
-from sqlalchemy import types as sqltypes
-from sqlalchemy import schema, sql
-from sqlalchemy.sql.visitors import traverse
-from sqlalchemy.ext.compiler import compiles
-from sqlalchemy.sql.expression import _BindParamClause
-from . import compat
-
-
-def _safe_int(value):
- try:
- return int(value)
- except:
- return value
-_vers = tuple(
- [_safe_int(x) for x in re.findall(r'(\d+|[abc]\d)', __version__)])
-sqla_07 = _vers > (0, 7, 2)
-sqla_079 = _vers >= (0, 7, 9)
-sqla_08 = _vers >= (0, 8, 0)
-sqla_083 = _vers >= (0, 8, 3)
-sqla_084 = _vers >= (0, 8, 4)
-sqla_09 = _vers >= (0, 9, 0)
-sqla_092 = _vers >= (0, 9, 2)
-sqla_094 = _vers >= (0, 9, 4)
-sqla_099 = _vers >= (0, 9, 9)
-sqla_100 = _vers >= (1, 0, 0)
-sqla_105 = _vers >= (1, 0, 5)
-sqla_1010 = _vers >= (1, 0, 10)
-sqla_110 = _vers >= (1, 1, 0)
-sqla_1014 = _vers >= (1, 0, 14)
-sqla_1115 = _vers >= (1, 1, 15)
-
-if sqla_08:
- from sqlalchemy.sql.expression import TextClause
-else:
- from sqlalchemy.sql.expression import _TextClause as TextClause
-
-
-def _table_for_constraint(constraint):
- if isinstance(constraint, ForeignKeyConstraint):
- return constraint.parent
- else:
- return constraint.table
-
-
-def _columns_for_constraint(constraint):
- if isinstance(constraint, ForeignKeyConstraint):
- return [fk.parent for fk in constraint.elements]
- elif isinstance(constraint, CheckConstraint):
- return _find_columns(constraint.sqltext)
- else:
- return list(constraint.columns)
-
-
-def _fk_spec(constraint):
- if sqla_100:
- source_columns = [
- constraint.columns[key].name for key in constraint.column_keys]
- else:
- source_columns = [
- element.parent.name for element in constraint.elements]
-
- source_table = constraint.parent.name
- source_schema = constraint.parent.schema
- target_schema = constraint.elements[0].column.table.schema
- target_table = constraint.elements[0].column.table.name
- target_columns = [element.column.name for element in constraint.elements]
- ondelete = constraint.ondelete
- onupdate = constraint.onupdate
- deferrable = constraint.deferrable
- initially = constraint.initially
- return (
- source_schema, source_table,
- source_columns, target_schema, target_table, target_columns,
- onupdate, ondelete, deferrable, initially)
-
-
-def _fk_is_self_referential(constraint):
- spec = constraint.elements[0]._get_colspec()
- tokens = spec.split(".")
- tokens.pop(-1) # colname
- tablekey = ".".join(tokens)
- return tablekey == constraint.parent.key
-
-
-def _is_type_bound(constraint):
- # this deals with SQLAlchemy #3260, don't copy CHECK constraints
- # that will be generated by the type.
- if sqla_100:
- # new feature added for #3260
- return constraint._type_bound
- else:
- # old way, look at what we know Boolean/Enum to use
- return (
- constraint._create_rule is not None and
- isinstance(
- getattr(constraint._create_rule, "target", None),
- sqltypes.SchemaType)
- )
-
-
-def _find_columns(clause):
- """locate Column objects within the given expression."""
-
- cols = set()
- traverse(clause, {}, {'column': cols.add})
- return cols
-
-
-def _textual_index_column(table, text_):
- """a workaround for the Index construct's severe lack of flexibility"""
- if isinstance(text_, compat.string_types):
- c = Column(text_, sqltypes.NULLTYPE)
- table.append_column(c)
- return c
- elif isinstance(text_, TextClause):
- return _textual_index_element(table, text_)
- else:
- raise ValueError("String or text() construct expected")
-
-
-class _textual_index_element(sql.ColumnElement):
- """Wrap around a sqlalchemy text() construct in such a way that
- we appear like a column-oriented SQL expression to an Index
- construct.
-
- The issue here is that currently the Postgresql dialect, the biggest
- recipient of functional indexes, keys all the index expressions to
- the corresponding column expressions when rendering CREATE INDEX,
- so the Index we create here needs to have a .columns collection that
- is the same length as the .expressions collection. Ultimately
- SQLAlchemy should support text() expressions in indexes.
-
- See https://bitbucket.org/zzzeek/sqlalchemy/issue/3174/\
- support-text-sent-to-indexes
-
- """
- __visit_name__ = '_textual_idx_element'
-
- def __init__(self, table, text):
- self.table = table
- self.text = text
- self.key = text.text
- self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE)
- table.append_column(self.fake_column)
-
- def get_children(self):
- return [self.fake_column]
-
-
-@compiles(_textual_index_element)
-def _render_textual_index_column(element, compiler, **kw):
- return compiler.process(element.text, **kw)
-
-
-class _literal_bindparam(_BindParamClause):
- pass
-
-
-@compiles(_literal_bindparam)
-def _render_literal_bindparam(element, compiler, **kw):
- return compiler.render_literal_bindparam(element, **kw)
-
-
-def _get_index_expressions(idx):
- if sqla_08:
- return list(idx.expressions)
- else:
- return list(idx.columns)
-
-
-def _get_index_column_names(idx):
- return [getattr(exp, "name", None) for exp in _get_index_expressions(idx)]
-
-
-def _get_index_final_name(dialect, idx):
- if sqla_08:
- # trying to keep the truncation rules totally localized on the
- # SQLA side while also stepping around the quoting issue. Ideally
- # the _prepared_index_name() method on the SQLA side would have
- # a quoting option or the truncation routine would be broken out.
- #
- # test for SQLA quoted_name construct, introduced in
- # 0.9 or thereabouts.
- # this doesn't work in 0.8 and the "quote" option on Index doesn't
- # seem to work in 0.8 either.
- if hasattr(idx.name, "quote"):
- # might be quoted_name, might be truncated_name, keep it the
- # same
- quoted_name_cls = type(idx.name)
- new_name = quoted_name_cls(str(idx.name), quote=False)
- idx = schema.Index(name=new_name)
- return dialect.ddl_compiler(dialect, None)._prepared_index_name(idx)
- else:
- return idx.name
-
-
-def _is_mariadb(mysql_dialect):
- return 'MariaDB' in mysql_dialect.server_version_info
-
-
-def _mariadb_normalized_version_info(mysql_dialect):
- if len(mysql_dialect.server_version_info) > 5:
- return mysql_dialect.server_version_info[3:]
- else:
- return mysql_dialect.server_version_info
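The version-flag block near the top of the deleted sqla_compat.py works because _safe_int keeps tokens as ints where possible, so ordinary tuple comparison yields a sane version ordering, with pre-release tags like 'b1' surviving as strings in a trailing position. The same technique in isolation (illustrative names):

    import re

    def version_tuple(version_string):
        """'1.0.10' -> (1, 0, 10); '0.9.4b2' -> (0, 9, 4, 'b2')."""
        def safe_int(token):
            try:
                return int(token)
            except ValueError:
                return token
        return tuple(safe_int(t)
                     for t in re.findall(r'(\d+|[abc]\d)', version_string))

    assert version_tuple('1.0.10') >= (1, 0, 5)
    assert version_tuple('0.9.4b2') >= (0, 9, 4)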
diff --git a/venv/Lib/site-packages/click-6.7.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/click-6.7.dist-info/DESCRIPTION.rst
deleted file mode 100644
index e118723..0000000
--- a/venv/Lib/site-packages/click-6.7.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-UNKNOWN
-
-
diff --git a/venv/Lib/site-packages/click-6.7.dist-info/INSTALLER b/venv/Lib/site-packages/click-6.7.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/venv/Lib/site-packages/click-6.7.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/venv/Lib/site-packages/click-6.7.dist-info/METADATA b/venv/Lib/site-packages/click-6.7.dist-info/METADATA
deleted file mode 100644
index 1f10885..0000000
--- a/venv/Lib/site-packages/click-6.7.dist-info/METADATA
+++ /dev/null
@@ -1,16 +0,0 @@
-Metadata-Version: 2.0
-Name: click
-Version: 6.7
-Summary: A simple wrapper around optparse for powerful command line utilities.
-Home-page: http://github.com/mitsuhiko/click
-Author: Armin Ronacher
-Author-email: armin.ronacher@active-4.com
-License: UNKNOWN
-Platform: UNKNOWN
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-
-UNKNOWN
-
-
diff --git a/venv/Lib/site-packages/click-6.7.dist-info/RECORD b/venv/Lib/site-packages/click-6.7.dist-info/RECORD
deleted file mode 100644
index 6eaa8b2..0000000
--- a/venv/Lib/site-packages/click-6.7.dist-info/RECORD
+++ /dev/null
@@ -1,41 +0,0 @@
-click/__init__.py,sha256=k8R00cFKWI8dhDVKQeLBlAdNh1CxerMEDRiGnr32gdw,2858
-click/_bashcomplete.py,sha256=82rMiibtEurdwBq60NHXVCBuGXJHDpblFO9o2YxJDF0,2423
-click/_compat.py,sha256=j59MpzxYGE-fTGj0A5sg8UI8GhHod1XMojiCA0jvbL0,21011
-click/_termui_impl.py,sha256=Ol1JJhvBRw3l8j1WIU0tOWjQtxxmwGE44lFDbzDqzoA,16395
-click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198
-click/_unicodefun.py,sha256=A3UOzJw6lEZyol2SBg3fNXgweTutaOzkJ61OB7vik3Y,4204
-click/_winconsole.py,sha256=MzG46DEYPoRyx4SO7EIhFuFZHESgooAfJLIukbB6p5c,7790
-click/core.py,sha256=M0nJ6Kkye7XZXYG7HCbkJWSfy14WHV6bQmGLACrOhKw,70254
-click/decorators.py,sha256=y7CX2needh8iRWafj-QS_hGQFsN24eyXAhx5Y2ATwas,10941
-click/exceptions.py,sha256=rOa0pP3PbSy0_AAPOW9irBEM8AJ3BySN-4z2VUwFVo4,6788
-click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889
-click/globals.py,sha256=PAgnKvGxq4YuEIldw3lgYOGBLYwsyxnm1IByBX3BFXo,1515
-click/parser.py,sha256=i01xgYuIA6AwQWEXjshwHSwnTR3gUep4FxJIfyW4ta4,15510
-click/termui.py,sha256=Bp99MSWQtyoWe1_7HggDmA77n--3KLxu7NsZMFMaCUo,21008
-click/testing.py,sha256=kJ9mjtJgwNAlkgKcFf9-ISxufmaPDbbuOHVC9WIvKdY,11002
-click/types.py,sha256=ZGb2lmFs5Vwd9loTRIMbGcqhPVOql8mGoBhWBRT6V4E,18864
-click/utils.py,sha256=1jalPlkUU28JReTEQeeSFtbJd-SirYWBNfjtELBKzT4,14916
-click-6.7.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10
-click-6.7.dist-info/METADATA,sha256=l6lAyogIUXiHKUK_rWguef-EMcvO5C6bXzFCNCcblbQ,424
-click-6.7.dist-info/RECORD,,
-click-6.7.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113
-click-6.7.dist-info/metadata.json,sha256=qg0uO6amNHkIkOxnmWX7Xa_DNQMQ62Q6drivuP9Gh1c,571
-click-6.7.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
-click-6.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-click/__pycache__/core.cpython-36.pyc,,
-click/__pycache__/decorators.cpython-36.pyc,,
-click/__pycache__/exceptions.cpython-36.pyc,,
-click/__pycache__/formatting.cpython-36.pyc,,
-click/__pycache__/globals.cpython-36.pyc,,
-click/__pycache__/parser.cpython-36.pyc,,
-click/__pycache__/termui.cpython-36.pyc,,
-click/__pycache__/testing.cpython-36.pyc,,
-click/__pycache__/types.cpython-36.pyc,,
-click/__pycache__/utils.cpython-36.pyc,,
-click/__pycache__/_bashcomplete.cpython-36.pyc,,
-click/__pycache__/_compat.cpython-36.pyc,,
-click/__pycache__/_termui_impl.cpython-36.pyc,,
-click/__pycache__/_textwrap.cpython-36.pyc,,
-click/__pycache__/_unicodefun.cpython-36.pyc,,
-click/__pycache__/_winconsole.cpython-36.pyc,,
-click/__pycache__/__init__.cpython-36.pyc,,
diff --git a/venv/Lib/site-packages/click-6.7.dist-info/WHEEL b/venv/Lib/site-packages/click-6.7.dist-info/WHEEL
deleted file mode 100644
index 7bf9daa..0000000
--- a/venv/Lib/site-packages/click-6.7.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.30.0.a0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/venv/Lib/site-packages/click-6.7.dist-info/metadata.json b/venv/Lib/site-packages/click-6.7.dist-info/metadata.json
deleted file mode 100644
index 0a4cfb1..0000000
--- a/venv/Lib/site-packages/click-6.7.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 3"], "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/mitsuhiko/click"}}}, "generator": "bdist_wheel (0.30.0.a0)", "metadata_version": "2.0", "name": "click", "summary": "A simple wrapper around optparse for powerful command line utilities.", "version": "6.7"}
\ No newline at end of file
diff --git a/venv/Lib/site-packages/click-6.7.dist-info/top_level.txt b/venv/Lib/site-packages/click-6.7.dist-info/top_level.txt
deleted file mode 100644
index dca9a90..0000000
--- a/venv/Lib/site-packages/click-6.7.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-click
diff --git a/venv/Lib/site-packages/click/__init__.py b/venv/Lib/site-packages/click/__init__.py
deleted file mode 100644
index 971e55d..0000000
--- a/venv/Lib/site-packages/click/__init__.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- click
- ~~~~~
-
- Click is a simple Python module that wraps the stdlib's optparse to make
- writing command line scripts fun. Unlike other modules, it's based around
- a simple API that does not come with too much magic and is composable.
-
- In case optparse ever gets removed from the stdlib, it will be shipped by
- this module.
-
- :copyright: (c) 2014 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-# Core classes
-from .core import Context, BaseCommand, Command, MultiCommand, Group, \
- CommandCollection, Parameter, Option, Argument
-
-# Globals
-from .globals import get_current_context
-
-# Decorators
-from .decorators import pass_context, pass_obj, make_pass_decorator, \
- command, group, argument, option, confirmation_option, \
- password_option, version_option, help_option
-
-# Types
-from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
- STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED
-
-# Utilities
-from .utils import echo, get_binary_stream, get_text_stream, open_file, \
- format_filename, get_app_dir, get_os_args
-
-# Terminal functions
-from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
- progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
- pause
-
-# Exceptions
-from .exceptions import ClickException, UsageError, BadParameter, \
- FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
- MissingParameter
-
-# Formatting
-from .formatting import HelpFormatter, wrap_text
-
-# Parsing
-from .parser import OptionParser
-
-
-__all__ = [
- # Core classes
- 'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
- 'CommandCollection', 'Parameter', 'Option', 'Argument',
-
- # Globals
- 'get_current_context',
-
- # Decorators
- 'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group',
- 'argument', 'option', 'confirmation_option', 'password_option',
- 'version_option', 'help_option',
-
- # Types
- 'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING',
- 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED',
-
- # Utilities
- 'echo', 'get_binary_stream', 'get_text_stream', 'open_file',
- 'format_filename', 'get_app_dir', 'get_os_args',
-
- # Terminal functions
- 'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager',
- 'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch',
- 'getchar', 'pause',
-
- # Exceptions
- 'ClickException', 'UsageError', 'BadParameter', 'FileError',
- 'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage',
- 'MissingParameter',
-
- # Formatting
- 'HelpFormatter', 'wrap_text',
-
- # Parsing
- 'OptionParser',
-]
-
-
-# Controls if click should emit the warning about the use of unicode
-# literals.
-disable_unicode_literals_warning = False
-
-
-__version__ = '6.7'
diff --git a/venv/Lib/site-packages/click/_bashcomplete.py b/venv/Lib/site-packages/click/_bashcomplete.py
deleted file mode 100644
index d9d26d2..0000000
--- a/venv/Lib/site-packages/click/_bashcomplete.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import os
-import re
-from .utils import echo
-from .parser import split_arg_string
-from .core import MultiCommand, Option
-
-
-COMPLETION_SCRIPT = '''
-%(complete_func)s() {
- COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
- COMP_CWORD=$COMP_CWORD \\
- %(autocomplete_var)s=complete $1 ) )
- return 0
-}
-
-complete -F %(complete_func)s -o default %(script_names)s
-'''
-
-_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')
-
-
-def get_completion_script(prog_name, complete_var):
- cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))
- return (COMPLETION_SCRIPT % {
- 'complete_func': '_%s_completion' % cf_name,
- 'script_names': prog_name,
- 'autocomplete_var': complete_var,
- }).strip() + ';'
-
-
-def resolve_ctx(cli, prog_name, args):
- ctx = cli.make_context(prog_name, args, resilient_parsing=True)
- while ctx.protected_args + ctx.args and isinstance(ctx.command, MultiCommand):
- a = ctx.protected_args + ctx.args
- cmd = ctx.command.get_command(ctx, a[0])
- if cmd is None:
- return None
- ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True)
- return ctx
-
-
-def get_choices(cli, prog_name, args, incomplete):
- ctx = resolve_ctx(cli, prog_name, args)
- if ctx is None:
- return
-
- choices = []
- if incomplete and not incomplete[:1].isalnum():
- for param in ctx.command.params:
- if not isinstance(param, Option):
- continue
- choices.extend(param.opts)
- choices.extend(param.secondary_opts)
- elif isinstance(ctx.command, MultiCommand):
- choices.extend(ctx.command.list_commands(ctx))
-
- for item in choices:
- if item.startswith(incomplete):
- yield item
-
-
-def do_complete(cli, prog_name):
- cwords = split_arg_string(os.environ['COMP_WORDS'])
- cword = int(os.environ['COMP_CWORD'])
- args = cwords[1:cword]
- try:
- incomplete = cwords[cword]
- except IndexError:
- incomplete = ''
-
- for item in get_choices(cli, prog_name, args, incomplete):
- echo(item)
-
- return True
-
-
-def bashcomplete(cli, prog_name, complete_var, complete_instr):
- if complete_instr == 'source':
- echo(get_completion_script(prog_name, complete_var))
- return True
- elif complete_instr == 'complete':
- return do_complete(cli, prog_name)
- return False
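The deleted _bashcomplete module implements a tiny env-var protocol: the generated bash function re-invokes the program with COMP_WORDS/COMP_CWORD plus the magic variable set to 'complete', and the program answers with one candidate per line. Driving the inner helper by hand shows the shape of it (a sketch against click 6.7's private module; cli, build, and bump are made-up names):

    import click
    from click._bashcomplete import get_choices   # private API in click 6.7

    @click.group()
    def cli():
        pass

    @cli.command()
    def build():
        pass

    @cli.command()
    def bump():
        pass

    # bash would export COMP_WORDS='prog bu' and COMP_CWORD=1;
    # do_complete() reduces that environment to this call:
    print(list(get_choices(cli, 'prog', [], 'bu')))   # ['build', 'bump']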
diff --git a/venv/Lib/site-packages/click/_compat.py b/venv/Lib/site-packages/click/_compat.py
deleted file mode 100644
index 2b43412..0000000
--- a/venv/Lib/site-packages/click/_compat.py
+++ /dev/null
@@ -1,648 +0,0 @@
-import re
-import io
-import os
-import sys
-import codecs
-from weakref import WeakKeyDictionary
-
-
-PY2 = sys.version_info[0] == 2
-WIN = sys.platform.startswith('win')
-DEFAULT_COLUMNS = 80
-
-
-_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
-
-
-def get_filesystem_encoding():
- return sys.getfilesystemencoding() or sys.getdefaultencoding()
-
-
-def _make_text_stream(stream, encoding, errors):
- if encoding is None:
- encoding = get_best_encoding(stream)
- if errors is None:
- errors = 'replace'
- return _NonClosingTextIOWrapper(stream, encoding, errors,
- line_buffering=True)
-
-
-def is_ascii_encoding(encoding):
- """Checks if a given encoding is ascii."""
- try:
- return codecs.lookup(encoding).name == 'ascii'
- except LookupError:
- return False
-
-
-def get_best_encoding(stream):
- """Return the stream's encoding, falling back to the interpreter default; ASCII is coerced to UTF-8."""
- rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
- if is_ascii_encoding(rv):
- return 'utf-8'
- return rv
-
-
-class _NonClosingTextIOWrapper(io.TextIOWrapper):
-
- def __init__(self, stream, encoding, errors, **extra):
- self._stream = stream = _FixupStream(stream)
- io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
-
- # The io module is a place where the Python 3 text behavior
- # was forced upon Python 2, so we need to unbreak
- # it to look like Python 2.
- if PY2:
- def write(self, x):
- if isinstance(x, str) or is_bytes(x):
- try:
- self.flush()
- except Exception:
- pass
- return self.buffer.write(str(x))
- return io.TextIOWrapper.write(self, x)
-
- def writelines(self, lines):
- for line in lines:
- self.write(line)
-
- def __del__(self):
- try:
- self.detach()
- except Exception:
- pass
-
- def isatty(self):
- # https://bitbucket.org/pypy/pypy/issue/1803
- return self._stream.isatty()
-
-
-class _FixupStream(object):
- """The new io interface needs more from streams than streams
- traditionally implement. As such, this fix-up code is necessary in
- some circumstances.
- """
-
- def __init__(self, stream):
- self._stream = stream
-
- def __getattr__(self, name):
- return getattr(self._stream, name)
-
- def read1(self, size):
- f = getattr(self._stream, 'read1', None)
- if f is not None:
- return f(size)
- # We only dispatch to readline instead of read in Python 2 as we
- # do not want to cause problems with the different implementation
- # of line buffering.
- if PY2:
- return self._stream.readline(size)
- return self._stream.read(size)
-
- def readable(self):
- x = getattr(self._stream, 'readable', None)
- if x is not None:
- return x()
- try:
- self._stream.read(0)
- except Exception:
- return False
- return True
-
- def writable(self):
- x = getattr(self._stream, 'writable', None)
- if x is not None:
- return x()
- try:
- self._stream.write('')
- except Exception:
- try:
- self._stream.write(b'')
- except Exception:
- return False
- return True
-
- def seekable(self):
- x = getattr(self._stream, 'seekable', None)
- if x is not None:
- return x()
- try:
- self._stream.seek(self._stream.tell())
- except Exception:
- return False
- return True
-
-
-if PY2:
- text_type = unicode
- bytes = str
- raw_input = raw_input
- string_types = (str, unicode)
- iteritems = lambda x: x.iteritems()
- range_type = xrange
-
- def is_bytes(x):
- return isinstance(x, (buffer, bytearray))
-
- _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
-
- # For Windows, we need to force stdout/stdin/stderr to binary if it's
- # fetched for that. This obviously is not the most correct way to do
- # it as it changes global state. Unfortunately, there does not seem to
- # be a clear better way to do it as just reopening the file in binary
- # mode does not change anything.
- #
- # An option would be to do what Python 3 does and to open the file as
- # binary only, patch it back to the system, and then use a wrapper
- # stream that converts newlines. It's not quite clear what's the
- # correct option here.
- #
- # This code also lives in _winconsole for the fallback to the console
- # emulation stream.
- #
- # There are also Windows environments where the `msvcrt` module is not
- # available (which is why we use try/except instead of the WIN variable
- # here), such as the Google App Engine development server on Windows. In
- # those cases there is just nothing we can do.
- try:
- import msvcrt
- except ImportError:
- set_binary_mode = lambda x: x
- else:
- def set_binary_mode(f):
- try:
- fileno = f.fileno()
- except Exception:
- pass
- else:
- msvcrt.setmode(fileno, os.O_BINARY)
- return f
-
- def isidentifier(x):
- return _identifier_re.search(x) is not None
-
- def get_binary_stdin():
- return set_binary_mode(sys.stdin)
-
- def get_binary_stdout():
- return set_binary_mode(sys.stdout)
-
- def get_binary_stderr():
- return set_binary_mode(sys.stderr)
-
- def get_text_stdin(encoding=None, errors=None):
- rv = _get_windows_console_stream(sys.stdin, encoding, errors)
- if rv is not None:
- return rv
- return _make_text_stream(sys.stdin, encoding, errors)
-
- def get_text_stdout(encoding=None, errors=None):
- rv = _get_windows_console_stream(sys.stdout, encoding, errors)
- if rv is not None:
- return rv
- return _make_text_stream(sys.stdout, encoding, errors)
-
- def get_text_stderr(encoding=None, errors=None):
- rv = _get_windows_console_stream(sys.stderr, encoding, errors)
- if rv is not None:
- return rv
- return _make_text_stream(sys.stderr, encoding, errors)
-
- def filename_to_ui(value):
- if isinstance(value, bytes):
- value = value.decode(get_filesystem_encoding(), 'replace')
- return value
-else:
- import io
- text_type = str
- raw_input = input
- string_types = (str,)
- range_type = range
- isidentifier = lambda x: x.isidentifier()
- iteritems = lambda x: iter(x.items())
-
- def is_bytes(x):
- return isinstance(x, (bytes, memoryview, bytearray))
-
- def _is_binary_reader(stream, default=False):
- try:
- return isinstance(stream.read(0), bytes)
- except Exception:
- return default
- # This happens in some cases where the stream was already
- # closed. In this case, we assume the default.
-
- def _is_binary_writer(stream, default=False):
- try:
- stream.write(b'')
- except Exception:
- try:
- stream.write('')
- return False
- except Exception:
- pass
- return default
- return True
-
- def _find_binary_reader(stream):
- # We need to figure out if the given stream is already binary.
- # This can happen because the official docs recommend detaching
- # the streams to get binary streams. Some code might do this, so
- # we need to deal with this case explicitly.
- if _is_binary_reader(stream, False):
- return stream
-
- buf = getattr(stream, 'buffer', None)
-
- # Same situation here; this time we assume that the buffer is
- # actually binary in case it's closed.
- if buf is not None and _is_binary_reader(buf, True):
- return buf
-
- def _find_binary_writer(stream):
- # We need to figure out if the given stream is already binary.
- # This can happen because the official docs recommend detaching
- # the streams to get binary streams. Some code might do this, so
- # we need to deal with this case explicitly.
- if _is_binary_writer(stream, False):
- return stream
-
- buf = getattr(stream, 'buffer', None)
-
- # Same situation here; this time we assume that the buffer is
- # actually binary in case it's closed.
- if buf is not None and _is_binary_writer(buf, True):
- return buf
-
- def _stream_is_misconfigured(stream):
- """A stream is misconfigured if its encoding is ASCII."""
- # If the stream does not have an encoding set, we assume it's set
- # to ASCII. This appears to happen in certain unittest
- # environments. It's not quite clear what the correct behavior is
- # but this at least will force Click to recover somehow.
- return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
-
- def _is_compatible_text_stream(stream, encoding, errors):
- stream_encoding = getattr(stream, 'encoding', None)
- stream_errors = getattr(stream, 'errors', None)
-
- # Perfect match.
- if stream_encoding == encoding and stream_errors == errors:
- return True
-
- # Otherwise, it's only a compatible stream if we did not ask for
- # an encoding.
- if encoding is None:
- return stream_encoding is not None
-
- return False
-
- def _force_correct_text_reader(text_reader, encoding, errors):
- if _is_binary_reader(text_reader, False):
- binary_reader = text_reader
- else:
- # If there is no target encoding set, we need to verify that the
- # reader is not actually misconfigured.
- if encoding is None and not _stream_is_misconfigured(text_reader):
- return text_reader
-
- if _is_compatible_text_stream(text_reader, encoding, errors):
- return text_reader
-
- # If the reader has no encoding, we try to find the underlying
- # binary reader for it. If that fails because the environment is
- # misconfigured, we silently go with the same reader because this
- # is too common to happen. In that case, mojibake is better than
- # exceptions.
- binary_reader = _find_binary_reader(text_reader)
- if binary_reader is None:
- return text_reader
-
- # At this point, we default the errors to replace instead of strict
- # because nobody handles those errors anyways and at this point
- # we're so fundamentally fucked that nothing can repair it.
- if errors is None:
- errors = 'replace'
- return _make_text_stream(binary_reader, encoding, errors)
-
- def _force_correct_text_writer(text_writer, encoding, errors):
- if _is_binary_writer(text_writer, False):
- binary_writer = text_writer
- else:
- # If there is no target encoding set, we need to verify that the
- # writer is not actually misconfigured.
- if encoding is None and not _stream_is_misconfigured(text_writer):
- return text_writer
-
- if _is_compatible_text_stream(text_writer, encoding, errors):
- return text_writer
-
- # If the writer has no encoding, we try to find the underlying
- # binary writer for it. If that fails because the environment is
- # misconfigured, we silently go with the same writer because this
- # is too common to happen. In that case, mojibake is better than
- # exceptions.
- binary_writer = _find_binary_writer(text_writer)
- if binary_writer is None:
- return text_writer
-
- # At this point, we default the errors to replace instead of strict
- # because nobody handles those errors anyways and at this point
- # we're so fundamentally fucked that nothing can repair it.
- if errors is None:
- errors = 'replace'
- return _make_text_stream(binary_writer, encoding, errors)
-
- def get_binary_stdin():
- reader = _find_binary_reader(sys.stdin)
- if reader is None:
- raise RuntimeError('Was not able to determine binary '
- 'stream for sys.stdin.')
- return reader
-
- def get_binary_stdout():
- writer = _find_binary_writer(sys.stdout)
- if writer is None:
- raise RuntimeError('Was not able to determine binary '
- 'stream for sys.stdout.')
- return writer
-
- def get_binary_stderr():
- writer = _find_binary_writer(sys.stderr)
- if writer is None:
- raise RuntimeError('Was not able to determine binary '
- 'stream for sys.stderr.')
- return writer
-
- def get_text_stdin(encoding=None, errors=None):
- rv = _get_windows_console_stream(sys.stdin, encoding, errors)
- if rv is not None:
- return rv
- return _force_correct_text_reader(sys.stdin, encoding, errors)
-
- def get_text_stdout(encoding=None, errors=None):
- rv = _get_windows_console_stream(sys.stdout, encoding, errors)
- if rv is not None:
- return rv
- return _force_correct_text_writer(sys.stdout, encoding, errors)
-
- def get_text_stderr(encoding=None, errors=None):
- rv = _get_windows_console_stream(sys.stderr, encoding, errors)
- if rv is not None:
- return rv
- return _force_correct_text_writer(sys.stderr, encoding, errors)
-
- def filename_to_ui(value):
- if isinstance(value, bytes):
- value = value.decode(get_filesystem_encoding(), 'replace')
- else:
- value = value.encode('utf-8', 'surrogateescape') \
- .decode('utf-8', 'replace')
- return value
-
-
-def get_streerror(e, default=None):
- if hasattr(e, 'strerror'):
- msg = e.strerror
- else:
- if default is not None:
- msg = default
- else:
- msg = str(e)
- if isinstance(msg, bytes):
- msg = msg.decode('utf-8', 'replace')
- return msg
-
-
-def open_stream(filename, mode='r', encoding=None, errors='strict',
- atomic=False):
- # Standard streams first. These are simple because they don't need
- # special handling for the atomic flag. It's entirely ignored.
- if filename == '-':
- if 'w' in mode:
- if 'b' in mode:
- return get_binary_stdout(), False
- return get_text_stdout(encoding=encoding, errors=errors), False
- if 'b' in mode:
- return get_binary_stdin(), False
- return get_text_stdin(encoding=encoding, errors=errors), False
-
- # Non-atomic writes directly go out through the regular open functions.
- if not atomic:
- if encoding is None:
- return open(filename, mode), True
- return io.open(filename, mode, encoding=encoding, errors=errors), True
-
- # Some usability stuff for atomic writes
- if 'a' in mode:
- raise ValueError(
- 'Appending to an existing file is not supported, because that '
- 'would involve an expensive `copy`-operation to a temporary '
- 'file. Open the file in normal `w`-mode and copy explicitly '
- 'if that\'s what you\'re after.'
- )
- if 'x' in mode:
- raise ValueError('Use the `overwrite`-parameter instead.')
- if 'w' not in mode:
- raise ValueError('Atomic writes only make sense with `w`-mode.')
-
- # Atomic writes are more complicated. They work by opening a file
- # as a proxy in the same folder and then using the fdopen
- # functionality to wrap it in a Python file. Then we wrap it in an
- # atomic file that moves the file over on close.
- import tempfile
- fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
- prefix='.__atomic-write')
-
- if encoding is not None:
- f = io.open(fd, mode, encoding=encoding, errors=errors)
- else:
- f = os.fdopen(fd, mode)
-
- return _AtomicFile(f, tmp_filename, filename), True
-
-
-# Used in a destructor call, needs extra protection from interpreter cleanup.
-if hasattr(os, 'replace'):
- _replace = os.replace
- _can_replace = True
-else:
- _replace = os.rename
- _can_replace = not WIN
-
-
-class _AtomicFile(object):
-
- def __init__(self, f, tmp_filename, real_filename):
- self._f = f
- self._tmp_filename = tmp_filename
- self._real_filename = real_filename
- self.closed = False
-
- @property
- def name(self):
- return self._real_filename
-
- def close(self, delete=False):
- if self.closed:
- return
- self._f.close()
- if not _can_replace:
- try:
- os.remove(self._real_filename)
- except OSError:
- pass
- _replace(self._tmp_filename, self._real_filename)
- self.closed = True
-
- def __getattr__(self, name):
- return getattr(self._f, name)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, tb):
- self.close(delete=exc_type is not None)
-
- def __repr__(self):
- return repr(self._f)
-
-
-auto_wrap_for_ansi = None
-colorama = None
-get_winterm_size = None
-
-
-def strip_ansi(value):
- return _ansi_re.sub('', value)
-
-
-def should_strip_ansi(stream=None, color=None):
- if color is None:
- if stream is None:
- stream = sys.stdin
- return not isatty(stream)
- return not color
-
-
-# If we're on Windows, we provide transparent integration through
-# colorama. This will make ANSI colors through the echo function
-# work automatically.
-if WIN:
- # Windows has a smaller terminal
- DEFAULT_COLUMNS = 79
-
- from ._winconsole import _get_windows_console_stream
-
- def _get_argv_encoding():
- import locale
- return locale.getpreferredencoding()
-
- if PY2:
- def raw_input(prompt=''):
- sys.stderr.flush()
- if prompt:
- stdout = _default_text_stdout()
- stdout.write(prompt)
- stdin = _default_text_stdin()
- return stdin.readline().rstrip('\r\n')
-
- try:
- import colorama
- except ImportError:
- pass
- else:
- _ansi_stream_wrappers = WeakKeyDictionary()
-
- def auto_wrap_for_ansi(stream, color=None):
- """This function wraps a stream so that calls through colorama
- are issued to the win32 console API to recolor on demand. It
- also ensures that the colors are reset if a write call is
- interrupted, so the console is not left in a broken state.
- """
- try:
- cached = _ansi_stream_wrappers.get(stream)
- except Exception:
- cached = None
- if cached is not None:
- return cached
- strip = should_strip_ansi(stream, color)
- ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
- rv = ansi_wrapper.stream
- _write = rv.write
-
- def _safe_write(s):
- try:
- return _write(s)
- except:
- ansi_wrapper.reset_all()
- raise
-
- rv.write = _safe_write
- try:
- _ansi_stream_wrappers[stream] = rv
- except Exception:
- pass
- return rv
-
- def get_winterm_size():
- win = colorama.win32.GetConsoleScreenBufferInfo(
- colorama.win32.STDOUT).srWindow
- return win.Right - win.Left, win.Bottom - win.Top
-else:
- def _get_argv_encoding():
- return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
-
- _get_windows_console_stream = lambda *x: None
-
-
-def term_len(x):
- return len(strip_ansi(x))
-
-
-def isatty(stream):
- try:
- return stream.isatty()
- except Exception:
- return False
-
-
-def _make_cached_stream_func(src_func, wrapper_func):
- cache = WeakKeyDictionary()
- def func():
- stream = src_func()
- try:
- rv = cache.get(stream)
- except Exception:
- rv = None
- if rv is not None:
- return rv
- rv = wrapper_func()
- try:
- cache[stream] = rv
- except Exception:
- pass
- return rv
- return func
-
-
-_default_text_stdin = _make_cached_stream_func(
- lambda: sys.stdin, get_text_stdin)
-_default_text_stdout = _make_cached_stream_func(
- lambda: sys.stdout, get_text_stdout)
-_default_text_stderr = _make_cached_stream_func(
- lambda: sys.stderr, get_text_stderr)
-
-
-binary_streams = {
- 'stdin': get_binary_stdin,
- 'stdout': get_binary_stdout,
- 'stderr': get_binary_stderr,
-}
-
-text_streams = {
- 'stdin': get_text_stdin,
- 'stdout': get_text_stdout,
- 'stderr': get_text_stderr,
-}
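The _AtomicFile plumbing above reduces to one portable pattern: create a temporary file in the same directory as the target (so the rename cannot cross filesystems), write, then rename over the destination. Readers see either the old file or the new one, never a half-written mix. A stdlib-only sketch of the same idea (os.replace is atomic on both POSIX and Windows since Python 3.3; the function name is made up):

    import os
    import tempfile

    def atomic_write(path, data):
        """Atomically replace the file at path with the given bytes."""
        dirname = os.path.dirname(os.path.abspath(path))
        fd, tmp = tempfile.mkstemp(dir=dirname, prefix='.__atomic-write')
        try:
            with os.fdopen(fd, 'wb') as f:
                f.write(data)
            os.replace(tmp, path)   # the atomic step
        except BaseException:
            os.unlink(tmp)          # clean up the temp file on any failure
            raise

    atomic_write('settings.json', b'{"answer": 42}')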
diff --git a/venv/Lib/site-packages/click/_termui_impl.py b/venv/Lib/site-packages/click/_termui_impl.py
deleted file mode 100644
index 7cfd3d5..0000000
--- a/venv/Lib/site-packages/click/_termui_impl.py
+++ /dev/null
@@ -1,547 +0,0 @@
-"""
- click._termui_impl
- ~~~~~~~~~~~~~~~~~~
-
- This module contains implementations for the termui module. To keep the
- import time of Click down, some infrequently used functionality is placed
- in this module and only imported as needed.
-
- :copyright: (c) 2014 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-import os
-import sys
-import time
-import math
-from ._compat import _default_text_stdout, range_type, PY2, isatty, \
- open_stream, strip_ansi, term_len, get_best_encoding, WIN
-from .utils import echo
-from .exceptions import ClickException
-
-
-if os.name == 'nt':
- BEFORE_BAR = '\r'
- AFTER_BAR = '\n'
-else:
- BEFORE_BAR = '\r\033[?25l'
- AFTER_BAR = '\033[?25h\n'
-
-
-def _length_hint(obj):
- """Returns the length hint of an object."""
- try:
- return len(obj)
- except (AttributeError, TypeError):
- try:
- get_hint = type(obj).__length_hint__
- except AttributeError:
- return None
- try:
- hint = get_hint(obj)
- except TypeError:
- return None
- if hint is NotImplemented or \
- not isinstance(hint, (int, long) if PY2 else int) or \
- hint < 0:
- return None
- return hint
-
-
-class ProgressBar(object):
-
- def __init__(self, iterable, length=None, fill_char='#', empty_char=' ',
- bar_template='%(bar)s', info_sep=' ', show_eta=True,
- show_percent=None, show_pos=False, item_show_func=None,
- label=None, file=None, color=None, width=30):
- self.fill_char = fill_char
- self.empty_char = empty_char
- self.bar_template = bar_template
- self.info_sep = info_sep
- self.show_eta = show_eta
- self.show_percent = show_percent
- self.show_pos = show_pos
- self.item_show_func = item_show_func
- self.label = label or ''
- if file is None:
- file = _default_text_stdout()
- self.file = file
- self.color = color
- self.width = width
- self.autowidth = width == 0
-
- if length is None:
- length = _length_hint(iterable)
- if iterable is None:
- if length is None:
- raise TypeError('iterable or length is required')
- iterable = range_type(length)
- self.iter = iter(iterable)
- self.length = length
- self.length_known = length is not None
- self.pos = 0
- self.avg = []
- self.start = self.last_eta = time.time()
- self.eta_known = False
- self.finished = False
- self.max_width = None
- self.entered = False
- self.current_item = None
- self.is_hidden = not isatty(self.file)
- self._last_line = None
-
- def __enter__(self):
- self.entered = True
- self.render_progress()
- return self
-
- def __exit__(self, exc_type, exc_value, tb):
- self.render_finish()
-
- def __iter__(self):
- if not self.entered:
- raise RuntimeError('You need to use progress bars in a with block.')
- self.render_progress()
- return self
-
- def render_finish(self):
- if self.is_hidden:
- return
- self.file.write(AFTER_BAR)
- self.file.flush()
-
- @property
- def pct(self):
- if self.finished:
- return 1.0
- return min(self.pos / (float(self.length) or 1), 1.0)
-
- @property
- def time_per_iteration(self):
- if not self.avg:
- return 0.0
- return sum(self.avg) / float(len(self.avg))
-
- @property
- def eta(self):
- if self.length_known and not self.finished:
- return self.time_per_iteration * (self.length - self.pos)
- return 0.0
-
- def format_eta(self):
- if self.eta_known:
- t = int(self.eta + 1)
- seconds = t % 60
- t //= 60
- minutes = t % 60
- t //= 60
- hours = t % 24
- t //= 24
- if t > 0:
- days = t
- return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
- else:
- return '%02d:%02d:%02d' % (hours, minutes, seconds)
- return ''
-
- def format_pos(self):
- pos = str(self.pos)
- if self.length_known:
- pos += '/%s' % self.length
- return pos
-
- def format_pct(self):
- return ('% 4d%%' % int(self.pct * 100))[1:]
-
- def format_progress_line(self):
- show_percent = self.show_percent
-
- info_bits = []
- if self.length_known:
- bar_length = int(self.pct * self.width)
- bar = self.fill_char * bar_length
- bar += self.empty_char * (self.width - bar_length)
- if show_percent is None:
- show_percent = not self.show_pos
- else:
- if self.finished:
- bar = self.fill_char * self.width
- else:
- bar = list(self.empty_char * (self.width or 1))
- if self.time_per_iteration != 0:
- bar[int((math.cos(self.pos * self.time_per_iteration)
- / 2.0 + 0.5) * self.width)] = self.fill_char
- bar = ''.join(bar)
-
- if self.show_pos:
- info_bits.append(self.format_pos())
- if show_percent:
- info_bits.append(self.format_pct())
- if self.show_eta and self.eta_known and not self.finished:
- info_bits.append(self.format_eta())
- if self.item_show_func is not None:
- item_info = self.item_show_func(self.current_item)
- if item_info is not None:
- info_bits.append(item_info)
-
- return (self.bar_template % {
- 'label': self.label,
- 'bar': bar,
- 'info': self.info_sep.join(info_bits)
- }).rstrip()
-
- def render_progress(self):
- from .termui import get_terminal_size
- nl = False
-
- if self.is_hidden:
- buf = [self.label]
- nl = True
- else:
- buf = []
- # Update width in case the terminal has been resized
- if self.autowidth:
- old_width = self.width
- self.width = 0
- clutter_length = term_len(self.format_progress_line())
- new_width = max(0, get_terminal_size()[0] - clutter_length)
- if new_width < old_width:
- buf.append(BEFORE_BAR)
- buf.append(' ' * self.max_width)
- self.max_width = new_width
- self.width = new_width
-
- clear_width = self.width
- if self.max_width is not None:
- clear_width = self.max_width
-
- buf.append(BEFORE_BAR)
- line = self.format_progress_line()
- line_len = term_len(line)
- if self.max_width is None or self.max_width < line_len:
- self.max_width = line_len
- buf.append(line)
-
- buf.append(' ' * (clear_width - line_len))
- line = ''.join(buf)
-
- # Render the line only if it changed.
- if line != self._last_line:
- self._last_line = line
- echo(line, file=self.file, color=self.color, nl=nl)
- self.file.flush()
-
- def make_step(self, n_steps):
- self.pos += n_steps
- if self.length_known and self.pos >= self.length:
- self.finished = True
-
- if (time.time() - self.last_eta) < 1.0:
- return
-
- self.last_eta = time.time()
- self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)]
-
- self.eta_known = self.length_known
-
- def update(self, n_steps):
- self.make_step(n_steps)
- self.render_progress()
-
- def finish(self):
- self.eta_known = 0
- self.current_item = None
- self.finished = True
-
- def next(self):
- if self.is_hidden:
- return next(self.iter)
- try:
- rv = next(self.iter)
- self.current_item = rv
- except StopIteration:
- self.finish()
- self.render_progress()
- raise StopIteration()
- else:
- self.update(1)
- return rv
-
- if not PY2:
- __next__ = next
- del next
-
-
-def pager(text, color=None):
- """Decide what method to use for paging through text."""
- stdout = _default_text_stdout()
- if not isatty(sys.stdin) or not isatty(stdout):
- return _nullpager(stdout, text, color)
- pager_cmd = (os.environ.get('PAGER', None) or '').strip()
- if pager_cmd:
- if WIN:
- return _tempfilepager(text, pager_cmd, color)
- return _pipepager(text, pager_cmd, color)
- if os.environ.get('TERM') in ('dumb', 'emacs'):
- return _nullpager(stdout, text, color)
- if WIN or sys.platform.startswith('os2'):
- return _tempfilepager(text, 'more <', color)
- if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
- return _pipepager(text, 'less', color)
-
- import tempfile
- fd, filename = tempfile.mkstemp()
- os.close(fd)
- try:
- if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
- return _pipepager(text, 'more', color)
- return _nullpager(stdout, text, color)
- finally:
- os.unlink(filename)
-
-
-def _pipepager(text, cmd, color):
- """Page through text by feeding it to another program. Invoking a
- pager through this might support colors.
- """
- import subprocess
- env = dict(os.environ)
-
- # If we're piping to less we might support colors under the
- # condition that raw control characters are allowed, i.e. the
- # -r/-R flag is passed on the command line or via the LESS
- # environment variable.
- cmd_detail = cmd.rsplit('/', 1)[-1].split()
- if color is None and cmd_detail[0] == 'less':
- less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
- if not less_flags:
- env['LESS'] = '-R'
- color = True
- elif 'r' in less_flags or 'R' in less_flags:
- color = True
-
- if not color:
- text = strip_ansi(text)
-
- c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
- env=env)
- encoding = get_best_encoding(c.stdin)
- try:
- c.stdin.write(text.encode(encoding, 'replace'))
- c.stdin.close()
- except (IOError, KeyboardInterrupt):
- pass
-
- # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
- # search or other commands inside less).
- #
- # That means when the user hits ^C, the parent process (click) terminates,
- # but less is still alive, paging the output and messing up the terminal.
- #
- # If the user wants to make the pager exit on ^C, they should set
- # `LESS='-K'`. It's not our decision to make.
- while True:
- try:
- c.wait()
- except KeyboardInterrupt:
- pass
- else:
- break
-
-
-def _tempfilepager(text, cmd, color):
- """Page through text by invoking a program on a temporary file."""
- import tempfile
- filename = tempfile.mktemp()
- if not color:
- text = strip_ansi(text)
- encoding = get_best_encoding(sys.stdout)
- with open_stream(filename, 'wb')[0] as f:
- f.write(text.encode(encoding))
- try:
- os.system(cmd + ' "' + filename + '"')
- finally:
- os.unlink(filename)
-
-
-def _nullpager(stream, text, color):
- """Simply print unformatted text. This is the ultimate fallback."""
- if not color:
- text = strip_ansi(text)
- stream.write(text)
-
-
-class Editor(object):
-
- def __init__(self, editor=None, env=None, require_save=True,
- extension='.txt'):
- self.editor = editor
- self.env = env
- self.require_save = require_save
- self.extension = extension
-
- def get_editor(self):
- if self.editor is not None:
- return self.editor
- for key in 'VISUAL', 'EDITOR':
- rv = os.environ.get(key)
- if rv:
- return rv
- if WIN:
- return 'notepad'
- for editor in 'vim', 'nano':
- if os.system('which %s >/dev/null 2>&1' % editor) == 0:
- return editor
- return 'vi'
-
- def edit_file(self, filename):
- import subprocess
- editor = self.get_editor()
- if self.env:
- environ = os.environ.copy()
- environ.update(self.env)
- else:
- environ = None
- try:
- c = subprocess.Popen('%s "%s"' % (editor, filename),
- env=environ, shell=True)
- exit_code = c.wait()
- if exit_code != 0:
- raise ClickException('%s: Editing failed!' % editor)
- except OSError as e:
- raise ClickException('%s: Editing failed: %s' % (editor, e))
-
- def edit(self, text):
- import tempfile
-
- text = text or ''
- if text and not text.endswith('\n'):
- text += '\n'
-
- fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
- try:
- if WIN:
- encoding = 'utf-8-sig'
- text = text.replace('\n', '\r\n')
- else:
- encoding = 'utf-8'
- text = text.encode(encoding)
-
- f = os.fdopen(fd, 'wb')
- f.write(text)
- f.close()
- timestamp = os.path.getmtime(name)
-
- self.edit_file(name)
-
- if self.require_save \
- and os.path.getmtime(name) == timestamp:
- return None
-
- f = open(name, 'rb')
- try:
- rv = f.read()
- finally:
- f.close()
- return rv.decode('utf-8-sig').replace('\r\n', '\n')
- finally:
- os.unlink(name)
-
-
-def open_url(url, wait=False, locate=False):
- import subprocess
-
- def _unquote_file(url):
- # urllib.unquote moved to urllib.parse.unquote on Python 3
- if PY2:
- from urllib import unquote
- else:
- from urllib.parse import unquote
- if url.startswith('file://'):
- url = unquote(url[7:])
- return url
-
- if sys.platform == 'darwin':
- args = ['open']
- if wait:
- args.append('-W')
- if locate:
- args.append('-R')
- args.append(_unquote_file(url))
- null = open('/dev/null', 'w')
- try:
- return subprocess.Popen(args, stderr=null).wait()
- finally:
- null.close()
- elif WIN:
- if locate:
- url = _unquote_file(url)
- args = 'explorer /select,"%s"' % _unquote_file(
- url.replace('"', ''))
- else:
- args = 'start %s "" "%s"' % (
- wait and '/WAIT' or '', url.replace('"', ''))
- return os.system(args)
-
- try:
- if locate:
- url = os.path.dirname(_unquote_file(url)) or '.'
- else:
- url = _unquote_file(url)
- c = subprocess.Popen(['xdg-open', url])
- if wait:
- return c.wait()
- return 0
- except OSError:
- if url.startswith(('http://', 'https://')) and not locate and not wait:
- import webbrowser
- webbrowser.open(url)
- return 0
- return 1
-
-
-def _translate_ch_to_exc(ch):
- if ch == '\x03':
- raise KeyboardInterrupt()
- if ch == '\x04':
- raise EOFError()
-
-
-if WIN:
- import msvcrt
-
- def getchar(echo):
- rv = msvcrt.getch()
- if echo:
- msvcrt.putchar(rv)
- _translate_ch_to_exc(rv)
- if PY2:
- enc = getattr(sys.stdin, 'encoding', None)
- if enc is not None:
- rv = rv.decode(enc, 'replace')
- else:
- rv = rv.decode('cp1252', 'replace')
- return rv
-else:
- import tty
- import termios
-
- def getchar(echo):
- if not isatty(sys.stdin):
- f = open('/dev/tty')
- fd = f.fileno()
- else:
- fd = sys.stdin.fileno()
- f = None
- try:
- old_settings = termios.tcgetattr(fd)
- try:
- tty.setraw(fd)
- ch = os.read(fd, 32)
- if echo and isatty(sys.stdout):
- # os.read() returns bytes; decode before echoing on Python 3
- out = ch if PY2 else ch.decode(get_best_encoding(sys.stdin), 'replace')
- sys.stdout.write(out)
- finally:
- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
- sys.stdout.flush()
- if f is not None:
- f.close()
- except termios.error:
- pass
- _translate_ch_to_exc(ch)
- return ch.decode(get_best_encoding(sys.stdin), 'replace')
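The POSIX getchar() above is the classic raw-mode pattern: save the tty attributes, switch the terminal to raw mode, read, and always restore the saved attributes. A standalone sketch of that pattern (POSIX-only; assumes stdin is a tty; the function name is illustrative):

    import os
    import sys
    import termios
    import tty

    def read_key():
        """Read one keypress from a POSIX tty without waiting for Enter."""
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)        # save settings to restore later
        try:
            tty.setraw(fd)
            ch = os.read(fd, 32)           # 32 bytes covers escape sequences
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old)
        if ch == b'\x03':
            raise KeyboardInterrupt()      # raw mode swallows Ctrl-C; re-raise
        return ch.decode(sys.stdin.encoding or 'utf-8', 'replace')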
diff --git a/venv/Lib/site-packages/click/_textwrap.py b/venv/Lib/site-packages/click/_textwrap.py
deleted file mode 100644
index 7e77603..0000000
--- a/venv/Lib/site-packages/click/_textwrap.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import textwrap
-from contextlib import contextmanager
-
-
-class TextWrapper(textwrap.TextWrapper):
-
- def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
- space_left = max(width - cur_len, 1)
-
- if self.break_long_words:
- last = reversed_chunks[-1]
- cut = last[:space_left]
- res = last[space_left:]
- cur_line.append(cut)
- reversed_chunks[-1] = res
- elif not cur_line:
- cur_line.append(reversed_chunks.pop())
-
- @contextmanager
- def extra_indent(self, indent):
- old_initial_indent = self.initial_indent
- old_subsequent_indent = self.subsequent_indent
- self.initial_indent += indent
- self.subsequent_indent += indent
- try:
- yield
- finally:
- self.initial_indent = old_initial_indent
- self.subsequent_indent = old_subsequent_indent
-
- def indent_only(self, text):
- rv = []
- for idx, line in enumerate(text.splitlines()):
- indent = self.initial_indent
- if idx > 0:
- indent = self.subsequent_indent
- rv.append(indent + line)
- return '\n'.join(rv)
diff --git a/venv/Lib/site-packages/click/_unicodefun.py b/venv/Lib/site-packages/click/_unicodefun.py
deleted file mode 100644
index 9e17a38..0000000
--- a/venv/Lib/site-packages/click/_unicodefun.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import os
-import sys
-import codecs
-
-from ._compat import PY2
-
-
-# If someone wants to vendor click, we want to ensure the
-# correct package is discovered. Ideally we could use a
-# relative import here but unfortunately Python does not
-# support that.
-click = sys.modules[__name__.rsplit('.', 1)[0]]
-
-
-def _find_unicode_literals_frame():
- import __future__
- frm = sys._getframe(1)
- idx = 1
- while frm is not None:
- if frm.f_globals.get('__name__', '').startswith('click.'):
- frm = frm.f_back
- idx += 1
- elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
- return idx
- else:
- break
- return 0
-
-
-def _check_for_unicode_literals():
- if not __debug__:
- return
- if not PY2 or click.disable_unicode_literals_warning:
- return
- bad_frame = _find_unicode_literals_frame()
- if bad_frame <= 0:
- return
- from warnings import warn
- warn(Warning('Click detected the use of the unicode_literals '
- '__future__ import. This is heavily discouraged '
- 'because it can introduce subtle bugs in your '
- 'code. You should instead use explicit u"" literals '
- 'for your unicode strings. For more information see '
- 'http://click.pocoo.org/python3/'),
- stacklevel=bad_frame)
-
-
-def _verify_python3_env():
- """Ensures that the environment is good for unicode on Python 3."""
- if PY2:
- return
- try:
- import locale
- fs_enc = codecs.lookup(locale.getpreferredencoding()).name
- except Exception:
- fs_enc = 'ascii'
- if fs_enc != 'ascii':
- return
-
- extra = ''
- if os.name == 'posix':
- import subprocess
- rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
- stderr=subprocess.PIPE).communicate()[0]
- good_locales = set()
- has_c_utf8 = False
-
- # Make sure we're operating on text here.
- if isinstance(rv, bytes):
- rv = rv.decode('ascii', 'replace')
-
- for line in rv.splitlines():
- locale = line.strip()
- if locale.lower().endswith(('.utf-8', '.utf8')):
- good_locales.add(locale)
- if locale.lower() in ('c.utf8', 'c.utf-8'):
- has_c_utf8 = True
-
- extra += '\n\n'
- if not good_locales:
- extra += (
- 'Additional information: on this system no suitable UTF-8\n'
- 'locales were discovered. This most likely requires resolving\n'
- 'by reconfiguring the locale system.'
- )
- elif has_c_utf8:
- extra += (
- 'This system supports the C.UTF-8 locale which is recommended.\n'
- 'You might be able to resolve your issue by exporting the\n'
- 'following environment variables:\n\n'
- ' export LC_ALL=C.UTF-8\n'
- ' export LANG=C.UTF-8'
- )
- else:
- extra += (
- 'This system lists a couple of UTF-8 supporting locales that\n'
- 'you can pick from. The following suitable locales where\n'
- 'discovered: %s'
- ) % ', '.join(sorted(good_locales))
-
- bad_locale = None
- for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
- if locale and locale.lower().endswith(('.utf-8', '.utf8')):
- bad_locale = locale
- if locale is not None:
- break
- if bad_locale is not None:
- extra += (
- '\n\nClick discovered that you exported a UTF-8 locale\n'
- 'but the locale system could not pick up from it because\n'
- 'it does not exist. The exported locale is "%s" but it\n'
- 'is not supported'
- ) % bad_locale
-
- raise RuntimeError('Click will abort further execution because Python 3 '
- 'was configured to use ASCII as encoding for the '
- 'environment. Consult http://click.pocoo.org/python3/'
- 'for mitigation steps.' + extra)
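The probe at the heart of _verify_python3_env is simply: ask locale for the preferred encoding and let codecs canonicalize the name. The same check in isolation (a sketch; the function name is made up):

    import codecs
    import locale

    def env_forces_ascii():
        """True when Python would fall back to ASCII I/O (the case rejected above)."""
        try:
            return codecs.lookup(locale.getpreferredencoding()).name == 'ascii'
        except Exception:
            return True   # an unknown codec is treated as the bad case

    print(env_forces_ascii())   # False on a healthy UTF-8 locale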
diff --git a/venv/Lib/site-packages/click/_winconsole.py b/venv/Lib/site-packages/click/_winconsole.py
deleted file mode 100644
index 9aed942..0000000
--- a/venv/Lib/site-packages/click/_winconsole.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# -*- coding: utf-8 -*-
-# This module is based on the excellent work by Adam Bartoš who
-# provided a lot of what went into the implementation here in
-# the discussion to issue1602 in the Python bug tracker.
-#
-# There are some general differences in regards to how this works
-# compared to the original patches as we do not need to patch
-# the entire interpreter but just work in our little world of
- # echo and prompt.
-
-import io
-import os
-import sys
-import zlib
-import time
-import ctypes
-import msvcrt
-from click._compat import _NonClosingTextIOWrapper, text_type, PY2
-from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
- c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
-try:
- from ctypes import pythonapi
- PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
- PyBuffer_Release = pythonapi.PyBuffer_Release
-except ImportError:
- pythonapi = None
-from ctypes.wintypes import LPWSTR, LPCWSTR
-
-
-c_ssize_p = POINTER(c_ssize_t)
-
-kernel32 = windll.kernel32
-GetStdHandle = kernel32.GetStdHandle
-ReadConsoleW = kernel32.ReadConsoleW
-WriteConsoleW = kernel32.WriteConsoleW
-GetLastError = kernel32.GetLastError
-GetCommandLineW = WINFUNCTYPE(LPWSTR)(
- ('GetCommandLineW', windll.kernel32))
-CommandLineToArgvW = WINFUNCTYPE(
- POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
- ('CommandLineToArgvW', windll.shell32))
-
-
-STDIN_HANDLE = GetStdHandle(-10)
-STDOUT_HANDLE = GetStdHandle(-11)
-STDERR_HANDLE = GetStdHandle(-12)
-
-
-PyBUF_SIMPLE = 0
-PyBUF_WRITABLE = 1
-
-ERROR_SUCCESS = 0
-ERROR_NOT_ENOUGH_MEMORY = 8
-ERROR_OPERATION_ABORTED = 995
-
-STDIN_FILENO = 0
-STDOUT_FILENO = 1
-STDERR_FILENO = 2
-
-EOF = b'\x1a'
-MAX_BYTES_WRITTEN = 32767
-
-
-class Py_buffer(ctypes.Structure):
- _fields_ = [
- ('buf', c_void_p),
- ('obj', py_object),
- ('len', c_ssize_t),
- ('itemsize', c_ssize_t),
- ('readonly', c_int),
- ('ndim', c_int),
- ('format', c_char_p),
- ('shape', c_ssize_p),
- ('strides', c_ssize_p),
- ('suboffsets', c_ssize_p),
- ('internal', c_void_p)
- ]
-
- if PY2:
- _fields_.insert(-1, ('smalltable', c_ssize_t * 2))
-
-
-# On PyPy we cannot get buffers so our ability to operate here is
-# severely limited.
-if pythonapi is None:
- get_buffer = None
-else:
- def get_buffer(obj, writable=False):
- buf = Py_buffer()
- flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
- PyObject_GetBuffer(py_object(obj), byref(buf), flags)
- try:
- buffer_type = c_char * buf.len
- return buffer_type.from_address(buf.buf)
- finally:
- PyBuffer_Release(byref(buf))
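
A short sketch of what `get_buffer` returns, assuming the definitions above are in scope (this module only imports on CPython under Windows, where `ctypes.pythonapi` and `msvcrt` exist): the ctypes array aliases the object's memory, so writes go through to the original object.

```python
# Sketch: get_buffer() exposes a bytearray's buffer as a ctypes
# char array that shares the same underlying memory.
data = bytearray(b'hello')
buf = get_buffer(data, writable=True)
buf[0] = b'H'                       # writes through to `data`
assert bytes(data) == b'Hello'
```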
-
-
-class _WindowsConsoleRawIOBase(io.RawIOBase):
-
- def __init__(self, handle):
- self.handle = handle
-
- def isatty(self):
- io.RawIOBase.isatty(self)
- return True
-
-
-class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
-
- def readable(self):
- return True
-
- def readinto(self, b):
- bytes_to_be_read = len(b)
- if not bytes_to_be_read:
- return 0
- elif bytes_to_be_read % 2:
- raise ValueError('cannot read odd number of bytes from '
- 'UTF-16-LE encoded console')
-
- buffer = get_buffer(b, writable=True)
- code_units_to_be_read = bytes_to_be_read // 2
- code_units_read = c_ulong()
-
- rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
- byref(code_units_read), None)
- if GetLastError() == ERROR_OPERATION_ABORTED:
- # wait for KeyboardInterrupt
- time.sleep(0.1)
- if not rv:
- raise OSError('Windows error: %s' % GetLastError())
-
- if buffer[0] == EOF:
- return 0
- return 2 * code_units_read.value
-
-
-class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
-
- def writable(self):
- return True
-
- @staticmethod
- def _get_error_message(errno):
- if errno == ERROR_SUCCESS:
- return 'ERROR_SUCCESS'
- elif errno == ERROR_NOT_ENOUGH_MEMORY:
- return 'ERROR_NOT_ENOUGH_MEMORY'
- return 'Windows error %s' % errno
-
- def write(self, b):
- bytes_to_be_written = len(b)
- buf = get_buffer(b)
- code_units_to_be_written = min(bytes_to_be_written,
- MAX_BYTES_WRITTEN) // 2
- code_units_written = c_ulong()
-
- WriteConsoleW(self.handle, buf, code_units_to_be_written,
- byref(code_units_written), None)
- bytes_written = 2 * code_units_written.value
-
- if bytes_written == 0 and bytes_to_be_written > 0:
- raise OSError(self._get_error_message(GetLastError()))
- return bytes_written
-
-
-class ConsoleStream(object):
-
- def __init__(self, text_stream, byte_stream):
- self._text_stream = text_stream
- self.buffer = byte_stream
-
- @property
- def name(self):
- return self.buffer.name
-
- def write(self, x):
- if isinstance(x, text_type):
- return self._text_stream.write(x)
- try:
- self.flush()
- except Exception:
- pass
- return self.buffer.write(x)
-
- def writelines(self, lines):
- for line in lines:
- self.write(line)
-
- def __getattr__(self, name):
- return getattr(self._text_stream, name)
-
- def isatty(self):
- return self.buffer.isatty()
-
- def __repr__(self):
- return '<ConsoleStream name=%r encoding=%r>' % (
- self.name,
- self.encoding,
- )
-
-
-def _get_text_stdin(buffer_stream):
- text_stream = _NonClosingTextIOWrapper(
- io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
- 'utf-16-le', 'strict', line_buffering=True)
- return ConsoleStream(text_stream, buffer_stream)
-
-
-def _get_text_stdout(buffer_stream):
- text_stream = _NonClosingTextIOWrapper(
- _WindowsConsoleWriter(STDOUT_HANDLE),
- 'utf-16-le', 'strict', line_buffering=True)
- return ConsoleStream(text_stream, buffer_stream)
-
-
-def _get_text_stderr(buffer_stream):
- text_stream = _NonClosingTextIOWrapper(
- _WindowsConsoleWriter(STDERR_HANDLE),
- 'utf-16-le', 'strict', line_buffering=True)
- return ConsoleStream(text_stream, buffer_stream)
-
-
-if PY2:
- def _hash_py_argv():
- return zlib.crc32('\x00'.join(sys.argv[1:]))
-
- _initial_argv_hash = _hash_py_argv()
-
- def _get_windows_argv():
- argc = c_int(0)
- argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
- argv = [argv_unicode[i] for i in range(0, argc.value)]
-
- if not hasattr(sys, 'frozen'):
- argv = argv[1:]
- while len(argv) > 0:
- arg = argv[0]
- if not arg.startswith('-') or arg == '-':
- break
- argv = argv[1:]
- if arg.startswith(('-c', '-m')):
- break
-
- return argv[1:]
-
-
-_stream_factories = {
- 0: _get_text_stdin,
- 1: _get_text_stdout,
- 2: _get_text_stderr,
-}
-
-
-def _get_windows_console_stream(f, encoding, errors):
- if get_buffer is not None and \
- encoding in ('utf-16-le', None) \
- and errors in ('strict', None) and \
- hasattr(f, 'isatty') and f.isatty():
- func = _stream_factories.get(f.fileno())
- if func is not None:
- if not PY2:
- f = getattr(f, 'buffer')
- if f is None:
- return None
- else:
- # If we are on Python 2 we need to set the stream that we
- # deal with to binary mode as otherwise the exercise is a
- # bit moot. The same problems apply as for
- # get_binary_stdin and friends from _compat.
- msvcrt.setmode(f.fileno(), os.O_BINARY)
- return func(f)
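
A hedged sketch of how this hook is meant to be consumed (the real call site lives in `click._compat`): a `None` return means the stream is not an interactive console and should be left alone.

```python
# Sketch: wrap sys.stdout only when it is a real Windows console.
import sys

stream = _get_windows_console_stream(sys.stdout, None, None)
if stream is None:
    stream = sys.stdout             # not a console, or PyPy (no get_buffer)
stream.write(u'Unicode output now survives the Windows console\n')
```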
diff --git a/venv/Lib/site-packages/click/core.py b/venv/Lib/site-packages/click/core.py
deleted file mode 100644
index 7456451..0000000
--- a/venv/Lib/site-packages/click/core.py
+++ /dev/null
@@ -1,1744 +0,0 @@
-import errno
-import os
-import sys
-from contextlib import contextmanager
-from itertools import repeat
-from functools import update_wrapper
-
-from .types import convert_type, IntRange, BOOL
-from .utils import make_str, make_default_short_help, echo, get_os_args
-from .exceptions import ClickException, UsageError, BadParameter, Abort, \
- MissingParameter
-from .termui import prompt, confirm
-from .formatting import HelpFormatter, join_options
-from .parser import OptionParser, split_opt
-from .globals import push_context, pop_context
-
-from ._compat import PY2, isidentifier, iteritems
-from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
-
-
-_missing = object()
-
-
-SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
-SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
-
-
-def _bashcomplete(cmd, prog_name, complete_var=None):
- """Internal handler for the bash completion support."""
- if complete_var is None:
- complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
- complete_instr = os.environ.get(complete_var)
- if not complete_instr:
- return
-
- from ._bashcomplete import bashcomplete
- if bashcomplete(cmd, prog_name, complete_var, complete_instr):
- sys.exit(1)
-
-
-def _check_multicommand(base_command, cmd_name, cmd, register=False):
- if not base_command.chain or not isinstance(cmd, MultiCommand):
- return
- if register:
- hint = 'It is not possible to add multi commands as children to ' \
- 'another multi command that is in chain mode'
- else:
- hint = 'Found a multi command as subcommand to a multi command ' \
- 'that is in chain mode. This is not supported'
- raise RuntimeError('%s. Command "%s" is set to chain and "%s" was '
- 'added as a subcommand, but is itself a '
- 'multi command. ("%s" is a %s within a chained '
- '%s named "%s"). This restriction was supposed to '
- 'be lifted in 6.0 but the fix was flawed. This '
- 'will be fixed in Click 7.0' % (
- hint, base_command.name, cmd_name,
- cmd_name, cmd.__class__.__name__,
- base_command.__class__.__name__,
- base_command.name))
-
-
-def batch(iterable, batch_size):
- return list(zip(*repeat(iter(iterable), batch_size)))
-
-
-def invoke_param_callback(callback, ctx, param, value):
- code = getattr(callback, '__code__', None)
- args = getattr(code, 'co_argcount', 3)
-
- if args < 3:
- # This will become a warning in Click 3.0:
- from warnings import warn
- warn(Warning('Invoked legacy parameter callback "%s". The new '
- 'signature for such callbacks starting with '
- 'click 2.0 is (ctx, param, value).'
- % callback), stacklevel=3)
- return callback(ctx, value)
- return callback(ctx, param, value)
-
-
-@contextmanager
-def augment_usage_errors(ctx, param=None):
- """Context manager that attaches extra information to exceptions that
- fly.
- """
- try:
- yield
- except BadParameter as e:
- if e.ctx is None:
- e.ctx = ctx
- if param is not None and e.param is None:
- e.param = param
- raise
- except UsageError as e:
- if e.ctx is None:
- e.ctx = ctx
- raise
-
-
-def iter_params_for_processing(invocation_order, declaration_order):
- """Given a sequence of parameters in the order as should be considered
- for processing and an iterable of parameters that exist, this returns
- a list in the correct order as they should be processed.
- """
- def sort_key(item):
- try:
- idx = invocation_order.index(item)
- except ValueError:
- idx = float('inf')
- return (not item.is_eager, idx)
-
- return sorted(declaration_order, key=sort_key)
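
A minimal sketch of the ordering rule, using a hypothetical stand-in for `Parameter` (only the `is_eager` attribute matters to the sort key): eager parameters come first, then command-line order, then everything else.

```python
# Stand-in objects: only .is_eager is consulted by sort_key above.
from collections import namedtuple

P = namedtuple('P', ['name', 'is_eager'])
verbose, helpopt, name = P('verbose', False), P('help', True), P('name', False)

declared = [name, verbose, helpopt]
invoked = [verbose, name]           # order seen on the command line

# --help is eager so it is processed first, then command-line order.
assert iter_params_for_processing(invoked, declared) == [helpopt, verbose, name]
```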
-
-
-class Context(object):
- """The context is a special internal object that holds state relevant
- for the script execution at every single level. It's normally invisible
- to commands unless they opt-in to getting access to it.
-
- The context is useful as it can pass internal objects around and can
- control special execution features such as reading data from
- environment variables.
-
- A context can be used as context manager in which case it will call
- :meth:`close` on teardown.
-
- .. versionadded:: 2.0
- Added the `resilient_parsing`, `help_option_names`,
- `token_normalize_func` parameters.
-
- .. versionadded:: 3.0
- Added the `allow_extra_args` and `allow_interspersed_args`
- parameters.
-
- .. versionadded:: 4.0
- Added the `color`, `ignore_unknown_options`, and
- `max_content_width` parameters.
-
- :param command: the command class for this context.
- :param parent: the parent context.
- :param info_name: the info name for this invocation. Generally this
- is the most descriptive name for the script or
- command. For the toplevel script it is usually
- the name of the script, for commands below it it's
- the name of the command.
- :param obj: an arbitrary object of user data.
- :param auto_envvar_prefix: the prefix to use for automatic environment
- variables. If this is `None` then reading
- from environment variables is disabled. This
- does not affect manually set environment
- variables which are always read.
- :param default_map: a dictionary (like object) with default values
- for parameters.
- :param terminal_width: the width of the terminal. The default is
- to inherit from the parent context. If no
- context defines the terminal width then
- auto-detection is applied.
- :param max_content_width: the maximum width for content rendered by
- Click (this currently only affects help
- pages). This defaults to 80 characters if
- not overridden. In other words: even if the
- terminal is larger than that, Click will not
- format things wider than 80 characters by
- default. In addition to that, formatters might
- add some safety margin on the right.
- :param resilient_parsing: if this flag is enabled then Click will
- parse without any interactivity or callback
- invocation. This is useful for implementing
- things such as completion support.
- :param allow_extra_args: if this is set to `True` then extra arguments
- at the end will not raise an error and will be
- kept on the context. The default is to inherit
- from the command.
- :param allow_interspersed_args: if this is set to `False` then options
- and arguments cannot be mixed. The
- default is to inherit from the command.
- :param ignore_unknown_options: instructs click to ignore options it does
- not know and keep them for later
- processing.
- :param help_option_names: optionally a list of strings that define how
- the default help parameter is named. The
- default is ``['--help']``.
- :param token_normalize_func: an optional function that is used to
- normalize tokens (options, choices,
- etc.). This for instance can be used to
- implement case insensitive behavior.
- :param color: controls if the terminal supports ANSI colors or not. The
- default is autodetection. This is only needed if ANSI
- codes are used in texts that Click prints which is by
- default not the case. This for instance would affect
- help output.
- """
-
- def __init__(self, command, parent=None, info_name=None, obj=None,
- auto_envvar_prefix=None, default_map=None,
- terminal_width=None, max_content_width=None,
- resilient_parsing=False, allow_extra_args=None,
- allow_interspersed_args=None,
- ignore_unknown_options=None, help_option_names=None,
- token_normalize_func=None, color=None):
- #: the parent context or `None` if none exists.
- self.parent = parent
- #: the :class:`Command` for this context.
- self.command = command
- #: the descriptive information name
- self.info_name = info_name
- #: the parsed parameters except if the value is hidden in which
- #: case it's not remembered.
- self.params = {}
- #: the leftover arguments.
- self.args = []
- #: protected arguments. These are arguments that are prepended
- #: to `args` when certain parsing scenarios are encountered but
- #: must never be propagated to other arguments. This is used
- #: to implement nested parsing.
- self.protected_args = []
- if obj is None and parent is not None:
- obj = parent.obj
- #: the user object stored.
- self.obj = obj
- self._meta = getattr(parent, 'meta', {})
-
- #: A dictionary (-like object) with defaults for parameters.
- if default_map is None \
- and parent is not None \
- and parent.default_map is not None:
- default_map = parent.default_map.get(info_name)
- self.default_map = default_map
-
- #: This flag indicates if a subcommand is going to be executed. A
- #: group callback can use this information to figure out if it's
- #: being executed directly or because the execution flow passes
- #: onwards to a subcommand. By default it's None, but it can be
- #: the name of the subcommand to execute.
- #:
- #: If chaining is enabled this will be set to ``'*'`` in case
- #: any commands are executed. It is however not possible to
- #: figure out which ones. If you require this knowledge you
- #: should use a :func:`resultcallback`.
- self.invoked_subcommand = None
-
- if terminal_width is None and parent is not None:
- terminal_width = parent.terminal_width
- #: The width of the terminal (None is autodetection).
- self.terminal_width = terminal_width
-
- if max_content_width is None and parent is not None:
- max_content_width = parent.max_content_width
- #: The maximum width of formatted content (None implies a sensible
- #: default which is 80 for most things).
- self.max_content_width = max_content_width
-
- if allow_extra_args is None:
- allow_extra_args = command.allow_extra_args
- #: Indicates if the context allows extra args or if it should
- #: fail on parsing.
- #:
- #: .. versionadded:: 3.0
- self.allow_extra_args = allow_extra_args
-
- if allow_interspersed_args is None:
- allow_interspersed_args = command.allow_interspersed_args
- #: Indicates if the context allows mixing of arguments and
- #: options or not.
- #:
- #: .. versionadded:: 3.0
- self.allow_interspersed_args = allow_interspersed_args
-
- if ignore_unknown_options is None:
- ignore_unknown_options = command.ignore_unknown_options
- #: Instructs click to ignore options that a command does not
- #: understand and store them on the context for later
- #: processing. This is primarily useful for situations where you
- #: want to call into external programs. Generally this pattern is
- #: strongly discouraged because it's not possible to losslessly
- #: forward all arguments.
- #:
- #: .. versionadded:: 4.0
- self.ignore_unknown_options = ignore_unknown_options
-
- if help_option_names is None:
- if parent is not None:
- help_option_names = parent.help_option_names
- else:
- help_option_names = ['--help']
-
- #: The names for the help options.
- self.help_option_names = help_option_names
-
- if token_normalize_func is None and parent is not None:
- token_normalize_func = parent.token_normalize_func
-
- #: An optional normalization function for tokens. This is
- #: options, choices, commands etc.
- self.token_normalize_func = token_normalize_func
-
- #: Indicates if resilient parsing is enabled. In that case Click
- #: will do its best to not cause any failures.
- self.resilient_parsing = resilient_parsing
-
- # If there is no envvar prefix yet, but the parent has one and
- # the command on this level has a name, we can expand the envvar
- # prefix automatically.
- if auto_envvar_prefix is None:
- if parent is not None \
- and parent.auto_envvar_prefix is not None and \
- self.info_name is not None:
- auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix,
- self.info_name.upper())
- else:
- auto_envvar_prefix = auto_envvar_prefix.upper()
- self.auto_envvar_prefix = auto_envvar_prefix
-
- if color is None and parent is not None:
- color = parent.color
-
- #: Controls if styling output is wanted or not.
- self.color = color
-
- self._close_callbacks = []
- self._depth = 0
-
- def __enter__(self):
- self._depth += 1
- push_context(self)
- return self
-
- def __exit__(self, exc_type, exc_value, tb):
- self._depth -= 1
- if self._depth == 0:
- self.close()
- pop_context()
-
- @contextmanager
- def scope(self, cleanup=True):
- """This helper method can be used with the context object to promote
- it to the current thread local (see :func:`get_current_context`).
- The default behavior of this is to invoke the cleanup functions which
- can be disabled by setting `cleanup` to `False`. The cleanup
- functions are typically used for things such as closing file handles.
-
- If the cleanup is intended the context object can also be directly
- used as a context manager.
-
- Example usage::
-
- with ctx.scope():
- assert get_current_context() is ctx
-
- This is equivalent::
-
- with ctx:
- assert get_current_context() is ctx
-
- .. versionadded:: 5.0
-
- :param cleanup: controls if the cleanup functions should be run or
- not. The default is to run these functions. In
- some situations the context only wants to be
- temporarily pushed in which case this can be disabled.
- Nested pushes automatically defer the cleanup.
- """
- if not cleanup:
- self._depth += 1
- try:
- with self as rv:
- yield rv
- finally:
- if not cleanup:
- self._depth -= 1
-
- @property
- def meta(self):
- """This is a dictionary which is shared with all the contexts
- that are nested. It exists so that click utilities can store some
- state here if they need to. It is however the responsibility of
- that code to manage this dictionary well.
-
- The keys are supposed to be unique dotted strings. For instance
- module paths are a good choice for it. What is stored in there is
- irrelevant for the operation of click. However what is important is
- that code that places data here adheres to the general semantics of
- the system.
-
- Example usage::
-
- LANG_KEY = __name__ + '.lang'
-
- def set_language(value):
- ctx = get_current_context()
- ctx.meta[LANG_KEY] = value
-
- def get_language():
- return get_current_context().meta.get(LANG_KEY, 'en_US')
-
- .. versionadded:: 5.0
- """
- return self._meta
-
- def make_formatter(self):
- """Creates the formatter for the help and usage output."""
- return HelpFormatter(width=self.terminal_width,
- max_width=self.max_content_width)
-
- def call_on_close(self, f):
- """This decorator remembers a function as callback that should be
- executed when the context tears down. This is most useful to bind
- resource handling to the script execution. For instance, file objects
- opened by the :class:`File` type will register their close callbacks
- here.
-
- :param f: the function to execute on teardown.
- """
- self._close_callbacks.append(f)
- return f
-
- def close(self):
- """Invokes all close callbacks."""
- for cb in self._close_callbacks:
- cb()
- self._close_callbacks = []
-
- @property
- def command_path(self):
- """The computed command path. This is used for the ``usage``
- information on the help page. It's automatically created by
- combining the info names of the chain of contexts to the root.
- """
- rv = ''
- if self.info_name is not None:
- rv = self.info_name
- if self.parent is not None:
- rv = self.parent.command_path + ' ' + rv
- return rv.lstrip()
-
- def find_root(self):
- """Finds the outermost context."""
- node = self
- while node.parent is not None:
- node = node.parent
- return node
-
- def find_object(self, object_type):
- """Finds the closest object of a given type."""
- node = self
- while node is not None:
- if isinstance(node.obj, object_type):
- return node.obj
- node = node.parent
-
- def ensure_object(self, object_type):
- """Like :meth:`find_object` but sets the innermost object to a
- new instance of `object_type` if it does not exist.
- """
- rv = self.find_object(object_type)
- if rv is None:
- self.obj = rv = object_type()
- return rv
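
A hedged usage sketch for the two lookups above, using the public decorator API this class powers: a group seeds a state object with `ensure_object`, and subcommands retrieve it by type with `find_object`.

```python
import click

class AppState(object):
    def __init__(self):
        self.verbose = False

@click.group()
@click.option('--verbose', is_flag=True)
@click.pass_context
def cli(ctx, verbose):
    state = ctx.ensure_object(AppState)     # created on first access
    state.verbose = verbose

@cli.command()
@click.pass_context
def status(ctx):
    state = ctx.find_object(AppState)       # walks up the context chain
    click.echo('verbose is %r' % state.verbose)
```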
-
- def lookup_default(self, name):
- """Looks up the default for a parameter name. This by default
- looks into the :attr:`default_map` if available.
- """
- if self.default_map is not None:
- rv = self.default_map.get(name)
- if callable(rv):
- rv = rv()
- return rv
-
- def fail(self, message):
- """Aborts the execution of the program with a specific error
- message.
-
- :param message: the error message to fail with.
- """
- raise UsageError(message, self)
-
- def abort(self):
- """Aborts the script."""
- raise Abort()
-
- def exit(self, code=0):
- """Exits the application with a given exit code."""
- sys.exit(code)
-
- def get_usage(self):
- """Helper method to get formatted usage string for the current
- context and command.
- """
- return self.command.get_usage(self)
-
- def get_help(self):
- """Helper method to get formatted help page for the current
- context and command.
- """
- return self.command.get_help(self)
-
- def invoke(*args, **kwargs):
- """Invokes a command callback in exactly the way it expects. There
- are two ways to invoke this method:
-
- 1. the first argument can be a callback and all other arguments and
- keyword arguments are forwarded directly to the function.
- 2. the first argument is a click command object. In that case all
- arguments are forwarded as well but proper click parameters
- (options and click arguments) must be keyword arguments and Click
- will fill in defaults.
-
- Note that before Click 3.2 keyword arguments were not properly filled
- in, contrary to the intention of this code, and no context was created. For
- more information about this change and why it was done in a bugfix
- release see :ref:`upgrade-to-3.2`.
- """
- self, callback = args[:2]
- ctx = self
-
- # It's also possible to invoke another command which might or
- # might not have a callback. In that case we also fill
- # in defaults and make a new context for this command.
- if isinstance(callback, Command):
- other_cmd = callback
- callback = other_cmd.callback
- ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
- if callback is None:
- raise TypeError('The given command does not have a '
- 'callback that can be invoked.')
-
- for param in other_cmd.params:
- if param.name not in kwargs and param.expose_value:
- kwargs[param.name] = param.get_default(ctx)
-
- args = args[2:]
- with augment_usage_errors(self):
- with ctx:
- return callback(*args, **kwargs)
-
- def forward(*args, **kwargs):
- """Similar to :meth:`invoke` but fills in default keyword
- arguments from the current context if the other command expects
- them. This cannot invoke callbacks directly, only other commands.
- """
- self, cmd = args[:2]
-
- # It's also possible to invoke another command which might or
- # might not have a callback.
- if not isinstance(cmd, Command):
- raise TypeError('Callback is not a command.')
-
- for param in self.params:
- if param not in kwargs:
- kwargs[param] = self.params[param]
-
- return self.invoke(cmd, **kwargs)
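
A short sketch of the difference between the two methods above: `invoke` fills in the target command's own defaults, while `forward` reuses the parameters already parsed on the current context.

```python
import click

@click.command()
@click.option('--count', default=1)
def repeat(count):
    click.echo('count=%d' % count)

@click.command()
@click.option('--count', default=1)
@click.pass_context
def caller(ctx, count):
    ctx.invoke(repeat)      # uses repeat's own default: prints count=1
    ctx.forward(repeat)     # reuses this context's --count value
```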
-
-
-class BaseCommand(object):
- """The base command implements the minimal API contract of commands.
- Most code will never use this as it does not implement a lot of useful
- functionality but it can act as the direct subclass of alternative
- parsing methods that do not depend on the Click parser.
-
- For instance, this can be used to bridge Click and other systems like
- argparse or docopt.
-
- Because base commands do not implement a lot of the API that other
- parts of Click take for granted, they are not supported for all
- operations. For instance, they usually cannot be used with the
- decorators and they have no built-in callback system.
-
- .. versionchanged:: 2.0
- Added the `context_settings` parameter.
-
- :param name: the name of the command to use unless a group overrides it.
- :param context_settings: an optional dictionary with defaults that are
- passed to the context object.
- """
- #: the default for the :attr:`Context.allow_extra_args` flag.
- allow_extra_args = False
- #: the default for the :attr:`Context.allow_interspersed_args` flag.
- allow_interspersed_args = True
- #: the default for the :attr:`Context.ignore_unknown_options` flag.
- ignore_unknown_options = False
-
- def __init__(self, name, context_settings=None):
- #: the name the command thinks it has. Upon registering a command
- #: on a :class:`Group` the group will default the command name
- #: with this information. You should instead use the
- #: :class:`Context`\'s :attr:`~Context.info_name` attribute.
- self.name = name
- if context_settings is None:
- context_settings = {}
- #: an optional dictionary with defaults passed to the context.
- self.context_settings = context_settings
-
- def get_usage(self, ctx):
- raise NotImplementedError('Base commands cannot get usage')
-
- def get_help(self, ctx):
- raise NotImplementedError('Base commands cannot get help')
-
- def make_context(self, info_name, args, parent=None, **extra):
- """This function when given an info name and arguments will kick
- off the parsing and create a new :class:`Context`. It does not
- invoke the actual command callback though.
-
- :param info_name: the info name for this invocation. Generally this
- is the most descriptive name for the script or
- command. For the toplevel script it's usually
- the name of the script, for commands below it it's
- the name of the command.
- :param args: the arguments to parse as list of strings.
- :param parent: the parent context if available.
- :param extra: extra keyword arguments forwarded to the context
- constructor.
- """
- for key, value in iteritems(self.context_settings):
- if key not in extra:
- extra[key] = value
- ctx = Context(self, info_name=info_name, parent=parent, **extra)
- with ctx.scope(cleanup=False):
- self.parse_args(ctx, args)
- return ctx
-
- def parse_args(self, ctx, args):
- """Given a context and a list of arguments this creates the parser
- and parses the arguments, then modifies the context as necessary.
- This is automatically invoked by :meth:`make_context`.
- """
- raise NotImplementedError('Base commands do not know how to parse '
- 'arguments.')
-
- def invoke(self, ctx):
- """Given a context, this invokes the command. The default
- implementation is raising a not implemented error.
- """
- raise NotImplementedError('Base commands are not invokable by default')
-
- def main(self, args=None, prog_name=None, complete_var=None,
- standalone_mode=True, **extra):
- """This is the way to invoke a script with all the bells and
- whistles as a command line application. This will always terminate
- the application after a call. If this is not wanted, ``SystemExit``
- needs to be caught.
-
- This method is also available by directly calling the instance of
- a :class:`Command`.
-
- .. versionadded:: 3.0
- Added the `standalone_mode` flag to control the standalone mode.
-
- :param args: the arguments that should be used for parsing. If not
- provided, ``sys.argv[1:]`` is used.
- :param prog_name: the program name that should be used. By default
- the program name is constructed by taking the file
- name from ``sys.argv[0]``.
- :param complete_var: the environment variable that controls the
- bash completion support. The default is
- ``"__COMPLETE"`` with prog name in
- uppercase.
- :param standalone_mode: the default behavior is to invoke the script
- in standalone mode. Click will then
- handle exceptions and convert them into
- error messages and the function will never
- return but shut down the interpreter. If
- this is set to `False` they will be
- propagated to the caller and the return
- value of this function is the return value
- of :meth:`invoke`.
- :param extra: extra keyword arguments are forwarded to the context
- constructor. See :class:`Context` for more information.
- """
- # If we are in Python 3, we will verify that the environment is
- # sane at this point or reject further execution to avoid a
- # broken script.
- if not PY2:
- _verify_python3_env()
- else:
- _check_for_unicode_literals()
-
- if args is None:
- args = get_os_args()
- else:
- args = list(args)
-
- if prog_name is None:
- prog_name = make_str(os.path.basename(
- sys.argv and sys.argv[0] or __file__))
-
- # Hook for the Bash completion. This only activates if the Bash
- # completion is actually enabled, otherwise this is quite a fast
- # noop.
- _bashcomplete(self, prog_name, complete_var)
-
- try:
- try:
- with self.make_context(prog_name, args, **extra) as ctx:
- rv = self.invoke(ctx)
- if not standalone_mode:
- return rv
- ctx.exit()
- except (EOFError, KeyboardInterrupt):
- echo(file=sys.stderr)
- raise Abort()
- except ClickException as e:
- if not standalone_mode:
- raise
- e.show()
- sys.exit(e.exit_code)
- except IOError as e:
- if e.errno == errno.EPIPE:
- sys.exit(1)
- else:
- raise
- except Abort:
- if not standalone_mode:
- raise
- echo('Aborted!', file=sys.stderr)
- sys.exit(1)
-
- def __call__(self, *args, **kwargs):
- """Alias for :meth:`main`."""
- return self.main(*args, **kwargs)
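
A hedged sketch of calling `main()` programmatically: with `standalone_mode=False` the return value of `invoke` is propagated to the caller and exceptions are raised instead of being turned into error messages.

```python
import click

@click.command()
@click.option('--name', default='world')
def hello(name):
    click.echo('Hello %s!' % name)
    return name

# Returns 'click' instead of shutting down the interpreter.
rv = hello.main(['--name', 'click'], standalone_mode=False)
assert rv == 'click'
```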
-
-
-class Command(BaseCommand):
- """Commands are the basic building block of command line interfaces in
- Click. A basic command handles command line parsing and might dispatch
- more parsing to commands nested below it.
-
- .. versionchanged:: 2.0
- Added the `context_settings` parameter.
-
- :param name: the name of the command to use unless a group overrides it.
- :param context_settings: an optional dictionary with defaults that are
- passed to the context object.
- :param callback: the callback to invoke. This is optional.
- :param params: the parameters to register with this command. This can
- be either :class:`Option` or :class:`Argument` objects.
- :param help: the help string to use for this command.
- :param epilog: like the help string but it's printed at the end of the
- help page after everything else.
- :param short_help: the short help to use for this command. This is
- shown on the command listing of the parent command.
- :param add_help_option: by default each command registers a ``--help``
- option. This can be disabled by this parameter.
- """
-
- def __init__(self, name, context_settings=None, callback=None,
- params=None, help=None, epilog=None, short_help=None,
- options_metavar='[OPTIONS]', add_help_option=True):
- BaseCommand.__init__(self, name, context_settings)
- #: the callback to execute when the command fires. This might be
- #: `None` in which case nothing happens.
- self.callback = callback
- #: the list of parameters for this command in the order they
- #: should show up in the help page and execute. Eager parameters
- #: will automatically be handled before non eager ones.
- self.params = params or []
- self.help = help
- self.epilog = epilog
- self.options_metavar = options_metavar
- if short_help is None and help:
- short_help = make_default_short_help(help)
- self.short_help = short_help
- self.add_help_option = add_help_option
-
- def get_usage(self, ctx):
- formatter = ctx.make_formatter()
- self.format_usage(ctx, formatter)
- return formatter.getvalue().rstrip('\n')
-
- def get_params(self, ctx):
- rv = self.params
- help_option = self.get_help_option(ctx)
- if help_option is not None:
- rv = rv + [help_option]
- return rv
-
- def format_usage(self, ctx, formatter):
- """Writes the usage line into the formatter."""
- pieces = self.collect_usage_pieces(ctx)
- formatter.write_usage(ctx.command_path, ' '.join(pieces))
-
- def collect_usage_pieces(self, ctx):
- """Returns all the pieces that go into the usage line and returns
- it as a list of strings.
- """
- rv = [self.options_metavar]
- for param in self.get_params(ctx):
- rv.extend(param.get_usage_pieces(ctx))
- return rv
-
- def get_help_option_names(self, ctx):
- """Returns the names for the help option."""
- all_names = set(ctx.help_option_names)
- for param in self.params:
- all_names.difference_update(param.opts)
- all_names.difference_update(param.secondary_opts)
- return all_names
-
- def get_help_option(self, ctx):
- """Returns the help option object."""
- help_options = self.get_help_option_names(ctx)
- if not help_options or not self.add_help_option:
- return
-
- def show_help(ctx, param, value):
- if value and not ctx.resilient_parsing:
- echo(ctx.get_help(), color=ctx.color)
- ctx.exit()
- return Option(help_options, is_flag=True,
- is_eager=True, expose_value=False,
- callback=show_help,
- help='Show this message and exit.')
-
- def make_parser(self, ctx):
- """Creates the underlying option parser for this command."""
- parser = OptionParser(ctx)
- parser.allow_interspersed_args = ctx.allow_interspersed_args
- parser.ignore_unknown_options = ctx.ignore_unknown_options
- for param in self.get_params(ctx):
- param.add_to_parser(parser, ctx)
- return parser
-
- def get_help(self, ctx):
- """Formats the help into a string and returns it. This creates a
- formatter and calls into the formatting methods described in :meth:`format_help`.
- """
- formatter = ctx.make_formatter()
- self.format_help(ctx, formatter)
- return formatter.getvalue().rstrip('\n')
-
- def format_help(self, ctx, formatter):
- """Writes the help into the formatter if it exists.
-
- This calls into the following methods:
-
- - :meth:`format_usage`
- - :meth:`format_help_text`
- - :meth:`format_options`
- - :meth:`format_epilog`
- """
- self.format_usage(ctx, formatter)
- self.format_help_text(ctx, formatter)
- self.format_options(ctx, formatter)
- self.format_epilog(ctx, formatter)
-
- def format_help_text(self, ctx, formatter):
- """Writes the help text to the formatter if it exists."""
- if self.help:
- formatter.write_paragraph()
- with formatter.indentation():
- formatter.write_text(self.help)
-
- def format_options(self, ctx, formatter):
- """Writes all the options into the formatter if they exist."""
- opts = []
- for param in self.get_params(ctx):
- rv = param.get_help_record(ctx)
- if rv is not None:
- opts.append(rv)
-
- if opts:
- with formatter.section('Options'):
- formatter.write_dl(opts)
-
- def format_epilog(self, ctx, formatter):
- """Writes the epilog into the formatter if it exists."""
- if self.epilog:
- formatter.write_paragraph()
- with formatter.indentation():
- formatter.write_text(self.epilog)
-
- def parse_args(self, ctx, args):
- parser = self.make_parser(ctx)
- opts, args, param_order = parser.parse_args(args=args)
-
- for param in iter_params_for_processing(
- param_order, self.get_params(ctx)):
- value, args = param.handle_parse_result(ctx, opts, args)
-
- if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
- ctx.fail('Got unexpected extra argument%s (%s)'
- % (len(args) != 1 and 's' or '',
- ' '.join(map(make_str, args))))
-
- ctx.args = args
- return args
-
- def invoke(self, ctx):
- """Given a context, this invokes the attached callback (if it exists)
- in the right way.
- """
- if self.callback is not None:
- return ctx.invoke(self.callback, **ctx.params)
-
-
-class MultiCommand(Command):
- """A multi command is the basic implementation of a command that
- dispatches to subcommands. The most common version is the
- :class:`Group`.
-
- :param invoke_without_command: this controls how the multi command itself
- is invoked. By default it's only invoked
- if a subcommand is provided.
- :param no_args_is_help: this controls what happens if no arguments are
- provided. This option is enabled by default if
- `invoke_without_command` is disabled, and
- disabled if it's enabled. If enabled, this will
- add ``--help`` as the argument if no arguments
- are passed.
- :param subcommand_metavar: the string that is used in the documentation
- to indicate the subcommand place.
- :param chain: if this is set to `True` chaining of multiple subcommands
- is enabled. This restricts the form of commands in that
- they cannot have optional arguments but it allows
- multiple commands to be chained together.
- :param result_callback: the result callback to attach to this multi
- command.
- """
- allow_extra_args = True
- allow_interspersed_args = False
-
- def __init__(self, name=None, invoke_without_command=False,
- no_args_is_help=None, subcommand_metavar=None,
- chain=False, result_callback=None, **attrs):
- Command.__init__(self, name, **attrs)
- if no_args_is_help is None:
- no_args_is_help = not invoke_without_command
- self.no_args_is_help = no_args_is_help
- self.invoke_without_command = invoke_without_command
- if subcommand_metavar is None:
- if chain:
- subcommand_metavar = SUBCOMMANDS_METAVAR
- else:
- subcommand_metavar = SUBCOMMAND_METAVAR
- self.subcommand_metavar = subcommand_metavar
- self.chain = chain
- #: The result callback that is stored. This can be set or
- #: overridden with the :func:`resultcallback` decorator.
- self.result_callback = result_callback
-
- if self.chain:
- for param in self.params:
- if isinstance(param, Argument) and not param.required:
- raise RuntimeError('Multi commands in chain mode cannot '
- 'have optional arguments.')
-
- def collect_usage_pieces(self, ctx):
- rv = Command.collect_usage_pieces(self, ctx)
- rv.append(self.subcommand_metavar)
- return rv
-
- def format_options(self, ctx, formatter):
- Command.format_options(self, ctx, formatter)
- self.format_commands(ctx, formatter)
-
- def resultcallback(self, replace=False):
- """Adds a result callback to the chain command. By default if a
- result callback is already registered this will chain them but
- this can be disabled with the `replace` parameter. The result
- callback is invoked with the return value of the subcommand
- (or the list of return values from all subcommands if chaining
- is enabled) as well as the parameters as they would be passed
- to the main callback.
-
- Example::
-
- @click.group()
- @click.option('-i', '--input', default=23)
- def cli(input):
- return 42
-
- @cli.resultcallback()
- def process_result(result, input):
- return result + input
-
- .. versionadded:: 3.0
-
- :param replace: if set to `True` an already existing result
- callback will be removed.
- """
- def decorator(f):
- old_callback = self.result_callback
- if old_callback is None or replace:
- self.result_callback = f
- return f
- def function(__value, *args, **kwargs):
- return f(old_callback(__value, *args, **kwargs),
- *args, **kwargs)
- self.result_callback = rv = update_wrapper(function, f)
- return rv
- return decorator
-
- def format_commands(self, ctx, formatter):
- """Extra format methods for multi methods that adds all the commands
- after the options.
- """
- rows = []
- for subcommand in self.list_commands(ctx):
- cmd = self.get_command(ctx, subcommand)
- # What is this, the tool lied about a command. Ignore it
- if cmd is None:
- continue
-
- help = cmd.short_help or ''
- rows.append((subcommand, help))
-
- if rows:
- with formatter.section('Commands'):
- formatter.write_dl(rows)
-
- def parse_args(self, ctx, args):
- if not args and self.no_args_is_help and not ctx.resilient_parsing:
- echo(ctx.get_help(), color=ctx.color)
- ctx.exit()
-
- rest = Command.parse_args(self, ctx, args)
- if self.chain:
- ctx.protected_args = rest
- ctx.args = []
- elif rest:
- ctx.protected_args, ctx.args = rest[:1], rest[1:]
-
- return ctx.args
-
- def invoke(self, ctx):
- def _process_result(value):
- if self.result_callback is not None:
- value = ctx.invoke(self.result_callback, value,
- **ctx.params)
- return value
-
- if not ctx.protected_args:
- # If we are invoked without command the chain flag controls
- # how this happens. If we are not in chain mode, the return
- # value here is the return value of the command.
- # If however we are in chain mode, the return value is the
- # return value of the result processor invoked with an empty
- # list (which means that no subcommand actually was executed).
- if self.invoke_without_command:
- if not self.chain:
- return Command.invoke(self, ctx)
- with ctx:
- Command.invoke(self, ctx)
- return _process_result([])
- ctx.fail('Missing command.')
-
- # Fetch args back out
- args = ctx.protected_args + ctx.args
- ctx.args = []
- ctx.protected_args = []
-
- # If we're not in chain mode, we only allow the invocation of a
- # single command but we also inform the current context about the
- # name of the command to invoke.
- if not self.chain:
- # Make sure the context is entered so we do not clean up
- # resources until the result processor has worked.
- with ctx:
- cmd_name, cmd, args = self.resolve_command(ctx, args)
- ctx.invoked_subcommand = cmd_name
- Command.invoke(self, ctx)
- sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
- with sub_ctx:
- return _process_result(sub_ctx.command.invoke(sub_ctx))
-
- # In chain mode we create the contexts step by step, but after the
- # base command has been invoked. Because at that point we do not
- # know the subcommands yet, the invoked subcommand attribute is
- # set to ``*`` to inform the command that subcommands are executed
- # but nothing else.
- with ctx:
- ctx.invoked_subcommand = args and '*' or None
- Command.invoke(self, ctx)
-
- # Otherwise we make every single context and invoke them in a
- # chain. In that case the return value to the result processor
- # is the list of all invoked subcommand's results.
- contexts = []
- while args:
- cmd_name, cmd, args = self.resolve_command(ctx, args)
- sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
- allow_extra_args=True,
- allow_interspersed_args=False)
- contexts.append(sub_ctx)
- args, sub_ctx.args = sub_ctx.args, []
-
- rv = []
- for sub_ctx in contexts:
- with sub_ctx:
- rv.append(sub_ctx.command.invoke(sub_ctx))
- return _process_result(rv)
-
- def resolve_command(self, ctx, args):
- cmd_name = make_str(args[0])
- original_cmd_name = cmd_name
-
- # Get the command
- cmd = self.get_command(ctx, cmd_name)
-
- # If we can't find the command but there is a normalization
- # function available, we try with that one.
- if cmd is None and ctx.token_normalize_func is not None:
- cmd_name = ctx.token_normalize_func(cmd_name)
- cmd = self.get_command(ctx, cmd_name)
-
- # If we don't find the command we want to show an error message
- # to the user that it was not provided. However, there is
- # something else we should do: if the first argument looks like
- # an option we want to kick off parsing again for arguments to
- # resolve things like --help which now should go to the main
- # place.
- if cmd is None:
- if split_opt(cmd_name)[0]:
- self.parse_args(ctx, ctx.args)
- ctx.fail('No such command "%s".' % original_cmd_name)
-
- return cmd_name, cmd, args[1:]
-
- def get_command(self, ctx, cmd_name):
- """Given a context and a command name, this returns a
- :class:`Command` object if it exists or returns `None`.
- """
- raise NotImplementedError()
-
- def list_commands(self, ctx):
- """Returns a list of subcommand names in the order they should
- appear.
- """
- return []
-
-
-class Group(MultiCommand):
- """A group allows a command to have subcommands attached. This is the
- most common way to implement nesting in Click.
-
- :param commands: a dictionary of commands.
- """
-
- def __init__(self, name=None, commands=None, **attrs):
- MultiCommand.__init__(self, name, **attrs)
- #: the registered subcommands by their exported names.
- self.commands = commands or {}
-
- def add_command(self, cmd, name=None):
- """Registers another :class:`Command` with this group. If the name
- is not provided, the name of the command is used.
- """
- name = name or cmd.name
- if name is None:
- raise TypeError('Command has no name.')
- _check_multicommand(self, name, cmd, register=True)
- self.commands[name] = cmd
-
- def command(self, *args, **kwargs):
- """A shortcut decorator for declaring and attaching a command to
- the group. This takes the same arguments as :func:`command` but
- immediately registers the created command with this instance by
- calling into :meth:`add_command`.
- """
- def decorator(f):
- cmd = command(*args, **kwargs)(f)
- self.add_command(cmd)
- return cmd
- return decorator
-
- def group(self, *args, **kwargs):
- """A shortcut decorator for declaring and attaching a group to
- the group. This takes the same arguments as :func:`group` but
- immediately registers the created command with this instance by
- calling into :meth:`add_command`.
- """
- def decorator(f):
- cmd = group(*args, **kwargs)(f)
- self.add_command(cmd)
- return cmd
- return decorator
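
A minimal sketch of the two shortcut decorators above: commands and nested groups register themselves through `add_command` automatically.

```python
import click

@click.group()
def cli():
    pass

@cli.command()
def build():
    click.echo('building')

@cli.group()
def db():
    pass

@db.command()
def migrate():
    click.echo('migrating')

# Invocations then look like:  cli build   /   cli db migrate
```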
-
- def get_command(self, ctx, cmd_name):
- return self.commands.get(cmd_name)
-
- def list_commands(self, ctx):
- return sorted(self.commands)
-
-
-class CommandCollection(MultiCommand):
- """A command collection is a multi command that merges multiple multi
- commands together into one. This is a straightforward implementation
- that accepts a list of different multi commands as sources and
- provides all the commands for each of them.
- """
-
- def __init__(self, name=None, sources=None, **attrs):
- MultiCommand.__init__(self, name, **attrs)
- #: The list of registered multi commands.
- self.sources = sources or []
-
- def add_source(self, multi_cmd):
- """Adds a new multi command to the chain dispatcher."""
- self.sources.append(multi_cmd)
-
- def get_command(self, ctx, cmd_name):
- for source in self.sources:
- rv = source.get_command(ctx, cmd_name)
- if rv is not None:
- if self.chain:
- _check_multicommand(self, cmd_name, rv)
- return rv
-
- def list_commands(self, ctx):
- rv = set()
- for source in self.sources:
- rv.update(source.list_commands(ctx))
- return sorted(rv)
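
A hedged sketch of merging two groups through this class, so that one entry point exposes the commands of both sources:

```python
import click

@click.group()
def tools():
    pass

@tools.command()
def lint():
    click.echo('linting')

@click.group()
def extras():
    pass

@extras.command()
def docs():
    click.echo('building docs')

cli = click.CommandCollection(sources=[tools, extras])
# `cli lint` and `cli docs` both resolve via get_command() above.
```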
-
-
-class Parameter(object):
- """A parameter to a command comes in two versions: they are either
- :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
- not supported by design as some of the internals for parsing are
- intentionally not finalized.
-
- Some settings are supported by both options and arguments.
-
- .. versionchanged:: 2.0
- Changed signature for parameter callback to also be passed the
- parameter. In Click 2.0, the old callback format will still work,
- but it will raise a warning to give you change to migrate the
- code easier.
-
- :param param_decls: the parameter declarations for this option or
- argument. This is a list of flags or argument
- names.
- :param type: the type that should be used. Either a :class:`ParamType`
- or a Python type. The latter is converted into the former
- automatically if supported.
- :param required: controls if this is optional or not.
- :param default: the default value if omitted. This can also be a callable,
- in which case it's invoked when the default is needed
- without any arguments.
- :param callback: a callback that should be executed after the parameter
- was matched. This is called as ``fn(ctx, param,
- value)`` and needs to return the value. Before Click
- 2.0, the signature was ``(ctx, value)``.
- :param nargs: the number of arguments to match. If not ``1`` the return
- value is a tuple instead of single value. The default for
- nargs is ``1`` (except if the type is a tuple, then it's
- the arity of the tuple).
- :param metavar: how the value is represented in the help page.
- :param expose_value: if this is `True` then the value is passed onwards
- to the command callback and stored on the context,
- otherwise it's skipped.
- :param is_eager: eager values are processed before non eager ones. This
- should not be set for arguments or it will invert the
- order of processing.
- :param envvar: a string or list of strings that are environment variables
- that should be checked.
- """
- param_type_name = 'parameter'
-
- def __init__(self, param_decls=None, type=None, required=False,
- default=None, callback=None, nargs=None, metavar=None,
- expose_value=True, is_eager=False, envvar=None):
- self.name, self.opts, self.secondary_opts = \
- self._parse_decls(param_decls or (), expose_value)
-
- self.type = convert_type(type, default)
-
- # Default nargs to what the type tells us if we have that
- # information available.
- if nargs is None:
- if self.type.is_composite:
- nargs = self.type.arity
- else:
- nargs = 1
-
- self.required = required
- self.callback = callback
- self.nargs = nargs
- self.multiple = False
- self.expose_value = expose_value
- self.default = default
- self.is_eager = is_eager
- self.metavar = metavar
- self.envvar = envvar
-
- @property
- def human_readable_name(self):
- """Returns the human readable name of this parameter. This is the
- same as the name for options, but the metavar for arguments.
- """
- return self.name
-
- def make_metavar(self):
- if self.metavar is not None:
- return self.metavar
- metavar = self.type.get_metavar(self)
- if metavar is None:
- metavar = self.type.name.upper()
- if self.nargs != 1:
- metavar += '...'
- return metavar
-
- def get_default(self, ctx):
- """Given a context variable this calculates the default value."""
- # Otherwise go with the regular default.
- if callable(self.default):
- rv = self.default()
- else:
- rv = self.default
- return self.type_cast_value(ctx, rv)
-
- def add_to_parser(self, parser, ctx):
- pass
-
- def consume_value(self, ctx, opts):
- value = opts.get(self.name)
- if value is None:
- value = ctx.lookup_default(self.name)
- if value is None:
- value = self.value_from_envvar(ctx)
- return value
-
- def type_cast_value(self, ctx, value):
- """Given a value this runs it properly through the type system.
- This automatically handles things like `nargs` and `multiple` as
- well as composite types.
- """
- if self.type.is_composite:
- if self.nargs <= 1:
- raise TypeError('Attempted to invoke composite type '
- 'but nargs has been set to %s. This is '
- 'not supported; nargs needs to be set to '
- 'a fixed value > 1.' % self.nargs)
- if self.multiple:
- return tuple(self.type(x or (), self, ctx) for x in value or ())
- return self.type(value or (), self, ctx)
-
- def _convert(value, level):
- if level == 0:
- return self.type(value, self, ctx)
- return tuple(_convert(x, level - 1) for x in value or ())
- return _convert(value, (self.nargs != 1) + bool(self.multiple))
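
A short sketch of the nesting rule implemented above: `nargs != 1` adds one tuple level and `multiple=True` adds another, so combining both yields a tuple of tuples.

```python
import click

@click.command()
@click.option('--point', nargs=2, type=int, multiple=True)
def plot(point):
    click.echo(repr(point))

# plot --point 1 2 --point 3 4   prints   ((1, 2), (3, 4))
```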
-
- def process_value(self, ctx, value):
- """Given a value and context this runs the logic to convert the
- value as necessary.
- """
- # If the value we were given is None we do nothing. This way
- # code that calls this can easily figure out if something was
- # not provided. Otherwise it would be converted into an empty
- # tuple for multiple invocations which is inconvenient.
- if value is not None:
- return self.type_cast_value(ctx, value)
-
- def value_is_missing(self, value):
- if value is None:
- return True
- if (self.nargs != 1 or self.multiple) and value == ():
- return True
- return False
-
- def full_process_value(self, ctx, value):
- value = self.process_value(ctx, value)
-
- if value is None:
- value = self.get_default(ctx)
-
- if self.required and self.value_is_missing(value):
- raise MissingParameter(ctx=ctx, param=self)
-
- return value
-
- def resolve_envvar_value(self, ctx):
- if self.envvar is None:
- return
- if isinstance(self.envvar, (tuple, list)):
- for envvar in self.envvar:
- rv = os.environ.get(envvar)
- if rv is not None:
- return rv
- else:
- return os.environ.get(self.envvar)
-
- def value_from_envvar(self, ctx):
- rv = self.resolve_envvar_value(ctx)
- if rv is not None and self.nargs != 1:
- rv = self.type.split_envvar_value(rv)
- return rv
-
- def handle_parse_result(self, ctx, opts, args):
- with augment_usage_errors(ctx, param=self):
- value = self.consume_value(ctx, opts)
- try:
- value = self.full_process_value(ctx, value)
- except Exception:
- if not ctx.resilient_parsing:
- raise
- value = None
- if self.callback is not None:
- try:
- value = invoke_param_callback(
- self.callback, ctx, self, value)
- except Exception:
- if not ctx.resilient_parsing:
- raise
-
- if self.expose_value:
- ctx.params[self.name] = value
- return value, args
-
- def get_help_record(self, ctx):
- pass
-
- def get_usage_pieces(self, ctx):
- return []
-
-
-class Option(Parameter):
- """Options are usually optional values on the command line and
- have some extra features that arguments don't have.
-
- All other parameters are passed onwards to the parameter constructor.
-
- :param show_default: controls if the default value should be shown on the
- help page. Normally, defaults are not shown.
- :param prompt: if set to `True` or a non empty string then the user will
- be prompted for input if not set. If set to `True` the
- prompt will be the option name capitalized.
- :param confirmation_prompt: if set then the value will need to be confirmed
- if it was prompted for.
- :param hide_input: if this is `True` then the input on the prompt will be
- hidden from the user. This is useful for password
- input.
- :param is_flag: forces this option to act as a flag. The default is
- auto detection.
- :param flag_value: which value should be used for this flag if it's
- enabled. This is set to a boolean automatically if
- the option string contains a slash to mark two options.
- :param multiple: if this is set to `True` then the argument is accepted
- multiple times and recorded. This is similar to ``nargs``
- in how it works but supports arbitrary number of
- arguments.
- :param count: this flag makes an option increment an integer.
- :param allow_from_autoenv: if this is enabled then the value of this
- parameter will be pulled from an environment
- variable in case a prefix is defined on the
- context.
- :param help: the help string.
- """
- param_type_name = 'option'
-
- def __init__(self, param_decls=None, show_default=False,
- prompt=False, confirmation_prompt=False,
- hide_input=False, is_flag=None, flag_value=None,
- multiple=False, count=False, allow_from_autoenv=True,
- type=None, help=None, **attrs):
- default_is_missing = attrs.get('default', _missing) is _missing
- Parameter.__init__(self, param_decls, type=type, **attrs)
-
- if prompt is True:
- prompt_text = self.name.replace('_', ' ').capitalize()
- elif prompt is False:
- prompt_text = None
- else:
- prompt_text = prompt
- self.prompt = prompt_text
- self.confirmation_prompt = confirmation_prompt
- self.hide_input = hide_input
-
- # Flags
- if is_flag is None:
- if flag_value is not None:
- is_flag = True
- else:
- is_flag = bool(self.secondary_opts)
- if is_flag and default_is_missing:
- self.default = False
- if flag_value is None:
- flag_value = not self.default
- self.is_flag = is_flag
- self.flag_value = flag_value
- if self.is_flag and isinstance(self.flag_value, bool) \
- and type is None:
- self.type = BOOL
- self.is_bool_flag = True
- else:
- self.is_bool_flag = False
-
- # Counting
- self.count = count
- if count:
- if type is None:
- self.type = IntRange(min=0)
- if default_is_missing:
- self.default = 0
-
- self.multiple = multiple
- self.allow_from_autoenv = allow_from_autoenv
- self.help = help
- self.show_default = show_default
-
- # Sanity check for stuff we don't support
- if __debug__:
- if self.nargs < 0:
- raise TypeError('Options cannot have nargs < 0')
- if self.prompt and self.is_flag and not self.is_bool_flag:
- raise TypeError('Cannot prompt for flags that are not bools.')
- if not self.is_bool_flag and self.secondary_opts:
- raise TypeError('Got secondary option for non boolean flag.')
- if self.is_bool_flag and self.hide_input \
- and self.prompt is not None:
- raise TypeError('Hidden input does not work with boolean '
- 'flag prompts.')
- if self.count:
- if self.multiple:
- raise TypeError('Options cannot be multiple and count '
- 'at the same time.')
- elif self.is_flag:
- raise TypeError('Options cannot be count and flags at '
- 'the same time.')
-
- def _parse_decls(self, decls, expose_value):
- opts = []
- secondary_opts = []
- name = None
- possible_names = []
-
- for decl in decls:
- if isidentifier(decl):
- if name is not None:
- raise TypeError('Name defined twice')
- name = decl
- else:
- split_char = decl[:1] == '/' and ';' or '/'
- if split_char in decl:
- first, second = decl.split(split_char, 1)
- first = first.rstrip()
- if first:
- possible_names.append(split_opt(first))
- opts.append(first)
- second = second.lstrip()
- if second:
- secondary_opts.append(second.lstrip())
- else:
- possible_names.append(split_opt(decl))
- opts.append(decl)
-
- if name is None and possible_names:
- possible_names.sort(key=lambda x: len(x[0]))
- name = possible_names[-1][1].replace('-', '_').lower()
- if not isidentifier(name):
- name = None
-
- if name is None:
- if not expose_value:
- return None, opts, secondary_opts
- raise TypeError('Could not determine name for option')
-
- if not opts and not secondary_opts:
- raise TypeError('No options defined but a name was passed (%s). '
- 'Did you mean to declare an argument instead '
- 'of an option?' % name)
-
- return name, opts, secondary_opts
-
- def add_to_parser(self, parser, ctx):
- kwargs = {
- 'dest': self.name,
- 'nargs': self.nargs,
- 'obj': self,
- }
-
- if self.multiple:
- action = 'append'
- elif self.count:
- action = 'count'
- else:
- action = 'store'
-
- if self.is_flag:
- kwargs.pop('nargs', None)
- if self.is_bool_flag and self.secondary_opts:
- parser.add_option(self.opts, action=action + '_const',
- const=True, **kwargs)
- parser.add_option(self.secondary_opts, action=action +
- '_const', const=False, **kwargs)
- else:
- parser.add_option(self.opts, action=action + '_const',
- const=self.flag_value,
- **kwargs)
- else:
- kwargs['action'] = action
- parser.add_option(self.opts, **kwargs)
-
- def get_help_record(self, ctx):
- any_prefix_is_slash = []
-
- def _write_opts(opts):
- rv, any_slashes = join_options(opts)
- if any_slashes:
- any_prefix_is_slash[:] = [True]
- if not self.is_flag and not self.count:
- rv += ' ' + self.make_metavar()
- return rv
-
- rv = [_write_opts(self.opts)]
- if self.secondary_opts:
- rv.append(_write_opts(self.secondary_opts))
-
- help = self.help or ''
- extra = []
- if self.default is not None and self.show_default:
- extra.append('default: %s' % (
- ', '.join('%s' % d for d in self.default)
- if isinstance(self.default, (list, tuple))
- else self.default, ))
- if self.required:
- extra.append('required')
- if extra:
- help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
-
- return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help)
-
- def get_default(self, ctx):
- # If we're a non-boolean flag, our default is more complex because
- # we need to look at all flags in the same group to figure out
- # if we're the default one, in which case we return the flag
- # value as the default.
- if self.is_flag and not self.is_bool_flag:
- for param in ctx.command.params:
- if param.name == self.name and param.default:
- return param.flag_value
- return None
- return Parameter.get_default(self, ctx)
-
- def prompt_for_value(self, ctx):
- """This is an alternative flow that can be activated in the full
- value processing if a value does not exist. It will prompt the
- user until a valid value exists and then return the processed
- value as the result.
- """
- # Calculate the default before prompting anything to be stable.
- default = self.get_default(ctx)
-
- # If this is a prompt for a flag we need to handle this
- # differently.
- if self.is_bool_flag:
- return confirm(self.prompt, default)
-
- return prompt(self.prompt, default=default,
- hide_input=self.hide_input,
- confirmation_prompt=self.confirmation_prompt,
- value_proc=lambda x: self.process_value(ctx, x))
-
- def resolve_envvar_value(self, ctx):
- rv = Parameter.resolve_envvar_value(self, ctx)
- if rv is not None:
- return rv
- if self.allow_from_autoenv and \
- ctx.auto_envvar_prefix is not None:
- envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper())
- return os.environ.get(envvar)
-
- def value_from_envvar(self, ctx):
- rv = self.resolve_envvar_value(ctx)
- if rv is None:
- return None
- value_depth = (self.nargs != 1) + bool(self.multiple)
- if value_depth > 0 and rv is not None:
- rv = self.type.split_envvar_value(rv)
- if self.multiple and self.nargs != 1:
- rv = batch(rv, self.nargs)
- return rv
-
- def full_process_value(self, ctx, value):
- if value is None and self.prompt is not None \
- and not ctx.resilient_parsing:
- return self.prompt_for_value(ctx)
- return Parameter.full_process_value(self, ctx, value)
-
-
-class Argument(Parameter):
- """Arguments are positional parameters to a command. They generally
- provide fewer features than options but can have infinite ``nargs``
- and are required by default.
-
- All parameters are passed onwards to the parameter constructor.
- """
- param_type_name = 'argument'
-
- def __init__(self, param_decls, required=None, **attrs):
- if required is None:
- if attrs.get('default') is not None:
- required = False
- else:
- required = attrs.get('nargs', 1) > 0
- Parameter.__init__(self, param_decls, required=required, **attrs)
- if self.default is not None and self.nargs < 0:
- raise TypeError('nargs=-1 in combination with a default value '
- 'is not supported.')
-
- @property
- def human_readable_name(self):
- if self.metavar is not None:
- return self.metavar
- return self.name.upper()
-
- def make_metavar(self):
- if self.metavar is not None:
- return self.metavar
- var = self.name.upper()
- if not self.required:
- var = '[%s]' % var
- if self.nargs != 1:
- var += '...'
- return var
-
- def _parse_decls(self, decls, expose_value):
- if not decls:
- if not expose_value:
- return None, [], []
- raise TypeError('Could not determine name for argument')
- if len(decls) == 1:
- name = arg = decls[0]
- name = name.replace('-', '_').lower()
- elif len(decls) == 2:
- name, arg = decls
- else:
- raise TypeError('Arguments take exactly one or two '
- 'parameter declarations, got %d' % len(decls))
- return name, [arg], []
-
- def get_usage_pieces(self, ctx):
- return [self.make_metavar()]
-
- def add_to_parser(self, parser, ctx):
- parser.add_argument(dest=self.name, nargs=self.nargs,
- obj=self)
-
-
-# Circular dependency between decorators and core
-from .decorators import command, group
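For reference, the `Option` and `Argument` classes deleted above are normally reached through Click's public decorators rather than instantiated directly. A minimal, illustrative sketch (the `greet` command and its options are hypothetical, assuming the click 6.x API this vendored copy corresponds to)::

    import click

    @click.command()
    @click.option('--count', count=True, help='Increase verbosity.')
    @click.option('--shout/--no-shout', default=False)  # slash creates secondary opts
    @click.argument('name')
    def greet(count, shout, name):
        """Greets NAME, exercising flag, count and argument handling."""
        message = 'Hello, %s!' % name
        if shout:
            message = message.upper()
        # count=True turns --count into an IntRange(min=0) counter,
        # per Option.__init__ above.
        click.echo('%s (verbosity=%d)' % (message, count))

    if __name__ == '__main__':
        greet()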
diff --git a/venv/Lib/site-packages/click/decorators.py b/venv/Lib/site-packages/click/decorators.py
deleted file mode 100644
index 9893452..0000000
--- a/venv/Lib/site-packages/click/decorators.py
+++ /dev/null
@@ -1,304 +0,0 @@
-import sys
-import inspect
-
-from functools import update_wrapper
-
-from ._compat import iteritems
-from ._unicodefun import _check_for_unicode_literals
-from .utils import echo
-from .globals import get_current_context
-
-
-def pass_context(f):
- """Marks a callback as wanting to receive the current context
- object as first argument.
- """
- def new_func(*args, **kwargs):
- return f(get_current_context(), *args, **kwargs)
- return update_wrapper(new_func, f)
-
-
-def pass_obj(f):
- """Similar to :func:`pass_context`, but only pass the object on the
- context onwards (:attr:`Context.obj`). This is useful if that object
- represents the state of a nested system.
- """
- def new_func(*args, **kwargs):
- return f(get_current_context().obj, *args, **kwargs)
- return update_wrapper(new_func, f)
-
-
-def make_pass_decorator(object_type, ensure=False):
- """Given an object type this creates a decorator that will work
- similarly to :func:`pass_obj` but instead of passing the object of the
- current context, it will find the innermost context of type
- ``object_type``.
-
- This generates a decorator that works roughly like this::
-
- from functools import update_wrapper
-
- def decorator(f):
- @pass_context
- def new_func(ctx, *args, **kwargs):
- obj = ctx.find_object(object_type)
- return ctx.invoke(f, obj, *args, **kwargs)
- return update_wrapper(new_func, f)
- return decorator
-
- :param object_type: the type of the object to pass.
- :param ensure: if set to `True`, a new object will be created and
- remembered on the context if it's not there yet.
- """
- def decorator(f):
- def new_func(*args, **kwargs):
- ctx = get_current_context()
- if ensure:
- obj = ctx.ensure_object(object_type)
- else:
- obj = ctx.find_object(object_type)
- if obj is None:
- raise RuntimeError('Managed to invoke callback without a '
- 'context object of type %r existing'
- % object_type.__name__)
- return ctx.invoke(f, obj, *args[1:], **kwargs)
- return update_wrapper(new_func, f)
- return decorator
-
-
-def _make_command(f, name, attrs, cls):
- if isinstance(f, Command):
- raise TypeError('Attempted to convert a callback into a '
- 'command twice.')
- try:
- params = f.__click_params__
- params.reverse()
- del f.__click_params__
- except AttributeError:
- params = []
- help = attrs.get('help')
- if help is None:
- help = inspect.getdoc(f)
- if isinstance(help, bytes):
- help = help.decode('utf-8')
- else:
- help = inspect.cleandoc(help)
- attrs['help'] = help
- _check_for_unicode_literals()
- return cls(name=name or f.__name__.lower(),
- callback=f, params=params, **attrs)
-
-
-def command(name=None, cls=None, **attrs):
- """Creates a new :class:`Command` and uses the decorated function as
- callback. This will also automatically attach all decorated
- :func:`option`\s and :func:`argument`\s as parameters to the command.
-
- The name of the command defaults to the name of the function. If you
- want to change that, you can pass the intended name as the first
- argument.
-
- All keyword arguments are forwarded to the underlying command class.
-
- Once decorated the function turns into a :class:`Command` instance
- that can be invoked as a command line utility or be attached to a
- command :class:`Group`.
-
- :param name: the name of the command. This defaults to the function
- name.
- :param cls: the command class to instantiate. This defaults to
- :class:`Command`.
- """
- if cls is None:
- cls = Command
- def decorator(f):
- cmd = _make_command(f, name, attrs, cls)
- cmd.__doc__ = f.__doc__
- return cmd
- return decorator
-
-
-def group(name=None, **attrs):
- """Creates a new :class:`Group` with a function as callback. This
- works the same as :func:`command` except that the `cls`
- parameter is set to :class:`Group`.
- """
- attrs.setdefault('cls', Group)
- return command(name, **attrs)
-
-
-def _param_memo(f, param):
- if isinstance(f, Command):
- f.params.append(param)
- else:
- if not hasattr(f, '__click_params__'):
- f.__click_params__ = []
- f.__click_params__.append(param)
-
-
-def argument(*param_decls, **attrs):
- """Attaches an argument to the command. All positional arguments are
- passed as parameter declarations to :class:`Argument`; all keyword
- arguments are forwarded unchanged (except ``cls``).
- This is equivalent to creating an :class:`Argument` instance manually
- and attaching it to the :attr:`Command.params` list.
-
- :param cls: the argument class to instantiate. This defaults to
- :class:`Argument`.
- """
- def decorator(f):
- ArgumentClass = attrs.pop('cls', Argument)
- _param_memo(f, ArgumentClass(param_decls, **attrs))
- return f
- return decorator
-
-
-def option(*param_decls, **attrs):
- """Attaches an option to the command. All positional arguments are
- passed as parameter declarations to :class:`Option`; all keyword
- arguments are forwarded unchanged (except ``cls``).
- This is equivalent to creating an :class:`Option` instance manually
- and attaching it to the :attr:`Command.params` list.
-
- :param cls: the option class to instantiate. This defaults to
- :class:`Option`.
- """
- def decorator(f):
- if 'help' in attrs:
- attrs['help'] = inspect.cleandoc(attrs['help'])
- OptionClass = attrs.pop('cls', Option)
- _param_memo(f, OptionClass(param_decls, **attrs))
- return f
- return decorator
-
-
-def confirmation_option(*param_decls, **attrs):
- """Shortcut for confirmation prompts that can be ignored by passing
- ``--yes`` as a parameter.
-
- This is equivalent to decorating a function with :func:`option` with
- the following parameters::
-
- def callback(ctx, param, value):
- if not value:
- ctx.abort()
-
- @click.command()
- @click.option('--yes', is_flag=True, callback=callback,
- expose_value=False, prompt='Do you want to continue?')
- def dropdb():
- pass
- """
- def decorator(f):
- def callback(ctx, param, value):
- if not value:
- ctx.abort()
- attrs.setdefault('is_flag', True)
- attrs.setdefault('callback', callback)
- attrs.setdefault('expose_value', False)
- attrs.setdefault('prompt', 'Do you want to continue?')
- attrs.setdefault('help', 'Confirm the action without prompting.')
- return option(*(param_decls or ('--yes',)), **attrs)(f)
- return decorator
-
-
-def password_option(*param_decls, **attrs):
- """Shortcut for password prompts.
-
- This is equivalent to decorating a function with :func:`option` with
- the following parameters::
-
- @click.command()
- @click.option('--password', prompt=True, confirmation_prompt=True,
- hide_input=True)
- def changeadmin(password):
- pass
- """
- def decorator(f):
- attrs.setdefault('prompt', True)
- attrs.setdefault('confirmation_prompt', True)
- attrs.setdefault('hide_input', True)
- return option(*(param_decls or ('--password',)), **attrs)(f)
- return decorator
-
-
-def version_option(version=None, *param_decls, **attrs):
- """Adds a ``--version`` option which immediately ends the program
- printing out the version number. This is implemented as an eager
- option that prints the version and exits the program in the callback.
-
- :param version: the version number to show. If not provided, Click
- attempts auto-discovery via setuptools.
- :param prog_name: the name of the program (defaults to autodetection)
- :param message: custom message to show instead of the default
- (``'%(prog)s, version %(version)s'``)
- :param others: everything else is forwarded to :func:`option`.
- """
- if version is None:
- module = sys._getframe(1).f_globals.get('__name__')
- def decorator(f):
- prog_name = attrs.pop('prog_name', None)
- message = attrs.pop('message', '%(prog)s, version %(version)s')
-
- def callback(ctx, param, value):
- if not value or ctx.resilient_parsing:
- return
- prog = prog_name
- if prog is None:
- prog = ctx.find_root().info_name
- ver = version
- if ver is None:
- try:
- import pkg_resources
- except ImportError:
- pass
- else:
- for dist in pkg_resources.working_set:
- scripts = dist.get_entry_map().get('console_scripts') or {}
- for script_name, entry_point in iteritems(scripts):
- if entry_point.module_name == module:
- ver = dist.version
- break
- if ver is None:
- raise RuntimeError('Could not determine version')
- echo(message % {
- 'prog': prog,
- 'version': ver,
- }, color=ctx.color)
- ctx.exit()
-
- attrs.setdefault('is_flag', True)
- attrs.setdefault('expose_value', False)
- attrs.setdefault('is_eager', True)
- attrs.setdefault('help', 'Show the version and exit.')
- attrs['callback'] = callback
- return option(*(param_decls or ('--version',)), **attrs)(f)
- return decorator
-
-
-def help_option(*param_decls, **attrs):
- """Adds a ``--help`` option which immediately ends the program
- printing out the help page. This is usually unnecessary to add as
- this is added by default to all commands unless suppressed.
-
- Like :func:`version_option`, this is implemented as an eager option
- that prints in the callback and exits.
-
- All arguments are forwarded to :func:`option`.
- """
- def decorator(f):
- def callback(ctx, param, value):
- if value and not ctx.resilient_parsing:
- echo(ctx.get_help(), color=ctx.color)
- ctx.exit()
- attrs.setdefault('is_flag', True)
- attrs.setdefault('expose_value', False)
- attrs.setdefault('help', 'Show this message and exit.')
- attrs.setdefault('is_eager', True)
- attrs['callback'] = callback
- return option(*(param_decls or ('--help',)), **attrs)(f)
- return decorator
-
-
-# Circular dependencies between core and decorators
-from .core import Command, Group, Argument, Option
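The convenience decorators above compose into ordinary commands. A small, hypothetical sketch combining `version_option`, `confirmation_option` and `password_option` (the command name and version string are illustrative only)::

    import click

    @click.command()
    @click.version_option('1.0.0', prog_name='dropdb')
    @click.confirmation_option(prompt='Are you sure you want to drop the db?')
    @click.password_option()
    def dropdb(password):
        # --yes skips the confirmation; --password is prompted for twice
        # with hidden input, per the defaults set by the decorators above.
        click.echo('Dropping database (credential length: %d)' % len(password))

    if __name__ == '__main__':
        dropdb()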
diff --git a/venv/Lib/site-packages/click/exceptions.py b/venv/Lib/site-packages/click/exceptions.py
deleted file mode 100644
index 74a4542..0000000
--- a/venv/Lib/site-packages/click/exceptions.py
+++ /dev/null
@@ -1,201 +0,0 @@
-from ._compat import PY2, filename_to_ui, get_text_stderr
-from .utils import echo
-
-
-class ClickException(Exception):
- """An exception that Click can handle and show to the user."""
-
- #: The exit code for this exception
- exit_code = 1
-
- def __init__(self, message):
- if PY2:
- if message is not None:
- message = message.encode('utf-8')
- Exception.__init__(self, message)
- self.message = message
-
- def format_message(self):
- return self.message
-
- def show(self, file=None):
- if file is None:
- file = get_text_stderr()
- echo('Error: %s' % self.format_message(), file=file)
-
-
-class UsageError(ClickException):
- """An internal exception that signals a usage error. This typically
- aborts any further handling.
-
- :param message: the error message to display.
- :param ctx: optionally the context that caused this error. Click will
- fill in the context automatically in some situations.
- """
- exit_code = 2
-
- def __init__(self, message, ctx=None):
- ClickException.__init__(self, message)
- self.ctx = ctx
-
- def show(self, file=None):
- if file is None:
- file = get_text_stderr()
- color = None
- if self.ctx is not None:
- color = self.ctx.color
- echo(self.ctx.get_usage() + '\n', file=file, color=color)
- echo('Error: %s' % self.format_message(), file=file, color=color)
-
-
-class BadParameter(UsageError):
- """An exception that formats out a standardized error message for a
- bad parameter. This is useful when thrown from a callback or type as
- Click will attach contextual information to it (for instance, which
- parameter it is).
-
- .. versionadded:: 2.0
-
- :param param: the parameter object that caused this error. This can
- be left out, and Click will attach this info itself
- if possible.
- :param param_hint: a string that shows up as parameter name. This
- can be used as an alternative to `param` in cases
- where custom validation should happen. If it is
- a string it's used as such; if it's a list, then
- each item is quoted and separated.
- """
-
- def __init__(self, message, ctx=None, param=None,
- param_hint=None):
- UsageError.__init__(self, message, ctx)
- self.param = param
- self.param_hint = param_hint
-
- def format_message(self):
- if self.param_hint is not None:
- param_hint = self.param_hint
- elif self.param is not None:
- param_hint = self.param.opts or [self.param.human_readable_name]
- else:
- return 'Invalid value: %s' % self.message
- if isinstance(param_hint, (tuple, list)):
- param_hint = ' / '.join('"%s"' % x for x in param_hint)
- return 'Invalid value for %s: %s' % (param_hint, self.message)
-
-
-class MissingParameter(BadParameter):
- """Raised if click required an option or argument but it was not
- provided when invoking the script.
-
- .. versionadded:: 4.0
-
- :param param_type: a string that indicates the type of the parameter.
- The default is to inherit the parameter type from
- the given `param`. Valid values are ``'parameter'``,
- ``'option'`` or ``'argument'``.
- """
-
- def __init__(self, message=None, ctx=None, param=None,
- param_hint=None, param_type=None):
- BadParameter.__init__(self, message, ctx, param, param_hint)
- self.param_type = param_type
-
- def format_message(self):
- if self.param_hint is not None:
- param_hint = self.param_hint
- elif self.param is not None:
- param_hint = self.param.opts or [self.param.human_readable_name]
- else:
- param_hint = None
- if isinstance(param_hint, (tuple, list)):
- param_hint = ' / '.join('"%s"' % x for x in param_hint)
-
- param_type = self.param_type
- if param_type is None and self.param is not None:
- param_type = self.param.param_type_name
-
- msg = self.message
- if self.param is not None:
- msg_extra = self.param.type.get_missing_message(self.param)
- if msg_extra:
- if msg:
- msg += '. ' + msg_extra
- else:
- msg = msg_extra
-
- return 'Missing %s%s%s%s' % (
- param_type,
- param_hint and ' %s' % param_hint or '',
- msg and '. ' or '.',
- msg or '',
- )
-
-
-class NoSuchOption(UsageError):
- """Raised if click attempted to handle an option that does not
- exist.
-
- .. versionadded:: 4.0
- """
-
- def __init__(self, option_name, message=None, possibilities=None,
- ctx=None):
- if message is None:
- message = 'no such option: %s' % option_name
- UsageError.__init__(self, message, ctx)
- self.option_name = option_name
- self.possibilities = possibilities
-
- def format_message(self):
- bits = [self.message]
- if self.possibilities:
- if len(self.possibilities) == 1:
- bits.append('Did you mean %s?' % self.possibilities[0])
- else:
- possibilities = sorted(self.possibilities)
- bits.append('(Possible options: %s)' % ', '.join(possibilities))
- return ' '.join(bits)
-
-
-class BadOptionUsage(UsageError):
- """Raised if an option is generally supplied but the use of the option
- was incorrect. This is for instance raised if the number of arguments
- for an option is not correct.
-
- .. versionadded:: 4.0
- """
-
- def __init__(self, message, ctx=None):
- UsageError.__init__(self, message, ctx)
-
-
-class BadArgumentUsage(UsageError):
- """Raised if an argument is generally supplied but the use of the argument
- was incorrect. This is for instance raised if the number of values
- for an argument is not correct.
-
- .. versionadded:: 6.0
- """
-
- def __init__(self, message, ctx=None):
- UsageError.__init__(self, message, ctx)
-
-
-class FileError(ClickException):
- """Raised if a file cannot be opened."""
-
- def __init__(self, filename, hint=None):
- ui_filename = filename_to_ui(filename)
- if hint is None:
- hint = 'unknown error'
- ClickException.__init__(self, hint)
- self.ui_filename = ui_filename
- self.filename = filename
-
- def format_message(self):
- return 'Could not open file %s: %s' % (self.ui_filename, self.message)
-
-
-class Abort(RuntimeError):
- """An internal signalling exception that signals Click to abort."""
diff --git a/venv/Lib/site-packages/click/formatting.py b/venv/Lib/site-packages/click/formatting.py
deleted file mode 100644
index a3d6a4d..0000000
--- a/venv/Lib/site-packages/click/formatting.py
+++ /dev/null
@@ -1,256 +0,0 @@
-from contextlib import contextmanager
-from .termui import get_terminal_size
-from .parser import split_opt
-from ._compat import term_len
-
-
-# Can force a width. This is used by the test system
-FORCED_WIDTH = None
-
-
-def measure_table(rows):
- widths = {}
- for row in rows:
- for idx, col in enumerate(row):
- widths[idx] = max(widths.get(idx, 0), term_len(col))
- return tuple(y for x, y in sorted(widths.items()))
-
-
-def iter_rows(rows, col_count):
- for row in rows:
- row = tuple(row)
- yield row + ('',) * (col_count - len(row))
-
-
-def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
- preserve_paragraphs=False):
- """A helper function that intelligently wraps text. By default, it
- assumes that it operates on a single paragraph of text but if the
- `preserve_paragraphs` parameter is provided it will intelligently
- handle paragraphs (defined by two empty lines).
-
- If paragraphs are handled, a paragraph can be prefixed with a
- line containing only the ``\\b`` character (``\\x08``) to indicate that
- no rewrapping should happen in that block.
-
- :param text: the text that should be rewrapped.
- :param width: the maximum width for the text.
- :param initial_indent: the initial indent that should be placed on the
- first line as a string.
- :param subsequent_indent: the indent string that should be placed on
- each consecutive line.
- :param preserve_paragraphs: if this flag is set then the wrapping will
- intelligently handle paragraphs.
- """
- from ._textwrap import TextWrapper
- text = text.expandtabs()
- wrapper = TextWrapper(width, initial_indent=initial_indent,
- subsequent_indent=subsequent_indent,
- replace_whitespace=False)
- if not preserve_paragraphs:
- return wrapper.fill(text)
-
- p = []
- buf = []
- indent = None
-
- def _flush_par():
- if not buf:
- return
- if buf[0].strip() == '\b':
- p.append((indent or 0, True, '\n'.join(buf[1:])))
- else:
- p.append((indent or 0, False, ' '.join(buf)))
- del buf[:]
-
- for line in text.splitlines():
- if not line:
- _flush_par()
- indent = None
- else:
- if indent is None:
- orig_len = term_len(line)
- line = line.lstrip()
- indent = orig_len - term_len(line)
- buf.append(line)
- _flush_par()
-
- rv = []
- for indent, raw, text in p:
- with wrapper.extra_indent(' ' * indent):
- if raw:
- rv.append(wrapper.indent_only(text))
- else:
- rv.append(wrapper.fill(text))
-
- return '\n\n'.join(rv)
-
-
-class HelpFormatter(object):
- """This class helps with formatting text-based help pages. It's
- usually just needed for very special internal cases, but it's also
- exposed so that developers can write their own fancy outputs.
-
- At present, it always writes into memory.
-
- :param indent_increment: the additional increment for each level.
- :param width: the width for the text. This defaults to the terminal
- width clamped to a maximum of 78.
- """
-
- def __init__(self, indent_increment=2, width=None, max_width=None):
- self.indent_increment = indent_increment
- if max_width is None:
- max_width = 80
- if width is None:
- width = FORCED_WIDTH
- if width is None:
- width = max(min(get_terminal_size()[0], max_width) - 2, 50)
- self.width = width
- self.current_indent = 0
- self.buffer = []
-
- def write(self, string):
- """Writes a unicode string into the internal buffer."""
- self.buffer.append(string)
-
- def indent(self):
- """Increases the indentation."""
- self.current_indent += self.indent_increment
-
- def dedent(self):
- """Decreases the indentation."""
- self.current_indent -= self.indent_increment
-
- def write_usage(self, prog, args='', prefix='Usage: '):
- """Writes a usage line into the buffer.
-
- :param prog: the program name.
- :param args: whitespace separated list of arguments.
- :param prefix: the prefix for the first line.
- """
- usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog)
- text_width = self.width - self.current_indent
-
- if text_width >= (term_len(usage_prefix) + 20):
- # The arguments will fit to the right of the prefix.
- indent = ' ' * term_len(usage_prefix)
- self.write(wrap_text(args, text_width,
- initial_indent=usage_prefix,
- subsequent_indent=indent))
- else:
- # The prefix is too long, put the arguments on the next line.
- self.write(usage_prefix)
- self.write('\n')
- indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4)
- self.write(wrap_text(args, text_width,
- initial_indent=indent,
- subsequent_indent=indent))
-
- self.write('\n')
-
- def write_heading(self, heading):
- """Writes a heading into the buffer."""
- self.write('%*s%s:\n' % (self.current_indent, '', heading))
-
- def write_paragraph(self):
- """Writes a paragraph into the buffer."""
- if self.buffer:
- self.write('\n')
-
- def write_text(self, text):
- """Writes re-indented text into the buffer. This rewraps and
- preserves paragraphs.
- """
- text_width = max(self.width - self.current_indent, 11)
- indent = ' ' * self.current_indent
- self.write(wrap_text(text, text_width,
- initial_indent=indent,
- subsequent_indent=indent,
- preserve_paragraphs=True))
- self.write('\n')
-
- def write_dl(self, rows, col_max=30, col_spacing=2):
- """Writes a definition list into the buffer. This is how options
- and commands are usually formatted.
-
- :param rows: a list of two item tuples for the terms and values.
- :param col_max: the maximum width of the first column.
- :param col_spacing: the number of spaces between the first and
- second column.
- """
- rows = list(rows)
- widths = measure_table(rows)
- if len(widths) != 2:
- raise TypeError('Expected two columns for definition list')
-
- first_col = min(widths[0], col_max) + col_spacing
-
- for first, second in iter_rows(rows, len(widths)):
- self.write('%*s%s' % (self.current_indent, '', first))
- if not second:
- self.write('\n')
- continue
- if term_len(first) <= first_col - col_spacing:
- self.write(' ' * (first_col - term_len(first)))
- else:
- self.write('\n')
- self.write(' ' * (first_col + self.current_indent))
-
- text_width = max(self.width - first_col - 2, 10)
- # Note: an `iter()` object is always truthy, so guarding on the
- # iterator could never reach the else branch; check the list itself.
- lines = wrap_text(second, text_width).splitlines()
- if lines:
- self.write(lines[0] + '\n')
- for line in lines[1:]:
- self.write('%*s%s\n' % (
- first_col + self.current_indent, '', line))
- else:
- self.write('\n')
-
- @contextmanager
- def section(self, name):
- """Helpful context manager that writes a paragraph, a heading,
- and the indents.
-
- :param name: the section name that is written as heading.
- """
- self.write_paragraph()
- self.write_heading(name)
- self.indent()
- try:
- yield
- finally:
- self.dedent()
-
- @contextmanager
- def indentation(self):
- """A context manager that increases the indentation."""
- self.indent()
- try:
- yield
- finally:
- self.dedent()
-
- def getvalue(self):
- """Returns the buffer contents."""
- return ''.join(self.buffer)
-
-
-def join_options(options):
- """Given a list of option strings this joins them in the most appropriate
- way and returns them in the form ``(formatted_string,
- any_prefix_is_slash)`` where the second item in the tuple is a flag that
- indicates if any of the option prefixes was a slash.
- """
- rv = []
- any_prefix_is_slash = False
- for opt in options:
- prefix = split_opt(opt)[0]
- if prefix == '/':
- any_prefix_is_slash = True
- rv.append((len(prefix), opt))
-
- rv.sort(key=lambda x: x[0])
-
- rv = ', '.join(x[1] for x in rv)
- return rv, any_prefix_is_slash
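The formatter above can also be driven directly. A short, hypothetical sketch of building a help page by hand with only the API defined in this file::

    from click.formatting import HelpFormatter

    formatter = HelpFormatter(width=60)
    formatter.write_usage('tool', '[OPTIONS] SRC DST')
    with formatter.section('Options'):
        # write_dl() expects two-item (term, description) tuples.
        formatter.write_dl([
            ('-v, --verbose', 'Enable verbose output.'),
            ('--help', 'Show this message and exit.'),
        ])
    print(formatter.getvalue())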
diff --git a/venv/Lib/site-packages/click/globals.py b/venv/Lib/site-packages/click/globals.py
deleted file mode 100644
index 14338e6..0000000
--- a/venv/Lib/site-packages/click/globals.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from threading import local
-
-
-_local = local()
-
-
-def get_current_context(silent=False):
- """Returns the current click context. This can be used as a way to
- access the current context object from anywhere. This is a more implicit
- alternative to the :func:`pass_context` decorator. This function is
- primarily useful for helpers such as :func:`echo` which might be
- interested in changing its behavior based on the current context.
-
- To push the current context, :meth:`Context.scope` can be used.
-
- .. versionadded:: 5.0
-
- :param silent: if set to `True` the return value is `None` if no context
- is available. The default behavior is to raise a
- :exc:`RuntimeError`.
- """
- try:
- return getattr(_local, 'stack')[-1]
- except (AttributeError, IndexError):
- if not silent:
- raise RuntimeError('There is no active click context.')
-
-
-def push_context(ctx):
- """Pushes a new context to the current stack."""
- _local.__dict__.setdefault('stack', []).append(ctx)
-
-
-def pop_context():
- """Removes the top level from the stack."""
- _local.stack.pop()
-
-
-def resolve_color_default(color=None):
- """"Internal helper to get the default value of the color flag. If a
- value is passed it's returned unchanged, otherwise it's looked up from
- the current context.
- """
- if color is not None:
- return color
- ctx = get_current_context(silent=True)
- if ctx is not None:
- return ctx.color
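The thread-local stack above is what makes `get_current_context` work from helper code that never receives a context explicitly. A small, hypothetical sketch (the `log` helper is illustrative only)::

    import click

    def log(message):
        # Reaches the active context through the thread-local stack,
        # falling back gracefully when no command is running.
        ctx = click.get_current_context(silent=True)
        prefix = ctx.info_name if ctx is not None else 'app'
        click.echo('[%s] %s' % (prefix, message))

    @click.command()
    def cli():
        log('starting up')

    if __name__ == '__main__':
        cli()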
diff --git a/venv/Lib/site-packages/click/parser.py b/venv/Lib/site-packages/click/parser.py
deleted file mode 100644
index 9775c9f..0000000
--- a/venv/Lib/site-packages/click/parser.py
+++ /dev/null
@@ -1,426 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- click.parser
- ~~~~~~~~~~~~
-
- This module started out as largely a copy-paste of the stdlib's
- optparse module with the features removed that we do not need from
- optparse because we implement them in Click on a higher level (for
- instance type handling, help formatting and a lot more).
-
- The plan is to remove more and more from here over time.
-
- The reason this is a different module and not optparse from the stdlib
- is that there are differences in 2.x and 3.x about the error messages
- generated and optparse in the stdlib uses gettext for no good reason
- and might cause us issues.
-"""
-import re
-from collections import deque
-from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
- BadArgumentUsage
-
-
-def _unpack_args(args, nargs_spec):
- """Given an iterable of arguments and an iterable of nargs specifications,
- it returns a tuple with all the unpacked arguments at the first index
- and all remaining arguments as the second.
-
- The nargs specification is the number of arguments that should be consumed
- or `-1` to indicate that this position should eat up all the remainders.
-
- Missing items are filled with `None`.
- """
- args = deque(args)
- nargs_spec = deque(nargs_spec)
- rv = []
- spos = None
-
- def _fetch(c):
- try:
- if spos is None:
- return c.popleft()
- else:
- return c.pop()
- except IndexError:
- return None
-
- while nargs_spec:
- nargs = _fetch(nargs_spec)
- if nargs == 1:
- rv.append(_fetch(args))
- elif nargs > 1:
- x = [_fetch(args) for _ in range(nargs)]
- # If we're reversed, we're pulling in the arguments in reverse,
- # so we need to turn them around.
- if spos is not None:
- x.reverse()
- rv.append(tuple(x))
- elif nargs < 0:
- if spos is not None:
- raise TypeError('Cannot have two nargs < 0')
- spos = len(rv)
- rv.append(None)
-
- # spos is the position of the wildcard (star). If it's not `None`,
- # we fill it with the remainder.
- if spos is not None:
- rv[spos] = tuple(args)
- args = []
- rv[spos + 1:] = reversed(rv[spos + 1:])
-
- return tuple(rv), list(args)
-
-
-def _error_opt_args(nargs, opt):
- if nargs == 1:
- raise BadOptionUsage('%s option requires an argument' % opt)
- raise BadOptionUsage('%s option requires %d arguments' % (opt, nargs))
-
-
-def split_opt(opt):
- first = opt[:1]
- if first.isalnum():
- return '', opt
- if opt[1:2] == first:
- return opt[:2], opt[2:]
- return first, opt[1:]
-
-
-def normalize_opt(opt, ctx):
- if ctx is None or ctx.token_normalize_func is None:
- return opt
- prefix, opt = split_opt(opt)
- return prefix + ctx.token_normalize_func(opt)
-
-
-def split_arg_string(string):
- """Given an argument string this attempts to split it into small parts."""
- rv = []
- for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
- r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
- r'|\S+)\s*', string, re.S):
- arg = match.group().strip()
- if arg[:1] == arg[-1:] and arg[:1] in '"\'':
- arg = arg[1:-1].encode('ascii', 'backslashreplace') \
- .decode('unicode-escape')
- try:
- arg = type(string)(arg)
- except UnicodeError:
- pass
- rv.append(arg)
- return rv
-
-
-class Option(object):
-
- def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
- self._short_opts = []
- self._long_opts = []
- self.prefixes = set()
-
- for opt in opts:
- prefix, value = split_opt(opt)
- if not prefix:
- raise ValueError('Invalid start character for option (%s)'
- % opt)
- self.prefixes.add(prefix[0])
- if len(prefix) == 1 and len(value) == 1:
- self._short_opts.append(opt)
- else:
- self._long_opts.append(opt)
- self.prefixes.add(prefix)
-
- if action is None:
- action = 'store'
-
- self.dest = dest
- self.action = action
- self.nargs = nargs
- self.const = const
- self.obj = obj
-
- @property
- def takes_value(self):
- return self.action in ('store', 'append')
-
- def process(self, value, state):
- if self.action == 'store':
- state.opts[self.dest] = value
- elif self.action == 'store_const':
- state.opts[self.dest] = self.const
- elif self.action == 'append':
- state.opts.setdefault(self.dest, []).append(value)
- elif self.action == 'append_const':
- state.opts.setdefault(self.dest, []).append(self.const)
- elif self.action == 'count':
- state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
- else:
- raise ValueError('unknown action %r' % self.action)
- state.order.append(self.obj)
-
-
-class Argument(object):
-
- def __init__(self, dest, nargs=1, obj=None):
- self.dest = dest
- self.nargs = nargs
- self.obj = obj
-
- def process(self, value, state):
- if self.nargs > 1:
- holes = sum(1 for x in value if x is None)
- if holes == len(value):
- value = None
- elif holes != 0:
- raise BadArgumentUsage('argument %s takes %d values'
- % (self.dest, self.nargs))
- state.opts[self.dest] = value
- state.order.append(self.obj)
-
-
-class ParsingState(object):
-
- def __init__(self, rargs):
- self.opts = {}
- self.largs = []
- self.rargs = rargs
- self.order = []
-
-
-class OptionParser(object):
- """The option parser is an internal class that is ultimately used to
- parse options and arguments. It's modelled after optparse and brings
- a similar but vastly simplified API. It should generally not be used
- directly as the high level Click classes wrap it for you.
-
- It's not nearly as extensible as optparse or argparse as it does not
- implement features that are implemented on a higher level (such as
- types or defaults).
-
- :param ctx: optionally the :class:`~click.Context` that this parser
- should work with.
- """
-
- def __init__(self, ctx=None):
- #: The :class:`~click.Context` for this parser. This might be
- #: `None` for some advanced use cases.
- self.ctx = ctx
- #: This controls how the parser deals with interspersed arguments.
- #: If this is set to `False`, the parser will stop on the first
- #: non-option. Click uses this to implement nested subcommands
- #: safely.
- self.allow_interspersed_args = True
- #: This tells the parser how to deal with unknown options. By
- #: default it will error out (which is sensible), but there is a
- #: second mode where it will ignore it and continue processing
- #: after shifting all the unknown options into the resulting args.
- self.ignore_unknown_options = False
- if ctx is not None:
- self.allow_interspersed_args = ctx.allow_interspersed_args
- self.ignore_unknown_options = ctx.ignore_unknown_options
- self._short_opt = {}
- self._long_opt = {}
- self._opt_prefixes = set(['-', '--'])
- self._args = []
-
- def add_option(self, opts, dest, action=None, nargs=1, const=None,
- obj=None):
- """Adds a new option named `dest` to the parser. The destination
- is not inferred (unlike with optparse) and needs to be explicitly
- provided. Action can be any of ``store``, ``store_const``,
- ``append``, ``append_const`` or ``count``.
-
- The `obj` can be used to identify the option in the order list
- that is returned from the parser.
- """
- if obj is None:
- obj = dest
- opts = [normalize_opt(opt, self.ctx) for opt in opts]
- option = Option(opts, dest, action=action, nargs=nargs,
- const=const, obj=obj)
- self._opt_prefixes.update(option.prefixes)
- for opt in option._short_opts:
- self._short_opt[opt] = option
- for opt in option._long_opts:
- self._long_opt[opt] = option
-
- def add_argument(self, dest, nargs=1, obj=None):
- """Adds a positional argument named `dest` to the parser.
-
- The `obj` can be used to identify the argument in the order list
- that is returned from the parser.
- """
- if obj is None:
- obj = dest
- self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
-
- def parse_args(self, args):
- """Parses positional arguments and returns ``(values, args, order)``
- for the parsed options and arguments as well as the leftover
- arguments if there are any. The order is a list of objects as they
- appear on the command line. If arguments appear multiple times they
- will be memorized multiple times as well.
- """
- state = ParsingState(args)
- try:
- self._process_args_for_options(state)
- self._process_args_for_args(state)
- except UsageError:
- if self.ctx is None or not self.ctx.resilient_parsing:
- raise
- return state.opts, state.largs, state.order
-
- def _process_args_for_args(self, state):
- pargs, args = _unpack_args(state.largs + state.rargs,
- [x.nargs for x in self._args])
-
- for idx, arg in enumerate(self._args):
- arg.process(pargs[idx], state)
-
- state.largs = args
- state.rargs = []
-
- def _process_args_for_options(self, state):
- while state.rargs:
- arg = state.rargs.pop(0)
- arglen = len(arg)
- # Double dashes always handled explicitly regardless of what
- # prefixes are valid.
- if arg == '--':
- return
- elif arg[:1] in self._opt_prefixes and arglen > 1:
- self._process_opts(arg, state)
- elif self.allow_interspersed_args:
- state.largs.append(arg)
- else:
- state.rargs.insert(0, arg)
- return
-
- # Say this is the original argument list:
- # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
- # ^
- # (we are about to process arg(i)).
- #
- # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
- # [arg0, ..., arg(i-1)] (any options and their arguments will have
- # been removed from largs).
- #
- # The while loop will usually consume 1 or more arguments per pass.
- # If it consumes 1 (eg. arg is an option that takes no arguments),
- # then after _process_arg() is done the situation is:
- #
- # largs = subset of [arg0, ..., arg(i)]
- # rargs = [arg(i+1), ..., arg(N-1)]
- #
- # If allow_interspersed_args is false, largs will always be
- # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
- # not a very interesting subset!
-
- def _match_long_opt(self, opt, explicit_value, state):
- if opt not in self._long_opt:
- possibilities = [word for word in self._long_opt
- if word.startswith(opt)]
- raise NoSuchOption(opt, possibilities=possibilities)
-
- option = self._long_opt[opt]
- if option.takes_value:
- # At this point it's safe to modify rargs by injecting the
- # explicit value, because no exception is raised in this
- # branch. This means that the inserted value will be fully
- # consumed.
- if explicit_value is not None:
- state.rargs.insert(0, explicit_value)
-
- nargs = option.nargs
- if len(state.rargs) < nargs:
- _error_opt_args(nargs, opt)
- elif nargs == 1:
- value = state.rargs.pop(0)
- else:
- value = tuple(state.rargs[:nargs])
- del state.rargs[:nargs]
-
- elif explicit_value is not None:
- raise BadOptionUsage('%s option does not take a value' % opt)
-
- else:
- value = None
-
- option.process(value, state)
-
- def _match_short_opt(self, arg, state):
- stop = False
- i = 1
- prefix = arg[0]
- unknown_options = []
-
- for ch in arg[1:]:
- opt = normalize_opt(prefix + ch, self.ctx)
- option = self._short_opt.get(opt)
- i += 1
-
- if not option:
- if self.ignore_unknown_options:
- unknown_options.append(ch)
- continue
- raise NoSuchOption(opt)
- if option.takes_value:
- # Any characters left in arg? Pretend they're the
- # next arg, and stop consuming characters of arg.
- if i < len(arg):
- state.rargs.insert(0, arg[i:])
- stop = True
-
- nargs = option.nargs
- if len(state.rargs) < nargs:
- _error_opt_args(nargs, opt)
- elif nargs == 1:
- value = state.rargs.pop(0)
- else:
- value = tuple(state.rargs[:nargs])
- del state.rargs[:nargs]
-
- else:
- value = None
-
- option.process(value, state)
-
- if stop:
- break
-
- # If we got any unknown options we recombine the string of the
- # remaining options and re-attach the prefix, then report that
- # to the state as a new larg. This way basic combinatorics
- # can still be achieved while ignoring unknown arguments.
- if self.ignore_unknown_options and unknown_options:
- state.largs.append(prefix + ''.join(unknown_options))
-
- def _process_opts(self, arg, state):
- explicit_value = None
- # Long option handling happens in two parts. The first part is
- # supporting explicitly attached values. In any case, we will try
- # to long match the option first.
- if '=' in arg:
- long_opt, explicit_value = arg.split('=', 1)
- else:
- long_opt = arg
- norm_long_opt = normalize_opt(long_opt, self.ctx)
-
- # At this point we will match the (assumed) long option through
- # the long option matching code. Note that this allows options
- # like "-foo" to be matched as long options.
- try:
- self._match_long_opt(norm_long_opt, explicit_value, state)
- except NoSuchOption:
- # At this point the long option matching failed, and we need
- # to try with short options. However there is a special rule
- # which says that if we have a two-character option prefix
- # (as with "--foo", for instance), we do not dispatch to the
- # short option code and will instead raise the no-option
- # error.
- if arg[:2] not in self._opt_prefixes:
- return self._match_short_opt(arg, state)
- if not self.ignore_unknown_options:
- raise
- state.largs.append(arg)
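Although the high-level Click classes normally wrap it, the parser above can be exercised on its own. A minimal, hypothetical sketch using only the API defined in this file::

    from click.parser import OptionParser

    parser = OptionParser()
    parser.add_option(['-n', '--name'], dest='name')
    parser.add_option(['-v'], dest='verbose', action='count')
    parser.add_argument(dest='src', nargs=1)

    opts, largs, order = parser.parse_args(['-vv', '--name=demo', 'input.txt'])
    # opts == {'verbose': 2, 'name': 'demo', 'src': 'input.txt'}
    # largs == [] (no leftover arguments); order lists objs as parsed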
diff --git a/venv/Lib/site-packages/click/termui.py b/venv/Lib/site-packages/click/termui.py
deleted file mode 100644
index d9fba52..0000000
--- a/venv/Lib/site-packages/click/termui.py
+++ /dev/null
@@ -1,539 +0,0 @@
-import os
-import sys
-import struct
-
-from ._compat import raw_input, text_type, string_types, \
- isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
-from .utils import echo
-from .exceptions import Abort, UsageError
-from .types import convert_type
-from .globals import resolve_color_default
-
-
-# The prompt functions to use. The doc tools currently override these
-# functions to customize how they work.
-visible_prompt_func = raw_input
-
-_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta',
- 'cyan', 'white', 'reset')
-_ansi_reset_all = '\033[0m'
-
-
-def hidden_prompt_func(prompt):
- import getpass
- return getpass.getpass(prompt)
-
-
-def _build_prompt(text, suffix, show_default=False, default=None):
- prompt = text
- if default is not None and show_default:
- prompt = '%s [%s]' % (prompt, default)
- return prompt + suffix
-
-
-def prompt(text, default=None, hide_input=False,
- confirmation_prompt=False, type=None,
- value_proc=None, prompt_suffix=': ',
- show_default=True, err=False):
- """Prompts a user for input. This is a convenience function that can
- be used to prompt a user for input later.
-
- If the user aborts the input by sending an interrupt signal, this
- function will catch it and raise an :exc:`Abort` exception.
-
- .. versionadded:: 6.0
- Added unicode support for cmd.exe on Windows.
-
- .. versionadded:: 4.0
- Added the `err` parameter.
-
- :param text: the text to show for the prompt.
- :param default: the default value to use if no input happens. If this
- is not given it will prompt until it's aborted.
- :param hide_input: if this is set to true then the input value will
- be hidden.
- :param confirmation_prompt: asks for confirmation for the value.
- :param type: the type to use to check the value against.
- :param value_proc: if this parameter is provided it's a function that
- is invoked instead of the type conversion to
- convert a value.
- :param prompt_suffix: a suffix that should be added to the prompt.
- :param show_default: shows or hides the default value in the prompt.
- :param err: if set to true the file defaults to ``stderr`` instead of
- ``stdout``, the same as with echo.
- """
- result = None
-
- def prompt_func(text):
- f = hide_input and hidden_prompt_func or visible_prompt_func
- try:
- # Write the prompt separately so that we get nice
- # coloring through colorama on Windows
- echo(text, nl=False, err=err)
- return f('')
- except (KeyboardInterrupt, EOFError):
- # getpass doesn't print a newline if the user aborts input with ^C.
- # Allegedly this behavior is inherited from getpass(3).
- # A doc bug has been filed at https://bugs.python.org/issue24711
- if hide_input:
- echo(None, err=err)
- raise Abort()
-
- if value_proc is None:
- value_proc = convert_type(type, default)
-
- prompt = _build_prompt(text, prompt_suffix, show_default, default)
-
- while 1:
- while 1:
- value = prompt_func(prompt)
- if value:
- break
- # If a default is set and used, then the confirmation
- # prompt is always skipped because that's the only thing
- # that really makes sense.
- elif default is not None:
- return default
- try:
- result = value_proc(value)
- except UsageError as e:
- echo('Error: %s' % e.message, err=err)
- continue
- if not confirmation_prompt:
- return result
- while 1:
- value2 = prompt_func('Repeat for confirmation: ')
- if value2:
- break
- if value == value2:
- return result
- echo('Error: the two entered values do not match', err=err)
-
-
-def confirm(text, default=False, abort=False, prompt_suffix=': ',
- show_default=True, err=False):
- """Prompts for confirmation (yes/no question).
-
- If the user aborts the input by sending an interrupt signal this
- function will catch it and raise an :exc:`Abort` exception.
-
- .. versionadded:: 4.0
- Added the `err` parameter.
-
- :param text: the question to ask.
- :param default: the default for the prompt.
- :param abort: if this is set to `True` a negative answer aborts the
- prompt by raising :exc:`Abort`.
- :param prompt_suffix: a suffix that should be added to the prompt.
- :param show_default: shows or hides the default value in the prompt.
- :param err: if set to true the file defaults to ``stderr`` instead of
- ``stdout``, the same as with echo.
- """
- prompt = _build_prompt(text, prompt_suffix, show_default,
- default and 'Y/n' or 'y/N')
- while 1:
- try:
- # Write the prompt separately so that we get nice
- # coloring through colorama on Windows
- echo(prompt, nl=False, err=err)
- value = visible_prompt_func('').lower().strip()
- except (KeyboardInterrupt, EOFError):
- raise Abort()
- if value in ('y', 'yes'):
- rv = True
- elif value in ('n', 'no'):
- rv = False
- elif value == '':
- rv = default
- else:
- echo('Error: invalid input', err=err)
- continue
- break
- if abort and not rv:
- raise Abort()
- return rv
-
-
-def get_terminal_size():
- """Returns the current size of the terminal as tuple in the form
- ``(width, height)`` in columns and rows.
- """
- # If shutil has get_terminal_size() (Python 3.3 and later) use that
- if sys.version_info >= (3, 3):
- import shutil
- shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None)
- if shutil_get_terminal_size:
- sz = shutil_get_terminal_size()
- return sz.columns, sz.lines
-
- if get_winterm_size is not None:
- return get_winterm_size()
-
- def ioctl_gwinsz(fd):
- try:
- import fcntl
- import termios
- cr = struct.unpack(
- 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
- except Exception:
- return
- return cr
-
- cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
- if not cr:
- try:
- fd = os.open(os.ctermid(), os.O_RDONLY)
- try:
- cr = ioctl_gwinsz(fd)
- finally:
- os.close(fd)
- except Exception:
- pass
- if not cr or not cr[0] or not cr[1]:
- cr = (os.environ.get('LINES', 25),
- os.environ.get('COLUMNS', DEFAULT_COLUMNS))
- return int(cr[1]), int(cr[0])
-
-
-def echo_via_pager(text, color=None):
- """This function takes a text and shows it via an environment specific
- pager on stdout.
-
- .. versionchanged:: 3.0
- Added the `color` flag.
-
- :param text: the text to page.
- :param color: controls if the pager supports ANSI colors or not. The
- default is autodetection.
- """
- color = resolve_color_default(color)
- if not isinstance(text, string_types):
- text = text_type(text)
- from ._termui_impl import pager
- return pager(text + '\n', color)
-
-
-def progressbar(iterable=None, length=None, label=None, show_eta=True,
- show_percent=None, show_pos=False,
- item_show_func=None, fill_char='#', empty_char='-',
- bar_template='%(label)s [%(bar)s] %(info)s',
- info_sep=' ', width=36, file=None, color=None):
- """This function creates an iterable context manager that can be used
- to iterate over something while showing a progress bar. It will
- either iterate over the `iterable` or `length` items (that are counted
- up). While iteration happens, this function will print a rendered
- progress bar to the given `file` (defaults to stdout) and will attempt
- to calculate remaining time and more. By default, this progress bar
- will not be rendered if the file is not a terminal.
-
- The context manager creates the progress bar. When the context
- manager is entered the progress bar is already displayed. With every
- iteration over the progress bar, the iterable passed to the bar is
- advanced and the bar is updated. When the context manager exits,
- a newline is printed and the progress bar is finalized on screen.
-
- No other printing must happen while the bar is displayed, or the
- progress bar will be unintentionally destroyed.
-
- Example usage::
-
- with progressbar(items) as bar:
- for item in bar:
- do_something_with(item)
-
- Alternatively, if no iterable is specified, one can manually update the
- progress bar through the `update()` method instead of directly
- iterating over the progress bar. The update method accepts the number
- of steps to increment the bar with::
-
- with progressbar(length=chunks.total_bytes) as bar:
- for chunk in chunks:
- process_chunk(chunk)
- bar.update(chunks.bytes)
-
- .. versionadded:: 2.0
-
- .. versionadded:: 4.0
- Added the `color` parameter. Added an `update` method to the
- progressbar object.
-
- :param iterable: an iterable to iterate over. If not provided the length
- is required.
- :param length: the number of items to iterate over. By default the
- progressbar will attempt to ask the iterator about its
- length, which might or might not work. If an iterable is
- also provided this parameter can be used to override the
- length. If an iterable is not provided the progress bar
- will iterate over a range of that length.
- :param label: the label to show next to the progress bar.
- :param show_eta: enables or disables the estimated time display. This is
- automatically disabled if the length cannot be
- determined.
- :param show_percent: enables or disables the percentage display. The
- default is `True` if the iterable has a length or
- `False` if not.
- :param show_pos: enables or disables the absolute position display. The
- default is `False`.
- :param item_show_func: a function called with the current item which
- can return a string to show the current item
- next to the progress bar. Note that the current
- item can be `None`!
- :param fill_char: the character to use to show the filled part of the
- progress bar.
- :param empty_char: the character to use to show the non-filled part of
- the progress bar.
- :param bar_template: the format string to use as template for the bar.
- The parameters in it are ``label`` for the label,
- ``bar`` for the progress bar and ``info`` for the
- info section.
- :param info_sep: the separator between multiple info items (eta etc.)
- :param width: the width of the progress bar in characters, 0 means full
- terminal width
- :param file: the file to write to. If this is not a terminal then
- only the label is printed.
- :param color: controls if the terminal supports ANSI colors or not. The
- default is autodetection. This is only needed if ANSI
- codes are included anywhere in the progress bar output
- which is not the case by default.
- """
- from ._termui_impl import ProgressBar
- color = resolve_color_default(color)
- return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,
- show_percent=show_percent, show_pos=show_pos,
- item_show_func=item_show_func, fill_char=fill_char,
- empty_char=empty_char, bar_template=bar_template,
- info_sep=info_sep, file=file, label=label,
- width=width, color=color)
-
-
-def clear():
- """Clears the terminal screen. This will have the effect of clearing
- the whole visible space of the terminal and moving the cursor to the
- top left. This does not do anything if not connected to a terminal.
-
- .. versionadded:: 2.0
- """
- if not isatty(sys.stdout):
- return
- # If we're on Windows and we don't have colorama available, then we
- # clear the screen by shelling out. Otherwise we can use an escape
- # sequence.
- if WIN:
- os.system('cls')
- else:
- sys.stdout.write('\033[2J\033[1;1H')
-
-
-def style(text, fg=None, bg=None, bold=None, dim=None, underline=None,
- blink=None, reverse=None, reset=True):
- """Styles a text with ANSI styles and returns the new string. By
- default the styling is self contained which means that at the end
- of the string a reset code is issued. This can be prevented by
- passing ``reset=False``.
-
- Examples::
-
- click.echo(click.style('Hello World!', fg='green'))
- click.echo(click.style('ATTENTION!', blink=True))
- click.echo(click.style('Some things', reverse=True, fg='cyan'))
-
- Supported color names:
-
- * ``black`` (might be a gray)
- * ``red``
- * ``green``
- * ``yellow`` (might be an orange)
- * ``blue``
- * ``magenta``
- * ``cyan``
- * ``white`` (might be light gray)
- * ``reset`` (reset the color code only)
-
- .. versionadded:: 2.0
-
- :param text: the string to style with ansi codes.
- :param fg: if provided this will become the foreground color.
- :param bg: if provided this will become the background color.
- :param bold: if provided this will enable or disable bold mode.
- :param dim: if provided this will enable or disable dim mode. This is
- poorly supported by terminals.
- :param underline: if provided this will enable or disable underline.
- :param blink: if provided this will enable or disable blinking.
- :param reverse: if provided this will enable or disable inverse
- rendering (foreground becomes background and the
- other way round).
- :param reset: by default a reset-all code is added at the end of the
- string which means that styles do not carry over. This
- can be disabled to compose styles.
- """
- bits = []
- if fg:
- try:
- bits.append('\033[%dm' % (_ansi_colors.index(fg) + 30))
- except ValueError:
- raise TypeError('Unknown color %r' % fg)
- if bg:
- try:
- bits.append('\033[%dm' % (_ansi_colors.index(bg) + 40))
- except ValueError:
- raise TypeError('Unknown color %r' % bg)
- if bold is not None:
- bits.append('\033[%dm' % (1 if bold else 22))
- if dim is not None:
- bits.append('\033[%dm' % (2 if dim else 22))
- if underline is not None:
- bits.append('\033[%dm' % (4 if underline else 24))
- if blink is not None:
- bits.append('\033[%dm' % (5 if blink else 25))
- if reverse is not None:
- bits.append('\033[%dm' % (7 if reverse else 27))
- bits.append(text)
- if reset:
- bits.append(_ansi_reset_all)
- return ''.join(bits)
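-
-
-# A minimal usage sketch (not part of click's source): because style()
-# appends a reset code by default, passing ``reset=False`` lets several
-# styled fragments be composed into one string. The strings are
-# illustrative only.
-def _style_composition_example():
-    prefix = style('error: ', fg='red', bold=True, reset=False)
-    return prefix + style('file not found', fg='red')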
-
-
-def unstyle(text):
- """Removes ANSI styling information from a string. Usually it's not
- necessary to use this function as Click's echo function will
- automatically remove styling if necessary.
-
- .. versionadded:: 2.0
-
- :param text: the text to remove style information from.
- """
- return strip_ansi(text)
-
-
-def secho(text, file=None, nl=True, err=False, color=None, **styles):
- """This function combines :func:`echo` and :func:`style` into one
- call. As such the following two calls are the same::
-
- click.secho('Hello World!', fg='green')
- click.echo(click.style('Hello World!', fg='green'))
-
- All keyword arguments are forwarded to the underlying functions
- depending on which one they go with.
-
- .. versionadded:: 2.0
- """
- return echo(style(text, **styles), file=file, nl=nl, err=err, color=color)
-
-
-def edit(text=None, editor=None, env=None, require_save=True,
- extension='.txt', filename=None):
- r"""Edits the given text in the defined editor. If an editor is given
- (should be the full path to the executable but the regular operating
- system search path is used for finding the executable) it overrides
- the detected editor. Optionally, some environment variables can be
- used. If the editor is closed without changes, `None` is returned. In
- case a file is edited directly the return value is always `None` and
- `require_save` and `extension` are ignored.
-
- If the editor cannot be opened a :exc:`UsageError` is raised.
-
- Note for Windows: to simplify cross-platform usage, the newlines are
- automatically converted from POSIX to Windows and vice versa. As such,
- the message here will have ``\n`` as newline markers.
-
- :param text: the text to edit.
- :param editor: optionally the editor to use. Defaults to automatic
- detection.
- :param env: environment variables to forward to the editor.
- :param require_save: if this is true, then not saving in the editor
- will make the return value become `None`.
- :param extension: the extension to tell the editor about. This defaults
- to `.txt` but changing this might change syntax
- highlighting.
- :param filename: if provided it will edit this file instead of the
- provided text contents. It will not use a temporary
- file as an indirection in that case.
- """
- from ._termui_impl import Editor
- editor = Editor(editor=editor, env=env, require_save=require_save,
- extension=extension)
- if filename is None:
- return editor.edit(text)
- editor.edit_file(filename)
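-
-
-# A hypothetical usage sketch (not part of click's source): collecting a
-# commit-style message in the user's editor. With require_save=True an
-# unsaved session returns `None`, which is treated as an abort here.
-def _edit_example():
-    message = edit('# Describe your change\n', extension='.md')
-    if message is None:
-        return 'aborted'
-    return message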
-
-
-def launch(url, wait=False, locate=False):
- """This function launches the given URL (or filename) in the default
- viewer application for this file type. If this is an executable, it
- might launch the executable in a new session. The return value is
- the exit code of the launched application. Usually, ``0`` indicates
- success.
-
- Examples::
-
- click.launch('http://click.pocoo.org/')
- click.launch('/my/downloaded/file', locate=True)
-
- .. versionadded:: 2.0
-
- :param url: URL or filename of the thing to launch.
- :param wait: waits for the program to stop.
- :param locate: if this is set to `True` then instead of launching the
- application associated with the URL it will attempt to
- launch a file manager with the file located. This
- might have weird effects if the URL does not point to
- the filesystem.
- """
- from ._termui_impl import open_url
- return open_url(url, wait=wait, locate=locate)
-
-
-# If this is provided, getchar() calls into this instead. This is used
-# for unittesting purposes.
-_getchar = None
-
-
-def getchar(echo=False):
- """Fetches a single character from the terminal and returns it. This
- will always return a unicode character and under certain rare
- circumstances this might return more than one character. More than
- one character is returned when, for whatever reason, multiple
- characters end up in the terminal buffer or standard input was not
- actually a terminal.
-
- Note that this will always read from the terminal, even if something
- is piped into the standard input.
-
- .. versionadded:: 2.0
-
- :param echo: if set to `True`, the character read will also show up on
- the terminal. The default is to not show it.
- """
- f = _getchar
- if f is None:
- from ._termui_impl import getchar as f
- return f(echo)
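-
-
-# A minimal confirmation-loop sketch (not part of click's source) built on
-# getchar(): raw keys are read until 'y' or 'n' is pressed.
-def _getchar_example():
-    echo('Continue? [y/n] ', nl=False)
-    while True:
-        ch = getchar()
-        if ch in (u'y', u'n'):
-            echo(ch)
-            return ch == u'y'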
-
-
-def pause(info='Press any key to continue ...', err=False):
- """This command stops execution and waits for the user to press any
- key to continue. This is similar to the Windows batch "pause"
- command. If the program is not run through a terminal, this command
- will instead do nothing.
-
- .. versionadded:: 2.0
-
- .. versionadded:: 4.0
- Added the `err` parameter.
-
- :param info: the info string to print before pausing.
- :param err: if set to `True` the message goes to ``stderr`` instead of
- ``stdout``, the same as with echo.
- """
- if not isatty(sys.stdin) or not isatty(sys.stdout):
- return
- try:
- if info:
- echo(info, nl=False, err=err)
- try:
- getchar()
- except (KeyboardInterrupt, EOFError):
- pass
- finally:
- if info:
- echo(err=err)
diff --git a/venv/Lib/site-packages/click/testing.py b/venv/Lib/site-packages/click/testing.py
deleted file mode 100644
index 4416c77..0000000
--- a/venv/Lib/site-packages/click/testing.py
+++ /dev/null
@@ -1,322 +0,0 @@
-import os
-import sys
-import shutil
-import tempfile
-import contextlib
-
-from ._compat import iteritems, PY2
-
-
-# If someone wants to vendor click, we want to ensure the
-# correct package is discovered. Ideally we could use a
-# relative import here but unfortunately Python does not
-# support that.
-clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
-
-
-if PY2:
- from cStringIO import StringIO
-else:
- import io
- from ._compat import _find_binary_reader
-
-
-class EchoingStdin(object):
-
- def __init__(self, input, output):
- self._input = input
- self._output = output
-
- def __getattr__(self, x):
- return getattr(self._input, x)
-
- def _echo(self, rv):
- self._output.write(rv)
- return rv
-
- def read(self, n=-1):
- return self._echo(self._input.read(n))
-
- def readline(self, n=-1):
- return self._echo(self._input.readline(n))
-
- def readlines(self):
- return [self._echo(x) for x in self._input.readlines()]
-
- def __iter__(self):
- return iter(self._echo(x) for x in self._input)
-
- def __repr__(self):
- return repr(self._input)
-
-
-def make_input_stream(input, charset):
- # Is already an input stream.
- if hasattr(input, 'read'):
- if PY2:
- return input
- rv = _find_binary_reader(input)
- if rv is not None:
- return rv
- raise TypeError('Could not find binary reader for input stream.')
-
- if input is None:
- input = b''
- elif not isinstance(input, bytes):
- input = input.encode(charset)
- if PY2:
- return StringIO(input)
- return io.BytesIO(input)
-
-
-class Result(object):
- """Holds the captured result of an invoked CLI script."""
-
- def __init__(self, runner, output_bytes, exit_code, exception,
- exc_info=None):
- #: The runner that created the result
- self.runner = runner
- #: The output as bytes.
- self.output_bytes = output_bytes
- #: The exit code as integer.
- self.exit_code = exit_code
- #: The exception that happened, if one did.
- self.exception = exception
- #: The traceback
- self.exc_info = exc_info
-
- @property
- def output(self):
- """The output as unicode string."""
- return self.output_bytes.decode(self.runner.charset, 'replace') \
- .replace('\r\n', '\n')
-
- def __repr__(self):
- return '<Result %s>' % (
- self.exception and repr(self.exception) or 'okay',
- )
-
-
-class CliRunner(object):
- """The CLI runner provides functionality to invoke a Click command line
- script for unittesting purposes in an isolated environment. This only
- works in single-threaded systems without any concurrency, as it changes the
- global interpreter state.
-
- :param charset: the character set for the input and output data. This is
- UTF-8 by default and should not be changed currently as
- the reporting to Click only works properly in Python 2.
- :param env: a dictionary with environment variables for overriding.
- :param echo_stdin: if this is set to `True`, then reading from stdin writes
- to stdout. This is useful for showing examples in
- some circumstances. Note that regular prompts
- will automatically echo the input.
- """
-
- def __init__(self, charset=None, env=None, echo_stdin=False):
- if charset is None:
- charset = 'utf-8'
- self.charset = charset
- self.env = env or {}
- self.echo_stdin = echo_stdin
-
- def get_default_prog_name(self, cli):
- """Given a command object it will return the default program name
- for it. The default is the `name` attribute or ``"root"`` if not
- set.
- """
- return cli.name or 'root'
-
- def make_env(self, overrides=None):
- """Returns the environment overrides for invoking a script."""
- rv = dict(self.env)
- if overrides:
- rv.update(overrides)
- return rv
-
- @contextlib.contextmanager
- def isolation(self, input=None, env=None, color=False):
- """A context manager that sets up the isolation for invoking of a
- command line tool. This sets up stdin with the given input data
- and `os.environ` with the overrides from the given dictionary.
- This also rebinds some internals in Click to be mocked (like the
- prompt functionality).
-
- This is automatically done in the :meth:`invoke` method.
-
- .. versionadded:: 4.0
- The ``color`` parameter was added.
-
- :param input: the input stream to put into sys.stdin.
- :param env: the environment overrides as dictionary.
- :param color: whether the output should contain color codes. The
- application can still override this explicitly.
- """
- input = make_input_stream(input, self.charset)
-
- old_stdin = sys.stdin
- old_stdout = sys.stdout
- old_stderr = sys.stderr
- old_forced_width = clickpkg.formatting.FORCED_WIDTH
- clickpkg.formatting.FORCED_WIDTH = 80
-
- env = self.make_env(env)
-
- if PY2:
- sys.stdout = sys.stderr = bytes_output = StringIO()
- if self.echo_stdin:
- input = EchoingStdin(input, bytes_output)
- else:
- bytes_output = io.BytesIO()
- if self.echo_stdin:
- input = EchoingStdin(input, bytes_output)
- input = io.TextIOWrapper(input, encoding=self.charset)
- sys.stdout = sys.stderr = io.TextIOWrapper(
- bytes_output, encoding=self.charset)
-
- sys.stdin = input
-
- def visible_input(prompt=None):
- sys.stdout.write(prompt or '')
- val = input.readline().rstrip('\r\n')
- sys.stdout.write(val + '\n')
- sys.stdout.flush()
- return val
-
- def hidden_input(prompt=None):
- sys.stdout.write((prompt or '') + '\n')
- sys.stdout.flush()
- return input.readline().rstrip('\r\n')
-
- def _getchar(echo):
- char = sys.stdin.read(1)
- if echo:
- sys.stdout.write(char)
- sys.stdout.flush()
- return char
-
- default_color = color
- def should_strip_ansi(stream=None, color=None):
- if color is None:
- return not default_color
- return not color
-
- old_visible_prompt_func = clickpkg.termui.visible_prompt_func
- old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
- old__getchar_func = clickpkg.termui._getchar
- old_should_strip_ansi = clickpkg.utils.should_strip_ansi
- clickpkg.termui.visible_prompt_func = visible_input
- clickpkg.termui.hidden_prompt_func = hidden_input
- clickpkg.termui._getchar = _getchar
- clickpkg.utils.should_strip_ansi = should_strip_ansi
-
- old_env = {}
- try:
- for key, value in iteritems(env):
- old_env[key] = os.environ.get(key)
- if value is None:
- try:
- del os.environ[key]
- except Exception:
- pass
- else:
- os.environ[key] = value
- yield bytes_output
- finally:
- for key, value in iteritems(old_env):
- if value is None:
- try:
- del os.environ[key]
- except Exception:
- pass
- else:
- os.environ[key] = value
- sys.stdout = old_stdout
- sys.stderr = old_stderr
- sys.stdin = old_stdin
- clickpkg.termui.visible_prompt_func = old_visible_prompt_func
- clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
- clickpkg.termui._getchar = old__getchar_func
- clickpkg.utils.should_strip_ansi = old_should_strip_ansi
- clickpkg.formatting.FORCED_WIDTH = old_forced_width
-
- def invoke(self, cli, args=None, input=None, env=None,
- catch_exceptions=True, color=False, **extra):
- """Invokes a command in an isolated environment. The arguments are
- forwarded directly to the command line script, the `extra` keyword
- arguments are passed to the :meth:`~clickpkg.Command.main` function of
- the command.
-
- This returns a :class:`Result` object.
-
- .. versionadded:: 3.0
- The ``catch_exceptions`` parameter was added.
-
- .. versionchanged:: 3.0
- The result object now has an `exc_info` attribute with the
- traceback if available.
-
- .. versionadded:: 4.0
- The ``color`` parameter was added.
-
- :param cli: the command to invoke
- :param args: the arguments to invoke
- :param input: the input data for `sys.stdin`.
- :param env: the environment overrides.
- :param catch_exceptions: Whether to catch any other exceptions than
- ``SystemExit``.
- :param extra: the keyword arguments to pass to :meth:`main`.
- :param color: whether the output should contain color codes. The
- application can still override this explicitly.
- """
- exc_info = None
- with self.isolation(input=input, env=env, color=color) as out:
- exception = None
- exit_code = 0
-
- try:
- cli.main(args=args or (),
- prog_name=self.get_default_prog_name(cli), **extra)
- except SystemExit as e:
- if e.code != 0:
- exception = e
-
- exc_info = sys.exc_info()
-
- exit_code = e.code
- if not isinstance(exit_code, int):
- sys.stdout.write(str(exit_code))
- sys.stdout.write('\n')
- exit_code = 1
- except Exception as e:
- if not catch_exceptions:
- raise
- exception = e
- exit_code = -1
- exc_info = sys.exc_info()
- finally:
- sys.stdout.flush()
- output = out.getvalue()
-
- return Result(runner=self,
- output_bytes=output,
- exit_code=exit_code,
- exception=exception,
- exc_info=exc_info)
-
- @contextlib.contextmanager
- def isolated_filesystem(self):
- """A context manager that creates a temporary folder and changes
- the current working directory to it for isolated filesystem tests.
- """
- cwd = os.getcwd()
- t = tempfile.mkdtemp()
- os.chdir(t)
- try:
- yield t
- finally:
- os.chdir(cwd)
- try:
- shutil.rmtree(t)
- except (OSError, IOError):
- pass
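-
-
-# A hypothetical test sketch (not part of click's source) showing the
-# typical CliRunner flow; `hello` is an assumed example command defined
-# inline for the demonstration.
-def _cli_runner_example():
-    import click
-
-    @click.command()
-    @click.argument('name')
-    def hello(name):
-        click.echo('Hello %s!' % name)
-
-    runner = CliRunner()
-    result = runner.invoke(hello, ['Peter'])
-    assert result.exit_code == 0
-    assert result.output == 'Hello Peter!\n'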
diff --git a/venv/Lib/site-packages/click/types.py b/venv/Lib/site-packages/click/types.py
deleted file mode 100644
index 3639002..0000000
--- a/venv/Lib/site-packages/click/types.py
+++ /dev/null
@@ -1,550 +0,0 @@
-import os
-import stat
-
-from ._compat import open_stream, text_type, filename_to_ui, \
- get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2
-from .exceptions import BadParameter
-from .utils import safecall, LazyFile
-
-
-class ParamType(object):
- """Helper for converting values through types. The following is
- necessary for a valid type:
-
- * it needs a name
- * it needs to pass through None unchanged
- * it needs to convert from a string
- * it needs to convert its result type through unchanged
- (eg: needs to be idempotent)
- * it needs to be able to deal with param and context being `None`.
- This can be the case when the object is used with prompt
- inputs.
- """
- is_composite = False
-
- #: the descriptive name of this type
- name = None
-
- #: if a list of this type is expected and the value is pulled from a
- #: string environment variable, this is what splits it up. `None`
- #: means any whitespace. For all parameters the general rule is that
- #: whitespace splits them up. The exception are paths and files which
- #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
- #: Windows).
- envvar_list_splitter = None
-
- def __call__(self, value, param=None, ctx=None):
- if value is not None:
- return self.convert(value, param, ctx)
-
- def get_metavar(self, param):
- """Returns the metavar default for this param if it provides one."""
-
- def get_missing_message(self, param):
- """Optionally might return extra information about a missing
- parameter.
-
- .. versionadded:: 2.0
- """
-
- def convert(self, value, param, ctx):
- """Converts the value. This is not invoked for values that are
- `None` (the missing value).
- """
- return value
-
- def split_envvar_value(self, rv):
- """Given a value from an environment variable this splits it up
- into small chunks depending on the defined envvar list splitter.
-
- If the splitter is set to `None`, which means that whitespace splits,
- then leading and trailing whitespace is ignored. Otherwise, leading
- and trailing splitters usually lead to empty items being included.
- """
- return (rv or '').split(self.envvar_list_splitter)
-
- def fail(self, message, param=None, ctx=None):
- """Helper method to fail with an invalid value message."""
- raise BadParameter(message, ctx=ctx, param=param)
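-
-
-# A hypothetical sketch (not part of click's source) of the rules above:
-# a custom type converts from a string, passes already-converted values
-# through unchanged (idempotence), and reports bad input via fail().
-class _KeyValueExampleType(ParamType):
-    name = 'key=value'
-
-    def convert(self, value, param, ctx):
-        if isinstance(value, tuple):
-            return value
-        if '=' not in value:
-            self.fail('%s is not in KEY=VALUE form' % value, param, ctx)
-        key, _, val = value.partition('=')
-        return (key, val)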
-
-
-class CompositeParamType(ParamType):
- is_composite = True
-
- @property
- def arity(self):
- raise NotImplementedError()
-
-
-class FuncParamType(ParamType):
-
- def __init__(self, func):
- self.name = func.__name__
- self.func = func
-
- def convert(self, value, param, ctx):
- try:
- return self.func(value)
- except ValueError:
- try:
- value = text_type(value)
- except UnicodeError:
- value = str(value).decode('utf-8', 'replace')
- self.fail(value, param, ctx)
-
-
-class UnprocessedParamType(ParamType):
- name = 'text'
-
- def convert(self, value, param, ctx):
- return value
-
- def __repr__(self):
- return 'UNPROCESSED'
-
-
-class StringParamType(ParamType):
- name = 'text'
-
- def convert(self, value, param, ctx):
- if isinstance(value, bytes):
- enc = _get_argv_encoding()
- try:
- value = value.decode(enc)
- except UnicodeError:
- fs_enc = get_filesystem_encoding()
- if fs_enc != enc:
- try:
- value = value.decode(fs_enc)
- except UnicodeError:
- value = value.decode('utf-8', 'replace')
- return value
- return value
-
- def __repr__(self):
- return 'STRING'
-
-
-class Choice(ParamType):
- """The choice type allows a value to be checked against a fixed set of
- supported values. All of these values have to be strings.
-
- See :ref:`choice-opts` for an example.
- """
- name = 'choice'
-
- def __init__(self, choices):
- self.choices = choices
-
- def get_metavar(self, param):
- return '[%s]' % '|'.join(self.choices)
-
- def get_missing_message(self, param):
- return 'Choose from %s.' % ', '.join(self.choices)
-
- def convert(self, value, param, ctx):
- # Exact match
- if value in self.choices:
- return value
-
- # Match through normalization
- if ctx is not None and \
- ctx.token_normalize_func is not None:
- value = ctx.token_normalize_func(value)
- for choice in self.choices:
- if ctx.token_normalize_func(choice) == value:
- return choice
-
- self.fail('invalid choice: %s. (choose from %s)' %
- (value, ', '.join(self.choices)), param, ctx)
-
- def __repr__(self):
- return 'Choice(%r)' % list(self.choices)
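-
-
-# A minimal usage sketch (not part of click's source): exact matches pass
-# straight through; anything else triggers the fail() message above.
-def _choice_example():
-    method = Choice(['GET', 'POST'])
-    return method.convert('GET', None, None)  # -> 'GET'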
-
-
-class IntParamType(ParamType):
- name = 'integer'
-
- def convert(self, value, param, ctx):
- try:
- return int(value)
- except (ValueError, UnicodeError):
- self.fail('%s is not a valid integer' % value, param, ctx)
-
- def __repr__(self):
- return 'INT'
-
-
-class IntRange(IntParamType):
- """A parameter that works similar to :data:`click.INT` but restricts
- the value to fit into a range. The default behavior is to fail if the
- value falls outside the range, but it can also be silently clamped
- between the two edges.
-
- See :ref:`ranges` for an example.
- """
- name = 'integer range'
-
- def __init__(self, min=None, max=None, clamp=False):
- self.min = min
- self.max = max
- self.clamp = clamp
-
- def convert(self, value, param, ctx):
- rv = IntParamType.convert(self, value, param, ctx)
- if self.clamp:
- if self.min is not None and rv < self.min:
- return self.min
- if self.max is not None and rv > self.max:
- return self.max
- if self.min is not None and rv < self.min or \
- self.max is not None and rv > self.max:
- if self.min is None:
- self.fail('%s is bigger than the maximum valid value '
- '%s.' % (rv, self.max), param, ctx)
- elif self.max is None:
- self.fail('%s is smaller than the minimum valid value '
- '%s.' % (rv, self.min), param, ctx)
- else:
- self.fail('%s is not in the valid range of %s to %s.'
- % (rv, self.min, self.max), param, ctx)
- return rv
-
- def __repr__(self):
- return 'IntRange(%r, %r)' % (self.min, self.max)
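-
-
-# A minimal usage sketch (not part of click's source): with clamp=True an
-# out-of-range value is pinned to the nearest edge instead of failing.
-def _int_range_example():
-    percent = IntRange(0, 100, clamp=True)
-    return percent.convert('150', None, None)  # -> 100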
-
-
-class BoolParamType(ParamType):
- name = 'boolean'
-
- def convert(self, value, param, ctx):
- if isinstance(value, bool):
- return bool(value)
- value = value.lower()
- if value in ('true', '1', 'yes', 'y'):
- return True
- elif value in ('false', '0', 'no', 'n'):
- return False
- self.fail('%s is not a valid boolean' % value, param, ctx)
-
- def __repr__(self):
- return 'BOOL'
-
-
-class FloatParamType(ParamType):
- name = 'float'
-
- def convert(self, value, param, ctx):
- try:
- return float(value)
- except (UnicodeError, ValueError):
- self.fail('%s is not a valid floating point value' %
- value, param, ctx)
-
- def __repr__(self):
- return 'FLOAT'
-
-
-class UUIDParameterType(ParamType):
- name = 'uuid'
-
- def convert(self, value, param, ctx):
- import uuid
- try:
- if PY2 and isinstance(value, text_type):
- value = value.encode('ascii')
- return uuid.UUID(value)
- except (UnicodeError, ValueError):
- self.fail('%s is not a valid UUID value' % value, param, ctx)
-
- def __repr__(self):
- return 'UUID'
-
-
-class File(ParamType):
- """Declares a parameter to be a file for reading or writing. The file
- is automatically closed once the context tears down (after the command
- finished working).
-
- Files can be opened for reading or writing. The special value ``-``
- indicates stdin or stdout depending on the mode.
-
- By default, the file is opened for reading text data, but it can also be
- opened in binary mode or for writing. The encoding parameter can be used
- to force a specific encoding.
-
- The `lazy` flag controls if the file should be opened immediately or
- upon first IO. The default is to be non lazy for standard input and
- output streams as well as files opened for reading, lazy otherwise.
-
- Starting with Click 2.0, files can also be opened atomically in which
- case all writes go into a separate file in the same folder and upon
- completion the file will be moved over to the original location. This
- is useful if a file regularly read by other users is modified.
-
- See :ref:`file-args` for more information.
- """
- name = 'filename'
- envvar_list_splitter = os.path.pathsep
-
- def __init__(self, mode='r', encoding=None, errors='strict', lazy=None,
- atomic=False):
- self.mode = mode
- self.encoding = encoding
- self.errors = errors
- self.lazy = lazy
- self.atomic = atomic
-
- def resolve_lazy_flag(self, value):
- if self.lazy is not None:
- return self.lazy
- if value == '-':
- return False
- elif 'w' in self.mode:
- return True
- return False
-
- def convert(self, value, param, ctx):
- try:
- if hasattr(value, 'read') or hasattr(value, 'write'):
- return value
-
- lazy = self.resolve_lazy_flag(value)
-
- if lazy:
- f = LazyFile(value, self.mode, self.encoding, self.errors,
- atomic=self.atomic)
- if ctx is not None:
- ctx.call_on_close(f.close_intelligently)
- return f
-
- f, should_close = open_stream(value, self.mode,
- self.encoding, self.errors,
- atomic=self.atomic)
- # If a context is provided, we automatically close the file
- # at the end of the context execution (or flush out). If a
- # context does not exist, it's the caller's responsibility to
- # properly close the file. This for instance happens when the
- # type is used with prompts.
- if ctx is not None:
- if should_close:
- ctx.call_on_close(safecall(f.close))
- else:
- ctx.call_on_close(safecall(f.flush))
- return f
- except (IOError, OSError) as e:
- self.fail('Could not open file: %s: %s' % (
- filename_to_ui(value),
- get_streerror(e),
- ), param, ctx)
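-
-
-# A minimal usage sketch (not part of click's source): using the type for
-# prompt-style conversion with ctx=None, in which case closing the handle
-# is the caller's responsibility, as noted above.
-def _file_example(path):
-    f = File('r').convert(path, None, None)
-    try:
-        return f.read()
-    finally:
-        f.close()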
-
-
-class Path(ParamType):
- """The path type is similar to the :class:`File` type but it performs
- different checks. First of all, instead of returning an open file
- handle it returns just the filename. Secondly, it can perform various
- basic checks about what the file or directory should be.
-
- .. versionchanged:: 6.0
- `allow_dash` was added.
-
- :param exists: if set to true, the file or directory needs to exist for
- this value to be valid. If this is not required and a
- file does indeed not exist, then all further checks are
- silently skipped.
- :param file_okay: controls if a file is a possible value.
- :param dir_okay: controls if a directory is a possible value.
- :param writable: if true, a writable check is performed.
- :param readable: if true, a readable check is performed.
- :param resolve_path: if this is true, then the path is fully resolved
- before the value is passed onwards. This means
- that it's absolute and symlinks are resolved.
- :param allow_dash: If this is set to `True`, a single dash to indicate
- standard streams is permitted.
- :param path_type: optionally a string type that should be used to
- represent the path. The default is `None` which
- means the return value will be either bytes or
- unicode depending on what makes most sense given the
- input data Click deals with.
- """
- envvar_list_splitter = os.path.pathsep
-
- def __init__(self, exists=False, file_okay=True, dir_okay=True,
- writable=False, readable=True, resolve_path=False,
- allow_dash=False, path_type=None):
- self.exists = exists
- self.file_okay = file_okay
- self.dir_okay = dir_okay
- self.writable = writable
- self.readable = readable
- self.resolve_path = resolve_path
- self.allow_dash = allow_dash
- self.type = path_type
-
- if self.file_okay and not self.dir_okay:
- self.name = 'file'
- self.path_type = 'File'
- elif self.dir_okay and not self.file_okay:
- self.name = 'directory'
- self.path_type = 'Directory'
- else:
- self.name = 'path'
- self.path_type = 'Path'
-
- def coerce_path_result(self, rv):
- if self.type is not None and not isinstance(rv, self.type):
- if self.type is text_type:
- rv = rv.decode(get_filesystem_encoding())
- else:
- rv = rv.encode(get_filesystem_encoding())
- return rv
-
- def convert(self, value, param, ctx):
- rv = value
-
- is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-')
-
- if not is_dash:
- if self.resolve_path:
- rv = os.path.realpath(rv)
-
- try:
- st = os.stat(rv)
- except OSError:
- if not self.exists:
- return self.coerce_path_result(rv)
- self.fail('%s "%s" does not exist.' % (
- self.path_type,
- filename_to_ui(value)
- ), param, ctx)
-
- if not self.file_okay and stat.S_ISREG(st.st_mode):
- self.fail('%s "%s" is a file.' % (
- self.path_type,
- filename_to_ui(value)
- ), param, ctx)
- if not self.dir_okay and stat.S_ISDIR(st.st_mode):
- self.fail('%s "%s" is a directory.' % (
- self.path_type,
- filename_to_ui(value)
- ), param, ctx)
- if self.writable and not os.access(value, os.W_OK):
- self.fail('%s "%s" is not writable.' % (
- self.path_type,
- filename_to_ui(value)
- ), param, ctx)
- if self.readable and not os.access(value, os.R_OK):
- self.fail('%s "%s" is not readable.' % (
- self.path_type,
- filename_to_ui(value)
- ), param, ctx)
-
- return self.coerce_path_result(rv)
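-
-
-# A minimal usage sketch (not part of click's source): requiring an
-# existing directory; convert() returns the (resolved) path string rather
-# than an open handle.
-def _path_example():
-    config_dir = Path(exists=True, file_okay=False, resolve_path=True)
-    return config_dir.convert(os.curdir, None, None)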
-
-
-class Tuple(CompositeParamType):
- """The default behavior of Click is to apply a type on a value directly.
- This works well in most cases, except for when `nargs` is set to a fixed
- count and different types should be used for different items. In this
- case the :class:`Tuple` type can be used. This type can only be used
- if `nargs` is set to a fixed number.
-
- For more information see :ref:`tuple-type`.
-
- This can be selected by using a Python tuple literal as a type.
-
- :param types: a list of types that should be used for the tuple items.
- """
-
- def __init__(self, types):
- self.types = [convert_type(ty) for ty in types]
-
- @property
- def name(self):
- return "<" + " ".join(ty.name for ty in self.types) + ">"
-
- @property
- def arity(self):
- return len(self.types)
-
- def convert(self, value, param, ctx):
- if len(value) != len(self.types):
- raise TypeError('It would appear that nargs is set to conflict '
- 'with the composite type arity.')
- return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
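-
-
-# A minimal usage sketch (not part of click's source): a two-item
-# composite as would back something like ``--size 800 600``; each item is
-# converted by its own type.
-def _tuple_example():
-    size = Tuple([int, int])
-    return size.convert(('800', '600'), None, None)  # -> (800, 600)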
-
-
-def convert_type(ty, default=None):
- """Converts a callable or python ty into the most appropriate param
- ty.
- """
- guessed_type = False
- if ty is None and default is not None:
- if isinstance(default, tuple):
- ty = tuple(map(type, default))
- else:
- ty = type(default)
- guessed_type = True
-
- if isinstance(ty, tuple):
- return Tuple(ty)
- if isinstance(ty, ParamType):
- return ty
- if ty is text_type or ty is str or ty is None:
- return STRING
- if ty is int:
- return INT
- # Booleans are only okay if not guessed. This is done because for
- # flags the default value is actually a bit of a lie in that it
- # indicates which of the flags is the one we want. See get_default()
- # for more information.
- if ty is bool and not guessed_type:
- return BOOL
- if ty is float:
- return FLOAT
- if guessed_type:
- return STRING
-
- # Catch a common mistake
- if __debug__:
- try:
- if issubclass(ty, ParamType):
- raise AssertionError('Attempted to use an uninstantiated '
- 'parameter type (%s).' % ty)
- except TypeError:
- pass
- return FuncParamType(ty)
-
-
-#: A dummy parameter type that just does nothing. From a user's
-#: perspective this appears to just be the same as `STRING` but internally
-#: no string conversion takes place. This is necessary to achieve the
-#: same bytes/unicode behavior on Python 2/3 in situations where you want
-#: to not convert argument types. This is usually useful when working
-#: with file paths as they can appear in bytes and unicode.
-#:
-#: For path related uses the :class:`Path` type is a better choice but
-#: there are situations where an unprocessed type is useful, which is why
-#: it is provided.
-#:
-#: .. versionadded:: 4.0
-UNPROCESSED = UnprocessedParamType()
-
-#: A unicode string parameter type which is the implicit default. This
-#: can also be selected by using ``str`` as type.
-STRING = StringParamType()
-
-#: An integer parameter. This can also be selected by using ``int`` as
-#: type.
-INT = IntParamType()
-
-#: A floating point value parameter. This can also be selected by using
-#: ``float`` as type.
-FLOAT = FloatParamType()
-
-#: A boolean parameter. This is the default for boolean flags. This can
-#: also be selected by using ``bool`` as a type.
-BOOL = BoolParamType()
-
-#: A UUID parameter.
-UUID = UUIDParameterType()
diff --git a/venv/Lib/site-packages/click/utils.py b/venv/Lib/site-packages/click/utils.py
deleted file mode 100644
index eee626d..0000000
--- a/venv/Lib/site-packages/click/utils.py
+++ /dev/null
@@ -1,415 +0,0 @@
-import os
-import sys
-
-from .globals import resolve_color_default
-
-from ._compat import text_type, open_stream, get_filesystem_encoding, \
- get_streerror, string_types, PY2, binary_streams, text_streams, \
- filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \
- _default_text_stdout, _default_text_stderr, is_bytes, WIN
-
-if not PY2:
- from ._compat import _find_binary_writer
-elif WIN:
- from ._winconsole import _get_windows_argv, \
- _hash_py_argv, _initial_argv_hash
-
-
-echo_native_types = string_types + (bytes, bytearray)
-
-
-def _posixify(name):
- return '-'.join(name.split()).lower()
-
-
-def safecall(func):
- """Wraps a function so that it swallows exceptions."""
- def wrapper(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except Exception:
- pass
- return wrapper
-
-
-def make_str(value):
- """Converts a value into a valid string."""
- if isinstance(value, bytes):
- try:
- return value.decode(get_filesystem_encoding())
- except UnicodeError:
- return value.decode('utf-8', 'replace')
- return text_type(value)
-
-
-def make_default_short_help(help, max_length=45):
- words = help.split()
- total_length = 0
- result = []
- done = False
-
- for word in words:
- if word[-1:] == '.':
- done = True
- new_length = result and 1 + len(word) or len(word)
- if total_length + new_length > max_length:
- result.append('...')
- done = True
- else:
- if result:
- result.append(' ')
- result.append(word)
- if done:
- break
- total_length += new_length
-
- return ''.join(result)
-
-
-class LazyFile(object):
- """A lazy file works like a regular file but it does not fully open
- the file but it does perform some basic checks early to see if the
- filename parameter does make sense. This is useful for safely opening
- files for writing.
- """
-
- def __init__(self, filename, mode='r', encoding=None, errors='strict',
- atomic=False):
- self.name = filename
- self.mode = mode
- self.encoding = encoding
- self.errors = errors
- self.atomic = atomic
-
- if filename == '-':
- self._f, self.should_close = open_stream(filename, mode,
- encoding, errors)
- else:
- if 'r' in mode:
- # Open and close the file in case we're opening it for
- # reading so that we can catch at least some errors in
- # some cases early.
- open(filename, mode).close()
- self._f = None
- self.should_close = True
-
- def __getattr__(self, name):
- return getattr(self.open(), name)
-
- def __repr__(self):
- if self._f is not None:
- return repr(self._f)
- return '<unopened file %r %s>' % (self.name, self.mode)
-
- def open(self):
- """Opens the file if it's not yet open. This call might fail with
- a :exc:`FileError`. Not handling this error will produce an error
- that Click shows.
- """
- if self._f is not None:
- return self._f
- try:
- rv, self.should_close = open_stream(self.name, self.mode,
- self.encoding,
- self.errors,
- atomic=self.atomic)
- except (IOError, OSError) as e:
- from .exceptions import FileError
- raise FileError(self.name, hint=get_streerror(e))
- self._f = rv
- return rv
-
- def close(self):
- """Closes the underlying file, no matter what."""
- if self._f is not None:
- self._f.close()
-
- def close_intelligently(self):
- """This function only closes the file if it was opened by the lazy
- file wrapper. For instance this will never close stdin.
- """
- if self.should_close:
- self.close()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, tb):
- self.close_intelligently()
-
- def __iter__(self):
- self.open()
- return iter(self._f)
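-
-
-# A minimal usage sketch (not part of click's source): the real open()
-# only happens on first I/O, and close_intelligently() leaves standard
-# streams alone if '-' was passed as the filename.
-def _lazy_file_example(path):
-    lf = LazyFile(path, 'w')
-    try:
-        lf.write('hello\n')  # triggers the actual open
-    finally:
-        lf.close_intelligently()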
-
-
-class KeepOpenFile(object):
-
- def __init__(self, file):
- self._file = file
-
- def __getattr__(self, name):
- return getattr(self._file, name)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, tb):
- pass
-
- def __repr__(self):
- return repr(self._file)
-
- def __iter__(self):
- return iter(self._file)
-
-
-def echo(message=None, file=None, nl=True, err=False, color=None):
- """Prints a message plus a newline to the given file or stdout. On
- first sight, this looks like the print function, but it has improved
- support for handling Unicode and binary data that does not fail no
- matter how badly configured the system is.
-
- Primarily it means that you can print binary data as well as Unicode
- data on both 2.x and 3.x to the given file in the most appropriate way
- possible. This is a very carefree function, in that it will try its
- best not to fail. As of Click 6.0 this includes support for unicode
- output on the Windows console.
-
- In addition to that, if `colorama`_ is installed, the echo function will
- also support clever handling of ANSI codes. Essentially it will then
- do the following:
-
- - add transparent handling of ANSI color codes on Windows.
- - hide ANSI codes automatically if the destination file is not a
- terminal.
-
- .. _colorama: http://pypi.python.org/pypi/colorama
-
- .. versionchanged:: 6.0
- As of Click 6.0 the echo function will properly support unicode
- output on the Windows console. Note that click does not modify
- the interpreter in any way, which means that `sys.stdout` or the
- print statement or function will still not provide unicode support.
-
- .. versionchanged:: 2.0
- Starting with version 2.0 of Click, the echo function will work
- with colorama if it's installed.
-
- .. versionadded:: 3.0
- The `err` parameter was added.
-
- .. versionchanged:: 4.0
- Added the `color` flag.
-
- :param message: the message to print
- :param file: the file to write to (defaults to ``stdout``)
- :param err: if set to true the file defaults to ``stderr`` instead of
- ``stdout``. This is faster and easier than calling
- :func:`get_text_stderr` yourself.
- :param nl: if set to `True` (the default) a newline is printed afterwards.
- :param color: controls if the terminal supports ANSI colors or not. The
- default is autodetection.
- """
- if file is None:
- if err:
- file = _default_text_stderr()
- else:
- file = _default_text_stdout()
-
- # Convert non bytes/text into the native string type.
- if message is not None and not isinstance(message, echo_native_types):
- message = text_type(message)
-
- if nl:
- message = message or u''
- if isinstance(message, text_type):
- message += u'\n'
- else:
- message += b'\n'
-
- # If there is a message, and we're in Python 3, and the value looks
- # like bytes, we manually need to find the binary stream and write the
- # message in there. This is done separately so that most stream
- # types will work as you would expect. Eg: you can write to StringIO
- # for other cases.
- if message and not PY2 and is_bytes(message):
- binary_file = _find_binary_writer(file)
- if binary_file is not None:
- file.flush()
- binary_file.write(message)
- binary_file.flush()
- return
-
- # ANSI-style support. If there is no message or we are dealing with
- # bytes nothing is happening. If we are connected to a file we want
- # to strip colors. If we are on windows we either wrap the stream
- # to strip the color or we use the colorama support to translate the
- # ansi codes to API calls.
- if message and not is_bytes(message):
- color = resolve_color_default(color)
- if should_strip_ansi(file, color):
- message = strip_ansi(message)
- elif WIN:
- if auto_wrap_for_ansi is not None:
- file = auto_wrap_for_ansi(file)
- elif not color:
- message = strip_ansi(message)
-
- if message:
- file.write(message)
- file.flush()
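-
-
-# A minimal usage sketch (not part of click's source): echo() accepts
-# text, bytes and arbitrary objects, and err=True routes the message to
-# stderr instead of stdout.
-def _echo_example():
-    echo(u'unicode works')
-    echo(b'raw bytes work too')
-    echo('something went wrong', err=True)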
-
-
-def get_binary_stream(name):
- """Returns a system stream for byte processing. This essentially
- returns the stream from the sys module with the given name but it
- solves some compatibility issues between different Python versions.
- Primarily this function is necessary for getting binary streams on
- Python 3.
-
- :param name: the name of the stream to open. Valid names are ``'stdin'``,
- ``'stdout'`` and ``'stderr'``
- """
- opener = binary_streams.get(name)
- if opener is None:
- raise TypeError('Unknown standard stream %r' % name)
- return opener()
-
-
-def get_text_stream(name, encoding=None, errors='strict'):
- """Returns a system stream for text processing. This usually returns
- a wrapped stream around a binary stream returned from
- :func:`get_binary_stream` but it also can take shortcuts on Python 3
- for already correctly configured streams.
-
- :param name: the name of the stream to open. Valid names are ``'stdin'``,
- ``'stdout'`` and ``'stderr'``
- :param encoding: overrides the detected default encoding.
- :param errors: overrides the default error mode.
- """
- opener = text_streams.get(name)
- if opener is None:
- raise TypeError('Unknown standard stream %r' % name)
- return opener(encoding, errors)
-
-
-def open_file(filename, mode='r', encoding=None, errors='strict',
- lazy=False, atomic=False):
- """This is similar to how the :class:`File` works but for manual
- usage. Files are opened non-lazily by default. This can open regular
- files as well as stdin/stdout if ``'-'`` is passed.
-
- If stdin/stdout is returned the stream is wrapped so that the context
- manager will not close the stream accidentally. This makes it possible
- to always use the function like this without having to worry about
- accidentally closing a standard stream::
-
- with open_file(filename) as f:
- ...
-
- .. versionadded:: 3.0
-
- :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
- :param mode: the mode in which to open the file.
- :param encoding: the encoding to use.
- :param errors: the error handling for this file.
- :param lazy: can be flipped to true to open the file lazily.
- :param atomic: in atomic mode writes go into a temporary file and it's
- moved on close.
- """
- if lazy:
- return LazyFile(filename, mode, encoding, errors, atomic=atomic)
- f, should_close = open_stream(filename, mode, encoding, errors,
- atomic=atomic)
- if not should_close:
- f = KeepOpenFile(f)
- return f
-
-
-def get_os_args():
- """This returns the argument part of sys.argv in the most appropriate
- form for processing. What this means is that this return value is in
- a format that works for Click to process but does not necessarily
- correspond well to what's actually standard for the interpreter.
-
- In most environments the return value is ``sys.argv[1:]`` unchanged.
- However, if you are on Windows and running Python 2 the return value
- will actually be a list of unicode strings instead because the
- default behavior on that platform otherwise will not be able to
- carry all possible values that sys.argv can have.
-
- .. versionadded:: 6.0
- """
- # We can only extract the unicode argv if sys.argv has not been
- # changed since the startup of the application.
- if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
- return _get_windows_argv()
- return sys.argv[1:]
-
-
-def format_filename(filename, shorten=False):
- """Formats a filename for user display. The main purpose of this
- function is to ensure that the filename can be displayed at all. This
- will decode the filename to unicode if necessary in a way that it will
- not fail. Optionally, it can shorten the filename to not include the
- full path to the filename.
-
- :param filename: the filename to format for UI display. This will also
- convert the filename into unicode without failing.
- :param shorten: this optionally shortens the filename to strip off the
- path that leads up to it.
- """
- if shorten:
- filename = os.path.basename(filename)
- return filename_to_ui(filename)
-
-
-def get_app_dir(app_name, roaming=True, force_posix=False):
- r"""Returns the config folder for the application. The default behavior
- is to return whatever is most appropriate for the operating system.
-
- To give you an idea, for an app called ``"Foo Bar"``, something like
- the following folders could be returned:
-
- Mac OS X:
- ``~/Library/Application Support/Foo Bar``
- Mac OS X (POSIX):
- ``~/.foo-bar``
- Unix:
- ``~/.config/foo-bar``
- Unix (POSIX):
- ``~/.foo-bar``
- Win XP (roaming):
- ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
- Win XP (not roaming):
- ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
- Win 7 (roaming):
- ``C:\Users\<user>\AppData\Roaming\Foo Bar``
- Win 7 (not roaming):
- ``C:\Users\<user>\AppData\Local\Foo Bar``
-
- .. versionadded:: 2.0
-
- :param app_name: the application name. This should be properly capitalized
- and can contain whitespace.
- :param roaming: controls if the folder should be roaming or not on Windows.
- Has no effect otherwise.
- :param force_posix: if this is set to `True` then on any POSIX system the
- folder will be stored in the home folder with a leading
- dot instead of the XDG config home or darwin's
- application support folder.
- """
- if WIN:
- key = roaming and 'APPDATA' or 'LOCALAPPDATA'
- folder = os.environ.get(key)
- if folder is None:
- folder = os.path.expanduser('~')
- return os.path.join(folder, app_name)
- if force_posix:
- return os.path.join(os.path.expanduser('~/.' + _posixify(app_name)))
- if sys.platform == 'darwin':
- return os.path.join(os.path.expanduser(
- '~/Library/Application Support'), app_name)
- return os.path.join(
- os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
- _posixify(app_name))
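-
-
-# A minimal usage sketch (not part of click's source): deriving a config
-# file location for an assumed application name.
-def _app_dir_example():
-    return os.path.join(get_app_dir('Foo Bar'), 'config.ini')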
diff --git a/venv/Lib/site-packages/dateutil/__init__.py b/venv/Lib/site-packages/dateutil/__init__.py
deleted file mode 100644
index 796ef3d..0000000
--- a/venv/Lib/site-packages/dateutil/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# -*- coding: utf-8 -*-
-from ._version import VERSION as __version__
diff --git a/venv/Lib/site-packages/dateutil/_common.py b/venv/Lib/site-packages/dateutil/_common.py
deleted file mode 100644
index e8b4af7..0000000
--- a/venv/Lib/site-packages/dateutil/_common.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-Common code used in multiple modules.
-"""
-
-
-class weekday(object):
- __slots__ = ["weekday", "n"]
-
- def __init__(self, weekday, n=None):
- self.weekday = weekday
- self.n = n
-
- def __call__(self, n):
- if n == self.n:
- return self
- else:
- return self.__class__(self.weekday, n)
-
- def __eq__(self, other):
- try:
- if self.weekday != other.weekday or self.n != other.n:
- return False
- except AttributeError:
- return False
- return True
-
- __hash__ = None
-
- def __repr__(self):
- s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
- if not self.n:
- return s
- else:
- return "%s(%+d)" % (s, self.n)
diff --git a/venv/Lib/site-packages/dateutil/_version.py b/venv/Lib/site-packages/dateutil/_version.py
deleted file mode 100644
index c1a0357..0000000
--- a/venv/Lib/site-packages/dateutil/_version.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-Contains information about the dateutil version.
-"""
-
-VERSION_MAJOR = 2
-VERSION_MINOR = 6
-VERSION_PATCH = 1
-
-VERSION_TUPLE = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
-VERSION = '.'.join(map(str, VERSION_TUPLE))
diff --git a/venv/Lib/site-packages/dateutil/easter.py b/venv/Lib/site-packages/dateutil/easter.py
deleted file mode 100644
index e4def97..0000000
--- a/venv/Lib/site-packages/dateutil/easter.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module offers a generic easter computing method for any given year, using
-Western, Orthodox or Julian algorithms.
-"""
-
-import datetime
-
-__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
-
-EASTER_JULIAN = 1
-EASTER_ORTHODOX = 2
-EASTER_WESTERN = 3
-
-
-def easter(year, method=EASTER_WESTERN):
- """
- This method was ported from the work done by GM Arts,
- on top of the algorithm by Claus Tondering, which was
- based in part on the algorithm of Ouding (1940), as
- quoted in "Explanatory Supplement to the Astronomical
- Almanac", P. Kenneth Seidelmann, editor.
-
- This algorithm implements three different easter
- calculation methods:
-
- 1 - Original calculation in Julian calendar, valid in
- dates after 326 AD
- 2 - Original method, with date converted to Gregorian
- calendar, valid in years 1583 to 4099
- 3 - Revised method, in Gregorian calendar, valid in
- years 1583 to 4099 as well
-
- These methods are represented by the constants:
-
- * ``EASTER_JULIAN = 1``
- * ``EASTER_ORTHODOX = 2``
- * ``EASTER_WESTERN = 3``
-
- The default method is method 3.
-
- More about the algorithm may be found at:
-
- http://users.chariot.net.au/~gmarts/eastalg.htm
-
- and
-
- http://www.tondering.dk/claus/calendar.html
-
- """
-
- if not (1 <= method <= 3):
- raise ValueError("invalid method")
-
- # g - Golden year - 1
- # c - Century
- # h - (23 - Epact) mod 30
- # i - Number of days from March 21 to Paschal Full Moon
- # j - Weekday for PFM (0=Sunday, etc)
- # p - Number of days from March 21 to Sunday on or before PFM
- # (-6 to 28 methods 1 & 3, to 56 for method 2)
- # e - Extra days to add for method 2 (converting Julian
- # date to Gregorian date)
-
- y = year
- g = y % 19
- e = 0
- if method < 3:
- # Old method
- i = (19*g + 15) % 30
- j = (y + y//4 + i) % 7
- if method == 2:
- # Extra dates to convert Julian to Gregorian date
- e = 10
- if y > 1600:
- e = e + y//100 - 16 - (y//100 - 16)//4
- else:
- # New method
- c = y//100
- h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
- i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
- j = (y + y//4 + i + 2 - c + c//4) % 7
-
- # p can be from -6 to 56 corresponding to dates 22 March to 23 May
- # (later dates apply to method 2, although 23 May never actually occurs)
- p = i - j + e
- d = 1 + (p + 27 + (p + 6)//40) % 31
- m = 3 + (p + 26)//30
- return datetime.date(int(y), int(m), int(d))
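-
-
-# A minimal usage sketch (not part of dateutil's source): Western and
-# Orthodox Easter for the same year.
-def _easter_example():
-    western = easter(2018)                    # datetime.date(2018, 4, 1)
-    orthodox = easter(2018, EASTER_ORTHODOX)  # datetime.date(2018, 4, 8)
-    return western, orthodox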
diff --git a/venv/Lib/site-packages/dateutil/parser.py b/venv/Lib/site-packages/dateutil/parser.py
deleted file mode 100644
index 595331f..0000000
--- a/venv/Lib/site-packages/dateutil/parser.py
+++ /dev/null
@@ -1,1374 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module offers a generic date/time string parser which is able to parse
-most known formats to represent a date and/or time.
-
-This module attempts to be forgiving with regards to unlikely input formats,
-returning a datetime object even for dates which are ambiguous. If an element
-of a date/time stamp is omitted, the following rules are applied:
-- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
- on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
- specified.
-- If a time zone is omitted, a timezone-naive datetime is returned.
-
-If any other elements are missing, they are taken from the
-:class:`datetime.datetime` object passed to the parameter ``default``. If this
-results in a day number exceeding the valid number of days per month, the
-value falls back to the end of the month.
-
-Additional resources about date/time string formats can be found below:
-
-- `A summary of the international standard date and time notation
-  <http://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
-- `W3C Date and Time Formats <http://www.w3.org/TR/NOTE-datetime>`_
-- `Time Formats (Planetary Rings Node) <http://pds-rings.seti.org/tools/time_formats.html>`_
-- `CPAN ParseDate module
-  <http://search.cpan.org/~muir/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
-- `Java SimpleDateFormat Class
-  <https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
-"""
-from __future__ import unicode_literals
-
-import datetime
-import string
-import time
-import collections
-import re
-from io import StringIO
-from calendar import monthrange
-
-from six import text_type, binary_type, integer_types
-
-from . import relativedelta
-from . import tz
-
-__all__ = ["parse", "parserinfo"]
-
-
-class _timelex(object):
- # Fractional seconds are sometimes split by a comma
- _split_decimal = re.compile("([.,])")
-
- def __init__(self, instream):
- if isinstance(instream, binary_type):
- instream = instream.decode()
-
- if isinstance(instream, text_type):
- instream = StringIO(instream)
-
- if getattr(instream, 'read', None) is None:
- raise TypeError('Parser must be a string or character stream, not '
- '{itype}'.format(itype=instream.__class__.__name__))
-
- self.instream = instream
- self.charstack = []
- self.tokenstack = []
- self.eof = False
-
- def get_token(self):
- """
- This function breaks the time string into lexical units (tokens), which
- can be parsed by the parser. Lexical units are demarcated by changes in
- the character set, so any continuous string of letters is considered
- one unit, any continuous string of numbers is considered one unit.
-
- The main complication arises from the fact that dots ('.') can be used
- both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
- "4:30:21.447"). As such, it is necessary to read the full context of
- any dot-separated strings before breaking it into tokens; as such, this
- function maintains a "token stack", for when the ambiguous context
- demands that multiple tokens be parsed at once.
- """
- if self.tokenstack:
- return self.tokenstack.pop(0)
-
- seenletters = False
- token = None
- state = None
-
- while not self.eof:
- # We only realize that we've reached the end of a token when we
- # find a character that's not part of the current token - since
- # that character may be part of the next token, it's stored in the
- # charstack.
- if self.charstack:
- nextchar = self.charstack.pop(0)
- else:
- nextchar = self.instream.read(1)
- while nextchar == '\x00':
- nextchar = self.instream.read(1)
-
- if not nextchar:
- self.eof = True
- break
- elif not state:
- # First character of the token - determines if we're starting
- # to parse a word, a number or something else.
- token = nextchar
- if self.isword(nextchar):
- state = 'a'
- elif self.isnum(nextchar):
- state = '0'
- elif self.isspace(nextchar):
- token = ' '
- break # emit token
- else:
- break # emit token
- elif state == 'a':
- # If we've already started reading a word, we keep reading
- # letters until we find something that's not part of a word.
- seenletters = True
- if self.isword(nextchar):
- token += nextchar
- elif nextchar == '.':
- token += nextchar
- state = 'a.'
- else:
- self.charstack.append(nextchar)
- break # emit token
- elif state == '0':
- # If we've already started reading a number, we keep reading
- # numbers until we find something that doesn't fit.
- if self.isnum(nextchar):
- token += nextchar
- elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
- token += nextchar
- state = '0.'
- else:
- self.charstack.append(nextchar)
- break # emit token
- elif state == 'a.':
- # If we've seen some letters and a dot separator, continue
- # parsing, and the tokens will be broken up later.
- seenletters = True
- if nextchar == '.' or self.isword(nextchar):
- token += nextchar
- elif self.isnum(nextchar) and token[-1] == '.':
- token += nextchar
- state = '0.'
- else:
- self.charstack.append(nextchar)
- break # emit token
- elif state == '0.':
- # If we've seen at least one dot separator, keep going, we'll
- # break up the tokens later.
- if nextchar == '.' or self.isnum(nextchar):
- token += nextchar
- elif self.isword(nextchar) and token[-1] == '.':
- token += nextchar
- state = 'a.'
- else:
- self.charstack.append(nextchar)
- break # emit token
-
- if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
- token[-1] in '.,')):
- l = self._split_decimal.split(token)
- token = l[0]
- for tok in l[1:]:
- if tok:
- self.tokenstack.append(tok)
-
- if state == '0.' and token.count('.') == 0:
- token = token.replace(',', '.')
-
- return token
-
- def __iter__(self):
- return self
-
- def __next__(self):
- token = self.get_token()
- if token is None:
- raise StopIteration
-
- return token
-
- def next(self):
- return self.__next__() # Python 2.x support
-
- @classmethod
- def split(cls, s):
- return list(cls(s))
-
- @classmethod
- def isword(cls, nextchar):
- """ Whether or not the next character is part of a word """
- return nextchar.isalpha()
-
- @classmethod
- def isnum(cls, nextchar):
- """ Whether the next character is part of a number """
- return nextchar.isdigit()
-
- @classmethod
- def isspace(cls, nextchar):
- """ Whether the next character is whitespace """
- return nextchar.isspace()
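-
-
-# A minimal sketch (not part of dateutil's source) of the tokenization
-# described above: letters, digits and separators become separate tokens
-# while "21.447" keeps its decimal fraction intact.
-def _timelex_example():
-    return _timelex.split('Sep.20.2009 4:30:21.447')
-    # -> ['Sep', '.', '20', '.', '2009', ' ', '4', ':', '30', ':', '21.447']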
-
-
-class _resultbase(object):
-
- def __init__(self):
- for attr in self.__slots__:
- setattr(self, attr, None)
-
- def _repr(self, classname):
- l = []
- for attr in self.__slots__:
- value = getattr(self, attr)
- if value is not None:
- l.append("%s=%s" % (attr, repr(value)))
- return "%s(%s)" % (classname, ", ".join(l))
-
- def __len__(self):
- return (sum(getattr(self, attr) is not None
- for attr in self.__slots__))
-
- def __repr__(self):
- return self._repr(self.__class__.__name__)
-
-
-class parserinfo(object):
- """
- Class which handles what inputs are accepted. Subclass this to customize
- the language and acceptable values for each parameter.
-
- :param dayfirst:
- Whether to interpret the first value in an ambiguous 3-integer date
- (e.g. 01/05/09) as the day (``True``) or month (``False``). If
- ``yearfirst`` is set to ``True``, this distinguishes between YDM
- and YMD. Default is ``False``.
-
- :param yearfirst:
- Whether to interpret the first value in an ambiguous 3-integer date
- (e.g. 01/05/09) as the year. If ``True``, the first number is taken
- to be the year, otherwise the last number is taken to be the year.
- Default is ``False``.
- """
-
- # m from a.m/p.m, t from ISO T separator
- JUMP = [" ", ".", ",", ";", "-", "/", "'",
- "at", "on", "and", "ad", "m", "t", "of",
- "st", "nd", "rd", "th"]
-
- WEEKDAYS = [("Mon", "Monday"),
- ("Tue", "Tuesday"),
- ("Wed", "Wednesday"),
- ("Thu", "Thursday"),
- ("Fri", "Friday"),
- ("Sat", "Saturday"),
- ("Sun", "Sunday")]
- MONTHS = [("Jan", "January"),
- ("Feb", "February"),
- ("Mar", "March"),
- ("Apr", "April"),
- ("May", "May"),
- ("Jun", "June"),
- ("Jul", "July"),
- ("Aug", "August"),
- ("Sep", "Sept", "September"),
- ("Oct", "October"),
- ("Nov", "November"),
- ("Dec", "December")]
- HMS = [("h", "hour", "hours"),
- ("m", "minute", "minutes"),
- ("s", "second", "seconds")]
- AMPM = [("am", "a"),
- ("pm", "p")]
- UTCZONE = ["UTC", "GMT", "Z"]
- PERTAIN = ["of"]
- TZOFFSET = {}
-
- def __init__(self, dayfirst=False, yearfirst=False):
- self._jump = self._convert(self.JUMP)
- self._weekdays = self._convert(self.WEEKDAYS)
- self._months = self._convert(self.MONTHS)
- self._hms = self._convert(self.HMS)
- self._ampm = self._convert(self.AMPM)
- self._utczone = self._convert(self.UTCZONE)
- self._pertain = self._convert(self.PERTAIN)
-
- self.dayfirst = dayfirst
- self.yearfirst = yearfirst
-
- self._year = time.localtime().tm_year
- self._century = self._year // 100 * 100
-
- def _convert(self, lst):
- dct = {}
- for i, v in enumerate(lst):
- if isinstance(v, tuple):
- for v in v:
- dct[v.lower()] = i
- else:
- dct[v.lower()] = i
- return dct
-
- def jump(self, name):
- return name.lower() in self._jump
-
- def weekday(self, name):
- if len(name) >= min(len(n) for n in self._weekdays.keys()):
- try:
- return self._weekdays[name.lower()]
- except KeyError:
- pass
- return None
-
- def month(self, name):
- if len(name) >= min(len(n) for n in self._months.keys()):
- try:
- return self._months[name.lower()] + 1
- except KeyError:
- pass
- return None
-
- def hms(self, name):
- try:
- return self._hms[name.lower()]
- except KeyError:
- return None
-
- def ampm(self, name):
- try:
- return self._ampm[name.lower()]
- except KeyError:
- return None
-
- def pertain(self, name):
- return name.lower() in self._pertain
-
- def utczone(self, name):
- return name.lower() in self._utczone
-
- def tzoffset(self, name):
- if name in self._utczone:
- return 0
-
- return self.TZOFFSET.get(name)
-
- def convertyear(self, year, century_specified=False):
- if year < 100 and not century_specified:
- year += self._century
- if abs(year - self._year) >= 50:
- if year < self._year:
- year += 100
- else:
- year -= 100
- return year
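-
-    # Illustrative (assumed example; supposing the code runs in 2017, so
-    # self._century == 2000):
-    #   convertyear(5)  -> 2005
-    #   convertyear(70) -> 1970   (2070 would be 50+ years ahead of 2017)
-    #   convertyear(70, century_specified=True) -> 70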
-
- def validate(self, res):
- # move to info
- if res.year is not None:
- res.year = self.convertyear(res.year, res.century_specified)
-
- if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
- res.tzname = "UTC"
- res.tzoffset = 0
- elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
- res.tzoffset = 0
- return True
-
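-# Illustrative sketch (assumed example, not part of the library): the parser
-# vocabulary can be customized by subclassing parserinfo and overriding the
-# class-level lists defined above, e.g.:
-#
-#     class GermanParserInfo(parserinfo):
-#         WEEKDAYS = [("Mo", "Montag"), ("Di", "Dienstag"),
-#                     ("Mi", "Mittwoch"), ("Do", "Donnerstag"),
-#                     ("Fr", "Freitag"), ("Sa", "Samstag"),
-#                     ("So", "Sonntag")]
-#
-#     parser(GermanParserInfo()).parse("Montag, 13.03.2017")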
-
-class _ymd(list):
- def __init__(self, tzstr, *args, **kwargs):
- super(self.__class__, self).__init__(*args, **kwargs)
- self.century_specified = False
- self.tzstr = tzstr
-
- @staticmethod
- def token_could_be_year(token, year):
- try:
- return int(token) == year
- except ValueError:
- return False
-
- @staticmethod
- def find_potential_year_tokens(year, tokens):
- return [token for token in tokens if _ymd.token_could_be_year(token, year)]
-
- def find_probable_year_index(self, tokens):
- """
-        Attempt to deduce if a pre-100 year was lost
-        due to padded zeros being taken off.
- """
- for index, token in enumerate(self):
- potential_year_tokens = _ymd.find_potential_year_tokens(token, tokens)
- if len(potential_year_tokens) == 1 and len(potential_year_tokens[0]) > 2:
- return index
-
- def append(self, val):
- if hasattr(val, '__len__'):
- if val.isdigit() and len(val) > 2:
- self.century_specified = True
- elif val > 100:
- self.century_specified = True
-
- super(self.__class__, self).append(int(val))
-
- def resolve_ymd(self, mstridx, yearfirst, dayfirst):
- len_ymd = len(self)
- year, month, day = (None, None, None)
-
- if len_ymd > 3:
- raise ValueError("More than three YMD values")
- elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
- # One member, or two members with a month string
- if mstridx != -1:
- month = self[mstridx]
- del self[mstridx]
-
- if len_ymd > 1 or mstridx == -1:
- if self[0] > 31:
- year = self[0]
- else:
- day = self[0]
-
- elif len_ymd == 2:
- # Two members with numbers
- if self[0] > 31:
- # 99-01
- year, month = self
- elif self[1] > 31:
- # 01-99
- month, year = self
- elif dayfirst and self[1] <= 12:
- # 13-01
- day, month = self
- else:
- # 01-13
- month, day = self
-
- elif len_ymd == 3:
- # Three members
- if mstridx == 0:
- month, day, year = self
- elif mstridx == 1:
- if self[0] > 31 or (yearfirst and self[2] <= 31):
- # 99-Jan-01
- year, month, day = self
- else:
- # 01-Jan-01
-                    # Give precedence to day-first, since
-                    # two-digit years are usually hand-written.
- day, month, year = self
-
- elif mstridx == 2:
-                # Month in the last position is an unusual ordering.
- if self[1] > 31:
- # 01-99-Jan
- day, year, month = self
- else:
- # 99-01-Jan
- year, day, month = self
-
- else:
- if self[0] > 31 or \
- self.find_probable_year_index(_timelex.split(self.tzstr)) == 0 or \
- (yearfirst and self[1] <= 12 and self[2] <= 31):
- # 99-01-01
- if dayfirst and self[2] <= 12:
- year, day, month = self
- else:
- year, month, day = self
- elif self[0] > 12 or (dayfirst and self[1] <= 12):
- # 13-01-01
- day, month, year = self
- else:
- # 01-13-01
- month, day, year = self
-
- return year, month, day
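-
-    # Illustrative resolutions of the ambiguous triple 01/05/09, as exposed
-    # through parse() (a sketch, not exhaustive):
-    #   dayfirst=False, yearfirst=False -> 2009-01-05  (month, day, year)
-    #   dayfirst=True                   -> 2009-05-01  (day, month, year)
-    #   yearfirst=True                  -> 2001-05-09  (year, month, day)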
-
-
-class parser(object):
- def __init__(self, info=None):
- self.info = info or parserinfo()
-
- def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs):
- """
- Parse the date/time string into a :class:`datetime.datetime` object.
-
- :param timestr:
- Any date/time string using the supported formats.
-
- :param default:
-            The default datetime object. If this is a datetime object and
-            not ``None``, elements specified in ``timestr`` replace the
-            corresponding elements in the default object.
-
- :param ignoretz:
- If set ``True``, time zones in parsed strings are ignored and a
- naive :class:`datetime.datetime` object is returned.
-
- :param tzinfos:
- Additional time zone names / aliases which may be present in the
- string. This argument maps time zone names (and optionally offsets
- from those time zones) to time zones. This parameter can be a
- dictionary with timezone aliases mapping time zone names to time
- zones or a function taking two parameters (``tzname`` and
- ``tzoffset``) and returning a time zone.
-
- The timezones to which the names are mapped can be an integer
- offset from UTC in minutes or a :class:`tzinfo` object.
-
- .. doctest::
- :options: +NORMALIZE_WHITESPACE
-
- >>> from dateutil.parser import parse
- >>> from dateutil.tz import gettz
- >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
- >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
- datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
- >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
- datetime.datetime(2012, 1, 19, 17, 21,
- tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
-
- This parameter is ignored if ``ignoretz`` is set.
-
- :param **kwargs:
- Keyword arguments as passed to ``_parse()``.
-
- :return:
- Returns a :class:`datetime.datetime` object or, if the
- ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
- first element being a :class:`datetime.datetime` object, the second
- a tuple containing the fuzzy tokens.
-
- :raises ValueError:
- Raised for invalid or unknown string format, if the provided
- :class:`tzinfo` is not in a valid format, or if an invalid date
- would be created.
-
- :raises TypeError:
- Raised for non-string or character stream input.
-
- :raises OverflowError:
- Raised if the parsed date exceeds the largest valid C integer on
- your system.
- """
-
- if default is None:
- default = datetime.datetime.now().replace(hour=0, minute=0,
- second=0, microsecond=0)
-
- res, skipped_tokens = self._parse(timestr, **kwargs)
-
- if res is None:
- raise ValueError("Unknown string format")
-
- if len(res) == 0:
- raise ValueError("String does not contain a date.")
-
- repl = {}
- for attr in ("year", "month", "day", "hour",
- "minute", "second", "microsecond"):
- value = getattr(res, attr)
- if value is not None:
- repl[attr] = value
-
- if 'day' not in repl:
- # If the default day exceeds the last day of the month, fall back to
- # the end of the month.
- cyear = default.year if res.year is None else res.year
- cmonth = default.month if res.month is None else res.month
- cday = default.day if res.day is None else res.day
-
- if cday > monthrange(cyear, cmonth)[1]:
- repl['day'] = monthrange(cyear, cmonth)[1]
-
- ret = default.replace(**repl)
-
- if res.weekday is not None and not res.day:
- ret = ret+relativedelta.relativedelta(weekday=res.weekday)
-
- if not ignoretz:
- if (isinstance(tzinfos, collections.Callable) or
- tzinfos and res.tzname in tzinfos):
-
- if isinstance(tzinfos, collections.Callable):
- tzdata = tzinfos(res.tzname, res.tzoffset)
- else:
- tzdata = tzinfos.get(res.tzname)
-
- if isinstance(tzdata, datetime.tzinfo):
- tzinfo = tzdata
- elif isinstance(tzdata, text_type):
- tzinfo = tz.tzstr(tzdata)
- elif isinstance(tzdata, integer_types):
- tzinfo = tz.tzoffset(res.tzname, tzdata)
- else:
- raise ValueError("Offset must be tzinfo subclass, "
- "tz string, or int offset.")
- ret = ret.replace(tzinfo=tzinfo)
- elif res.tzname and res.tzname in time.tzname:
- ret = ret.replace(tzinfo=tz.tzlocal())
- elif res.tzoffset == 0:
- ret = ret.replace(tzinfo=tz.tzutc())
- elif res.tzoffset:
- ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
-
- if kwargs.get('fuzzy_with_tokens', False):
- return ret, skipped_tokens
- else:
- return ret
-
- class _result(_resultbase):
- __slots__ = ["year", "month", "day", "weekday",
- "hour", "minute", "second", "microsecond",
- "tzname", "tzoffset", "ampm"]
-
- def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
- fuzzy_with_tokens=False):
- """
- Private method which performs the heavy lifting of parsing, called from
- ``parse()``, which passes on its ``kwargs`` to this function.
-
- :param timestr:
- The string to parse.
-
- :param dayfirst:
- Whether to interpret the first value in an ambiguous 3-integer date
- (e.g. 01/05/09) as the day (``True``) or month (``False``). If
- ``yearfirst`` is set to ``True``, this distinguishes between YDM
- and YMD. If set to ``None``, this value is retrieved from the
- current :class:`parserinfo` object (which itself defaults to
- ``False``).
-
- :param yearfirst:
- Whether to interpret the first value in an ambiguous 3-integer date
- (e.g. 01/05/09) as the year. If ``True``, the first number is taken
- to be the year, otherwise the last number is taken to be the year.
- If this is set to ``None``, the value is retrieved from the current
- :class:`parserinfo` object (which itself defaults to ``False``).
-
- :param fuzzy:
-            Whether to allow fuzzy parsing, allowing for strings like "Today
-            is January 1, 2047 at 8:21:00AM".
-
- :param fuzzy_with_tokens:
-            If ``True``, ``fuzzy`` is automatically set to ``True``, and the
-            parser will return a tuple where the first element is the parsed
-            :class:`datetime.datetime` timestamp and the second element is
-            a tuple containing the portions of the string which were ignored:
-
- .. doctest::
-
- >>> from dateutil.parser import parse
- >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
- (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
-
- """
- if fuzzy_with_tokens:
- fuzzy = True
-
- info = self.info
-
- if dayfirst is None:
- dayfirst = info.dayfirst
-
- if yearfirst is None:
- yearfirst = info.yearfirst
-
- res = self._result()
- l = _timelex.split(timestr) # Splits the timestr into tokens
-
- # keep up with the last token skipped so we can recombine
- # consecutively skipped tokens (-2 for when i begins at 0).
- last_skipped_token_i = -2
- skipped_tokens = list()
-
- try:
- # year/month/day list
- ymd = _ymd(timestr)
-
- # Index of the month string in ymd
- mstridx = -1
-
- len_l = len(l)
- i = 0
- while i < len_l:
-
- # Check if it's a number
- try:
- value_repr = l[i]
- value = float(value_repr)
- except ValueError:
- value = None
-
- if value is not None:
- # Token is a number
- len_li = len(l[i])
- i += 1
-
- if (len(ymd) == 3 and len_li in (2, 4)
- and res.hour is None and (i >= len_l or (l[i] != ':' and
- info.hms(l[i]) is None))):
- # 19990101T23[59]
- s = l[i-1]
- res.hour = int(s[:2])
-
- if len_li == 4:
- res.minute = int(s[2:])
-
- elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
- # YYMMDD or HHMMSS[.ss]
- s = l[i-1]
-
- if not ymd and l[i-1].find('.') == -1:
- #ymd.append(info.convertyear(int(s[:2])))
-
- ymd.append(s[:2])
- ymd.append(s[2:4])
- ymd.append(s[4:])
- else:
- # 19990101T235959[.59]
- res.hour = int(s[:2])
- res.minute = int(s[2:4])
- res.second, res.microsecond = _parsems(s[4:])
-
- elif len_li in (8, 12, 14):
- # YYYYMMDD
- s = l[i-1]
- ymd.append(s[:4])
- ymd.append(s[4:6])
- ymd.append(s[6:8])
-
- if len_li > 8:
- res.hour = int(s[8:10])
- res.minute = int(s[10:12])
-
- if len_li > 12:
- res.second = int(s[12:])
-
- elif ((i < len_l and info.hms(l[i]) is not None) or
- (i+1 < len_l and l[i] == ' ' and
- info.hms(l[i+1]) is not None)):
-
- # HH[ ]h or MM[ ]m or SS[.ss][ ]s
- if l[i] == ' ':
- i += 1
-
- idx = info.hms(l[i])
-
- while True:
- if idx == 0:
- res.hour = int(value)
-
- if value % 1:
- res.minute = int(60*(value % 1))
-
- elif idx == 1:
- res.minute = int(value)
-
- if value % 1:
- res.second = int(60*(value % 1))
-
- elif idx == 2:
- res.second, res.microsecond = \
- _parsems(value_repr)
-
- i += 1
-
- if i >= len_l or idx == 2:
- break
-
- # 12h00
- try:
- value_repr = l[i]
- value = float(value_repr)
- except ValueError:
- break
- else:
- i += 1
- idx += 1
-
- if i < len_l:
- newidx = info.hms(l[i])
-
- if newidx is not None:
- idx = newidx
-
- elif (i == len_l and l[i-2] == ' ' and
- info.hms(l[i-3]) is not None):
- # X h MM or X m SS
- idx = info.hms(l[i-3])
-
- if idx == 0: # h
- res.minute = int(value)
-
- sec_remainder = value % 1
- if sec_remainder:
- res.second = int(60 * sec_remainder)
- elif idx == 1: # m
- res.second, res.microsecond = \
- _parsems(value_repr)
-
- # We don't need to advance the tokens here because the
- # i == len_l call indicates that we're looking at all
- # the tokens already.
-
- elif i+1 < len_l and l[i] == ':':
- # HH:MM[:SS[.ss]]
- res.hour = int(value)
- i += 1
- value = float(l[i])
- res.minute = int(value)
-
- if value % 1:
- res.second = int(60*(value % 1))
-
- i += 1
-
- if i < len_l and l[i] == ':':
- res.second, res.microsecond = _parsems(l[i+1])
- i += 2
-
- elif i < len_l and l[i] in ('-', '/', '.'):
- sep = l[i]
- ymd.append(value_repr)
- i += 1
-
- if i < len_l and not info.jump(l[i]):
- try:
- # 01-01[-01]
- ymd.append(l[i])
- except ValueError:
- # 01-Jan[-01]
- value = info.month(l[i])
-
- if value is not None:
- ymd.append(value)
- assert mstridx == -1
- mstridx = len(ymd)-1
- else:
- return None, None
-
- i += 1
-
- if i < len_l and l[i] == sep:
- # We have three members
- i += 1
- value = info.month(l[i])
-
-                                if value is not None:
-                                    ymd.append(value)
-                                    assert mstridx == -1
-                                    mstridx = len(ymd)-1
- else:
- ymd.append(l[i])
-
- i += 1
- elif i >= len_l or info.jump(l[i]):
- if i+1 < len_l and info.ampm(l[i+1]) is not None:
- # 12 am
- res.hour = int(value)
-
- if res.hour < 12 and info.ampm(l[i+1]) == 1:
- res.hour += 12
- elif res.hour == 12 and info.ampm(l[i+1]) == 0:
- res.hour = 0
-
- i += 1
- else:
- # Year, month or day
- ymd.append(value)
- i += 1
- elif info.ampm(l[i]) is not None:
-
- # 12am
- res.hour = int(value)
-
- if res.hour < 12 and info.ampm(l[i]) == 1:
- res.hour += 12
- elif res.hour == 12 and info.ampm(l[i]) == 0:
- res.hour = 0
- i += 1
-
- elif not fuzzy:
- return None, None
- else:
- i += 1
- continue
-
- # Check weekday
- value = info.weekday(l[i])
- if value is not None:
- res.weekday = value
- i += 1
- continue
-
- # Check month name
- value = info.month(l[i])
- if value is not None:
- ymd.append(value)
- assert mstridx == -1
- mstridx = len(ymd)-1
-
- i += 1
- if i < len_l:
- if l[i] in ('-', '/'):
- # Jan-01[-99]
- sep = l[i]
- i += 1
- ymd.append(l[i])
- i += 1
-
- if i < len_l and l[i] == sep:
- # Jan-01-99
- i += 1
- ymd.append(l[i])
- i += 1
-
- elif (i+3 < len_l and l[i] == l[i+2] == ' '
- and info.pertain(l[i+1])):
- # Jan of 01
- # In this case, 01 is clearly year
- try:
- value = int(l[i+3])
- except ValueError:
- # Wrong guess
- pass
- else:
- # Convert it here to become unambiguous
- ymd.append(str(info.convertyear(value)))
- i += 4
- continue
-
- # Check am/pm
- value = info.ampm(l[i])
- if value is not None:
- # For fuzzy parsing, 'a' or 'am' (both valid English words)
- # may erroneously trigger the AM/PM flag. Deal with that
- # here.
- val_is_ampm = True
-
- # If there's already an AM/PM flag, this one isn't one.
- if fuzzy and res.ampm is not None:
- val_is_ampm = False
-
- # If AM/PM is found and hour is not, raise a ValueError
- if res.hour is None:
- if fuzzy:
- val_is_ampm = False
- else:
- raise ValueError('No hour specified with ' +
- 'AM or PM flag.')
- elif not 0 <= res.hour <= 12:
- # If AM/PM is found, it's a 12 hour clock, so raise
- # an error for invalid range
- if fuzzy:
- val_is_ampm = False
- else:
- raise ValueError('Invalid hour specified for ' +
- '12-hour clock.')
-
- if val_is_ampm:
- if value == 1 and res.hour < 12:
- res.hour += 12
- elif value == 0 and res.hour == 12:
- res.hour = 0
-
- res.ampm = value
-
- elif fuzzy:
- last_skipped_token_i = self._skip_token(skipped_tokens,
- last_skipped_token_i, i, l)
- i += 1
- continue
-
- # Check for a timezone name
- if (res.hour is not None and len(l[i]) <= 5 and
- res.tzname is None and res.tzoffset is None and
- not [x for x in l[i] if x not in
- string.ascii_uppercase]):
- res.tzname = l[i]
- res.tzoffset = info.tzoffset(res.tzname)
- i += 1
-
- # Check for something like GMT+3, or BRST+3. Notice
- # that it doesn't mean "I am 3 hours after GMT", but
- # "my time +3 is GMT". If found, we reverse the
- # logic so that timezone parsing code will get it
- # right.
- if i < len_l and l[i] in ('+', '-'):
- l[i] = ('+', '-')[l[i] == '+']
- res.tzoffset = None
- if info.utczone(res.tzname):
- # With something like GMT+3, the timezone
- # is *not* GMT.
- res.tzname = None
-
- continue
-
- # Check for a numbered timezone
- if res.hour is not None and l[i] in ('+', '-'):
- signal = (-1, 1)[l[i] == '+']
- i += 1
- len_li = len(l[i])
-
- if len_li == 4:
- # -0300
- res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
- elif i+1 < len_l and l[i+1] == ':':
- # -03:00
- res.tzoffset = int(l[i])*3600+int(l[i+2])*60
- i += 2
- elif len_li <= 2:
- # -[0]3
- res.tzoffset = int(l[i][:2])*3600
- else:
- return None, None
- i += 1
-
- res.tzoffset *= signal
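-
-                    # e.g. tokens "-", "0300" give res.tzoffset == -10800 (seconds)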
-
- # Look for a timezone name between parenthesis
- if (i+3 < len_l and
- info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
- 3 <= len(l[i+2]) <= 5 and
- not [x for x in l[i+2]
- if x not in string.ascii_uppercase]):
- # -0300 (BRST)
- res.tzname = l[i+2]
- i += 4
- continue
-
- # Check jumps
- if not (info.jump(l[i]) or fuzzy):
- return None, None
-
- last_skipped_token_i = self._skip_token(skipped_tokens,
- last_skipped_token_i, i, l)
- i += 1
-
- # Process year/month/day
- year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst)
- if year is not None:
- res.year = year
- res.century_specified = ymd.century_specified
-
- if month is not None:
- res.month = month
-
- if day is not None:
- res.day = day
-
- except (IndexError, ValueError, AssertionError):
- return None, None
-
- if not info.validate(res):
- return None, None
-
- if fuzzy_with_tokens:
- return res, tuple(skipped_tokens)
- else:
- return res, None
-
- @staticmethod
- def _skip_token(skipped_tokens, last_skipped_token_i, i, l):
- if last_skipped_token_i == i - 1:
- # recombine the tokens
- skipped_tokens[-1] += l[i]
- else:
- # just append
- skipped_tokens.append(l[i])
- last_skipped_token_i = i
- return last_skipped_token_i
-
-
-DEFAULTPARSER = parser()
-
-
-def parse(timestr, parserinfo=None, **kwargs):
- """
-
- Parse a string in one of the supported formats, using the
- ``parserinfo`` parameters.
-
- :param timestr:
- A string containing a date/time stamp.
-
- :param parserinfo:
- A :class:`parserinfo` object containing parameters for the parser.
- If ``None``, the default arguments to the :class:`parserinfo`
- constructor are used.
-
- The ``**kwargs`` parameter takes the following keyword arguments:
-
- :param default:
-        The default datetime object. If this is a datetime object and not
-        ``None``, elements specified in ``timestr`` replace the corresponding
-        elements in the default object.
-
- :param ignoretz:
- If set ``True``, time zones in parsed strings are ignored and a naive
- :class:`datetime` object is returned.
-
- :param tzinfos:
- Additional time zone names / aliases which may be present in the
- string. This argument maps time zone names (and optionally offsets
- from those time zones) to time zones. This parameter can be a
- dictionary with timezone aliases mapping time zone names to time
- zones or a function taking two parameters (``tzname`` and
- ``tzoffset``) and returning a time zone.
-
- The timezones to which the names are mapped can be an integer
- offset from UTC in minutes or a :class:`tzinfo` object.
-
- .. doctest::
- :options: +NORMALIZE_WHITESPACE
-
- >>> from dateutil.parser import parse
- >>> from dateutil.tz import gettz
- >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
- >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
- datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
- >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
- datetime.datetime(2012, 1, 19, 17, 21,
- tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
-
- This parameter is ignored if ``ignoretz`` is set.
-
- :param dayfirst:
- Whether to interpret the first value in an ambiguous 3-integer date
- (e.g. 01/05/09) as the day (``True``) or month (``False``). If
- ``yearfirst`` is set to ``True``, this distinguishes between YDM and
- YMD. If set to ``None``, this value is retrieved from the current
- :class:`parserinfo` object (which itself defaults to ``False``).
-
- :param yearfirst:
- Whether to interpret the first value in an ambiguous 3-integer date
- (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
- be the year, otherwise the last number is taken to be the year. If
- this is set to ``None``, the value is retrieved from the current
- :class:`parserinfo` object (which itself defaults to ``False``).
-
- :param fuzzy:
-        Whether to allow fuzzy parsing, allowing for strings like "Today is
-        January 1, 2047 at 8:21:00AM".
-
- :param fuzzy_with_tokens:
-        If ``True``, ``fuzzy`` is automatically set to ``True``, and the
-        parser will return a tuple where the first element is the parsed
-        :class:`datetime.datetime` timestamp and the second element is
-        a tuple containing the portions of the string which were ignored:
-
- .. doctest::
-
- >>> from dateutil.parser import parse
- >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
- (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
-
- :return:
- Returns a :class:`datetime.datetime` object or, if the
- ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
- first element being a :class:`datetime.datetime` object, the second
- a tuple containing the fuzzy tokens.
-
- :raises ValueError:
- Raised for invalid or unknown string format, if the provided
- :class:`tzinfo` is not in a valid format, or if an invalid date
- would be created.
-
- :raises OverflowError:
- Raised if the parsed date exceeds the largest valid C integer on
- your system.
- """
- if parserinfo:
- return parser(parserinfo).parse(timestr, **kwargs)
- else:
- return DEFAULTPARSER.parse(timestr, **kwargs)
-
-
-class _tzparser(object):
-
- class _result(_resultbase):
-
- __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
- "start", "end"]
-
- class _attr(_resultbase):
- __slots__ = ["month", "week", "weekday",
- "yday", "jyday", "day", "time"]
-
- def __repr__(self):
- return self._repr("")
-
- def __init__(self):
- _resultbase.__init__(self)
- self.start = self._attr()
- self.end = self._attr()
-
- def parse(self, tzstr):
- res = self._result()
- l = _timelex.split(tzstr)
- try:
-
- len_l = len(l)
-
- i = 0
- while i < len_l:
- # BRST+3[BRDT[+2]]
- j = i
- while j < len_l and not [x for x in l[j]
- if x in "0123456789:,-+"]:
- j += 1
- if j != i:
- if not res.stdabbr:
- offattr = "stdoffset"
- res.stdabbr = "".join(l[i:j])
- else:
- offattr = "dstoffset"
- res.dstabbr = "".join(l[i:j])
- i = j
- if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
- "0123456789")):
- if l[i] in ('+', '-'):
- # Yes, that's right. See the TZ variable
- # documentation.
- signal = (1, -1)[l[i] == '+']
- i += 1
- else:
- signal = -1
- len_li = len(l[i])
- if len_li == 4:
- # -0300
- setattr(res, offattr, (int(l[i][:2])*3600 +
- int(l[i][2:])*60)*signal)
- elif i+1 < len_l and l[i+1] == ':':
- # -03:00
- setattr(res, offattr,
- (int(l[i])*3600+int(l[i+2])*60)*signal)
- i += 2
- elif len_li <= 2:
- # -[0]3
- setattr(res, offattr,
- int(l[i][:2])*3600*signal)
- else:
- return None
- i += 1
- if res.dstabbr:
- break
- else:
- break
-
- if i < len_l:
- for j in range(i, len_l):
- if l[j] == ';':
- l[j] = ','
-
- assert l[i] == ','
-
- i += 1
-
- if i >= len_l:
- pass
- elif (8 <= l.count(',') <= 9 and
- not [y for x in l[i:] if x != ','
- for y in x if y not in "0123456789"]):
- # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
- for x in (res.start, res.end):
- x.month = int(l[i])
- i += 2
- if l[i] == '-':
- value = int(l[i+1])*-1
- i += 1
- else:
- value = int(l[i])
- i += 2
- if value:
- x.week = value
- x.weekday = (int(l[i])-1) % 7
- else:
- x.day = int(l[i])
- i += 2
- x.time = int(l[i])
- i += 2
- if i < len_l:
- if l[i] in ('-', '+'):
- signal = (-1, 1)[l[i] == "+"]
- i += 1
- else:
- signal = 1
- res.dstoffset = (res.stdoffset+int(l[i]))*signal
- elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
- not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
- '.', '-', ':')
- for y in x if y not in "0123456789"]):
- for x in (res.start, res.end):
- if l[i] == 'J':
- # non-leap year day (1 based)
- i += 1
- x.jyday = int(l[i])
- elif l[i] == 'M':
- # month[-.]week[-.]weekday
- i += 1
- x.month = int(l[i])
- i += 1
- assert l[i] in ('-', '.')
- i += 1
- x.week = int(l[i])
- if x.week == 5:
- x.week = -1
- i += 1
- assert l[i] in ('-', '.')
- i += 1
- x.weekday = (int(l[i])-1) % 7
- else:
- # year day (zero based)
- x.yday = int(l[i])+1
-
- i += 1
-
- if i < len_l and l[i] == '/':
- i += 1
- # start time
- len_li = len(l[i])
- if len_li == 4:
- # -0300
- x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
- elif i+1 < len_l and l[i+1] == ':':
- # -03:00
- x.time = int(l[i])*3600+int(l[i+2])*60
- i += 2
- if i+1 < len_l and l[i+1] == ':':
- i += 2
- x.time += int(l[i])
- elif len_li <= 2:
- # -[0]3
- x.time = (int(l[i][:2])*3600)
- else:
- return None
- i += 1
-
- assert i == len_l or l[i] == ','
-
- i += 1
-
- assert i >= len_l
-
- except (IndexError, ValueError, AssertionError):
- return None
-
- return res
-
-
-DEFAULTTZPARSER = _tzparser()
-
-
-def _parsetz(tzstr):
- return DEFAULTTZPARSER.parse(tzstr)
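-
-# Illustrative: _parsetz("GMT0BST,3,0,30,3600,10,0,26,7200") parses the
-# std/dst abbreviations and the transition rules encoded in the string
-# (the format handled above), while strings it cannot parse yield None.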
-
-
-def _parsems(value):
- """Parse a I[.F] seconds value into (seconds, microseconds)."""
- if "." not in value:
- return int(value), 0
- else:
- i, f = value.split(".")
- return int(i), int(f.ljust(6, "0")[:6])
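-
-# Illustrative: _parsems("21") == (21, 0) and _parsems("21.5") == (21, 500000).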
-
-
-# vim:ts=4:sw=4:et
diff --git a/venv/Lib/site-packages/dateutil/relativedelta.py b/venv/Lib/site-packages/dateutil/relativedelta.py
deleted file mode 100644
index 0e66afc..0000000
--- a/venv/Lib/site-packages/dateutil/relativedelta.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# -*- coding: utf-8 -*-
-import datetime
-import calendar
-
-import operator
-from math import copysign
-
-from six import integer_types
-from warnings import warn
-
-from ._common import weekday
-
-MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
-
-__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
-
-
-class relativedelta(object):
- """
- The relativedelta type is based on the specification of the excellent
- work done by M.-A. Lemburg in his
-    mx.DateTime extension.
- However, notice that this type does *NOT* implement the same algorithm as
- his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
-
- There are two different ways to build a relativedelta instance. The
- first one is passing it two date/datetime classes::
-
- relativedelta(datetime1, datetime2)
-
- The second one is passing it any number of the following keyword arguments::
-
- relativedelta(arg1=x,arg2=y,arg3=z...)
-
- year, month, day, hour, minute, second, microsecond:
- Absolute information (argument is singular); adding or subtracting a
-        relativedelta with absolute information does not perform an arithmetic
- operation, but rather REPLACES the corresponding value in the
- original datetime with the value(s) in relativedelta.
-
- years, months, weeks, days, hours, minutes, seconds, microseconds:
- Relative information, may be negative (argument is plural); adding
- or subtracting a relativedelta with relative information performs
-        the corresponding arithmetic operation on the original datetime value
-        with the information in the relativedelta (see the example at the
-        end of this docstring).
-
- weekday:
- One of the weekday instances (MO, TU, etc). These instances may
- receive a parameter N, specifying the Nth weekday, which could
-        be positive or negative (like MO(+1) or MO(-2)). Not specifying
- it is the same as specifying +1. You can also use an integer,
- where 0=MO.
-
- leapdays:
-        Will add the given days to the date found, if the year is a leap
-        year and the date found is after the 28th of February.
-
- yearday, nlyearday:
- Set the yearday or the non-leap year day (jump leap days).
- These are converted to day/month/leapdays information.
-
- Here is the behavior of operations with relativedelta:
-
- 1. Calculate the absolute year, using the 'year' argument, or the
- original datetime year, if the argument is not present.
-
- 2. Add the relative 'years' argument to the absolute year.
-
- 3. Do steps 1 and 2 for month/months.
-
- 4. Calculate the absolute day, using the 'day' argument, or the
- original datetime day, if the argument is not present. Then,
- subtract from the day until it fits in the year and month
- found after their operations.
-
- 5. Add the relative 'days' argument to the absolute day. Notice
- that the 'weeks' argument is multiplied by 7 and added to
- 'days'.
-
- 6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
- microsecond/microseconds.
-
- 7. If the 'weekday' argument is present, calculate the weekday,
- with the given (wday, nth) tuple. wday is the index of the
- weekday (0-6, 0=Mon), and nth is the number of weeks to add
-        forward or backward, depending on its sign. Notice that if
- the calculated date is already Monday, for example, using
- (0, 1) or (0, -1) won't change the day.
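-
-    For illustration, absolute (singular) arguments replace fields, while
-    relative (plural) arguments perform arithmetic::
-
-        >>> import datetime
-        >>> from dateutil.relativedelta import relativedelta
-        >>> datetime.datetime(2003, 9, 17) + relativedelta(month=1)
-        datetime.datetime(2003, 1, 17, 0, 0)
-        >>> datetime.datetime(2003, 9, 17) + relativedelta(months=1)
-        datetime.datetime(2003, 10, 17, 0, 0)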
- """
-
- def __init__(self, dt1=None, dt2=None,
- years=0, months=0, days=0, leapdays=0, weeks=0,
- hours=0, minutes=0, seconds=0, microseconds=0,
- year=None, month=None, day=None, weekday=None,
- yearday=None, nlyearday=None,
- hour=None, minute=None, second=None, microsecond=None):
-
- # Check for non-integer values in integer-only quantities
- if any(x is not None and x != int(x) for x in (years, months)):
- raise ValueError("Non-integer years and months are "
- "ambiguous and not currently supported.")
-
- if dt1 and dt2:
- # datetime is a subclass of date. So both must be date
- if not (isinstance(dt1, datetime.date) and
- isinstance(dt2, datetime.date)):
- raise TypeError("relativedelta only diffs datetime/date")
-
- # We allow two dates, or two datetimes, so we coerce them to be
- # of the same type
- if (isinstance(dt1, datetime.datetime) !=
- isinstance(dt2, datetime.datetime)):
- if not isinstance(dt1, datetime.datetime):
- dt1 = datetime.datetime.fromordinal(dt1.toordinal())
- elif not isinstance(dt2, datetime.datetime):
- dt2 = datetime.datetime.fromordinal(dt2.toordinal())
-
- self.years = 0
- self.months = 0
- self.days = 0
- self.leapdays = 0
- self.hours = 0
- self.minutes = 0
- self.seconds = 0
- self.microseconds = 0
- self.year = None
- self.month = None
- self.day = None
- self.weekday = None
- self.hour = None
- self.minute = None
- self.second = None
- self.microsecond = None
- self._has_time = 0
-
- # Get year / month delta between the two
- months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
- self._set_months(months)
-
- # Remove the year/month delta so the timedelta is just well-defined
- # time units (seconds, days and microseconds)
- dtm = self.__radd__(dt2)
-
- # If we've overshot our target, make an adjustment
- if dt1 < dt2:
- compare = operator.gt
- increment = 1
- else:
- compare = operator.lt
- increment = -1
-
- while compare(dt1, dtm):
- months += increment
- self._set_months(months)
- dtm = self.__radd__(dt2)
-
- # Get the timedelta between the "months-adjusted" date and dt1
- delta = dt1 - dtm
- self.seconds = delta.seconds + delta.days * 86400
- self.microseconds = delta.microseconds
- else:
- # Relative information
- self.years = years
- self.months = months
- self.days = days + weeks * 7
- self.leapdays = leapdays
- self.hours = hours
- self.minutes = minutes
- self.seconds = seconds
- self.microseconds = microseconds
-
- # Absolute information
- self.year = year
- self.month = month
- self.day = day
- self.hour = hour
- self.minute = minute
- self.second = second
- self.microsecond = microsecond
-
- if any(x is not None and int(x) != x
- for x in (year, month, day, hour,
- minute, second, microsecond)):
- # For now we'll deprecate floats - later it'll be an error.
- warn("Non-integer value passed as absolute information. " +
- "This is not a well-defined condition and will raise " +
- "errors in future versions.", DeprecationWarning)
-
- if isinstance(weekday, integer_types):
- self.weekday = weekdays[weekday]
- else:
- self.weekday = weekday
-
- yday = 0
- if nlyearday:
- yday = nlyearday
- elif yearday:
- yday = yearday
- if yearday > 59:
- self.leapdays = -1
- if yday:
- ydayidx = [31, 59, 90, 120, 151, 181, 212,
- 243, 273, 304, 334, 366]
- for idx, ydays in enumerate(ydayidx):
- if yday <= ydays:
- self.month = idx+1
- if idx == 0:
- self.day = yday
- else:
- self.day = yday-ydayidx[idx-1]
- break
- else:
- raise ValueError("invalid year day (%d)" % yday)
-
- self._fix()
-
- def _fix(self):
- if abs(self.microseconds) > 999999:
- s = _sign(self.microseconds)
- div, mod = divmod(self.microseconds * s, 1000000)
- self.microseconds = mod * s
- self.seconds += div * s
- if abs(self.seconds) > 59:
- s = _sign(self.seconds)
- div, mod = divmod(self.seconds * s, 60)
- self.seconds = mod * s
- self.minutes += div * s
- if abs(self.minutes) > 59:
- s = _sign(self.minutes)
- div, mod = divmod(self.minutes * s, 60)
- self.minutes = mod * s
- self.hours += div * s
- if abs(self.hours) > 23:
- s = _sign(self.hours)
- div, mod = divmod(self.hours * s, 24)
- self.hours = mod * s
- self.days += div * s
- if abs(self.months) > 11:
- s = _sign(self.months)
- div, mod = divmod(self.months * s, 12)
- self.months = mod * s
- self.years += div * s
- if (self.hours or self.minutes or self.seconds or self.microseconds
- or self.hour is not None or self.minute is not None or
- self.second is not None or self.microsecond is not None):
- self._has_time = 1
- else:
- self._has_time = 0
-
- @property
- def weeks(self):
- return self.days // 7
-
- @weeks.setter
- def weeks(self, value):
- self.days = self.days - (self.weeks * 7) + value * 7
-
- def _set_months(self, months):
- self.months = months
- if abs(self.months) > 11:
- s = _sign(self.months)
- div, mod = divmod(self.months * s, 12)
- self.months = mod * s
- self.years = div * s
- else:
- self.years = 0
-
- def normalized(self):
- """
- Return a version of this object represented entirely using integer
- values for the relative attributes.
-
- >>> relativedelta(days=1.5, hours=2).normalized()
- relativedelta(days=1, hours=14)
-
- :return:
- Returns a :class:`dateutil.relativedelta.relativedelta` object.
- """
- # Cascade remainders down (rounding each to roughly nearest microsecond)
- days = int(self.days)
-
- hours_f = round(self.hours + 24 * (self.days - days), 11)
- hours = int(hours_f)
-
- minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
- minutes = int(minutes_f)
-
- seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
- seconds = int(seconds_f)
-
- microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
-
- # Constructor carries overflow back up with call to _fix()
- return self.__class__(years=self.years, months=self.months,
- days=days, hours=hours, minutes=minutes,
- seconds=seconds, microseconds=microseconds,
- leapdays=self.leapdays, year=self.year,
- month=self.month, day=self.day,
- weekday=self.weekday, hour=self.hour,
- minute=self.minute, second=self.second,
- microsecond=self.microsecond)
-
- def __add__(self, other):
- if isinstance(other, relativedelta):
- return self.__class__(years=other.years + self.years,
- months=other.months + self.months,
- days=other.days + self.days,
- hours=other.hours + self.hours,
- minutes=other.minutes + self.minutes,
- seconds=other.seconds + self.seconds,
- microseconds=(other.microseconds +
- self.microseconds),
- leapdays=other.leapdays or self.leapdays,
- year=(other.year if other.year is not None
- else self.year),
- month=(other.month if other.month is not None
- else self.month),
- day=(other.day if other.day is not None
- else self.day),
- weekday=(other.weekday if other.weekday is not None
- else self.weekday),
- hour=(other.hour if other.hour is not None
- else self.hour),
- minute=(other.minute if other.minute is not None
- else self.minute),
- second=(other.second if other.second is not None
- else self.second),
- microsecond=(other.microsecond if other.microsecond
- is not None else
- self.microsecond))
- if isinstance(other, datetime.timedelta):
- return self.__class__(years=self.years,
- months=self.months,
- days=self.days + other.days,
- hours=self.hours,
- minutes=self.minutes,
- seconds=self.seconds + other.seconds,
- microseconds=self.microseconds + other.microseconds,
- leapdays=self.leapdays,
- year=self.year,
- month=self.month,
- day=self.day,
- weekday=self.weekday,
- hour=self.hour,
- minute=self.minute,
- second=self.second,
- microsecond=self.microsecond)
- if not isinstance(other, datetime.date):
- return NotImplemented
- elif self._has_time and not isinstance(other, datetime.datetime):
- other = datetime.datetime.fromordinal(other.toordinal())
- year = (self.year or other.year)+self.years
- month = self.month or other.month
- if self.months:
- assert 1 <= abs(self.months) <= 12
- month += self.months
- if month > 12:
- year += 1
- month -= 12
- elif month < 1:
- year -= 1
- month += 12
- day = min(calendar.monthrange(year, month)[1],
- self.day or other.day)
- repl = {"year": year, "month": month, "day": day}
- for attr in ["hour", "minute", "second", "microsecond"]:
- value = getattr(self, attr)
- if value is not None:
- repl[attr] = value
- days = self.days
- if self.leapdays and month > 2 and calendar.isleap(year):
- days += self.leapdays
- ret = (other.replace(**repl)
- + datetime.timedelta(days=days,
- hours=self.hours,
- minutes=self.minutes,
- seconds=self.seconds,
- microseconds=self.microseconds))
- if self.weekday:
- weekday, nth = self.weekday.weekday, self.weekday.n or 1
- jumpdays = (abs(nth) - 1) * 7
- if nth > 0:
- jumpdays += (7 - ret.weekday() + weekday) % 7
- else:
- jumpdays += (ret.weekday() - weekday) % 7
- jumpdays *= -1
- ret += datetime.timedelta(days=jumpdays)
- return ret
-
- def __radd__(self, other):
- return self.__add__(other)
-
- def __rsub__(self, other):
- return self.__neg__().__radd__(other)
-
- def __sub__(self, other):
- if not isinstance(other, relativedelta):
- return NotImplemented # In case the other object defines __rsub__
- return self.__class__(years=self.years - other.years,
- months=self.months - other.months,
- days=self.days - other.days,
- hours=self.hours - other.hours,
- minutes=self.minutes - other.minutes,
- seconds=self.seconds - other.seconds,
- microseconds=self.microseconds - other.microseconds,
- leapdays=self.leapdays or other.leapdays,
- year=(self.year if self.year is not None
- else other.year),
- month=(self.month if self.month is not None else
- other.month),
- day=(self.day if self.day is not None else
- other.day),
- weekday=(self.weekday if self.weekday is not None else
- other.weekday),
- hour=(self.hour if self.hour is not None else
- other.hour),
- minute=(self.minute if self.minute is not None else
- other.minute),
- second=(self.second if self.second is not None else
- other.second),
- microsecond=(self.microsecond if self.microsecond
- is not None else
- other.microsecond))
-
- def __neg__(self):
- return self.__class__(years=-self.years,
- months=-self.months,
- days=-self.days,
- hours=-self.hours,
- minutes=-self.minutes,
- seconds=-self.seconds,
- microseconds=-self.microseconds,
- leapdays=self.leapdays,
- year=self.year,
- month=self.month,
- day=self.day,
- weekday=self.weekday,
- hour=self.hour,
- minute=self.minute,
- second=self.second,
- microsecond=self.microsecond)
-
- def __bool__(self):
- return not (not self.years and
- not self.months and
- not self.days and
- not self.hours and
- not self.minutes and
- not self.seconds and
- not self.microseconds and
- not self.leapdays and
- self.year is None and
- self.month is None and
- self.day is None and
- self.weekday is None and
- self.hour is None and
- self.minute is None and
- self.second is None and
- self.microsecond is None)
- # Compatibility with Python 2.x
- __nonzero__ = __bool__
-
- def __mul__(self, other):
- try:
- f = float(other)
- except TypeError:
- return NotImplemented
-
- return self.__class__(years=int(self.years * f),
- months=int(self.months * f),
- days=int(self.days * f),
- hours=int(self.hours * f),
- minutes=int(self.minutes * f),
- seconds=int(self.seconds * f),
- microseconds=int(self.microseconds * f),
- leapdays=self.leapdays,
- year=self.year,
- month=self.month,
- day=self.day,
- weekday=self.weekday,
- hour=self.hour,
- minute=self.minute,
- second=self.second,
- microsecond=self.microsecond)
-
- __rmul__ = __mul__
-
- def __eq__(self, other):
- if not isinstance(other, relativedelta):
- return NotImplemented
- if self.weekday or other.weekday:
- if not self.weekday or not other.weekday:
- return False
- if self.weekday.weekday != other.weekday.weekday:
- return False
- n1, n2 = self.weekday.n, other.weekday.n
- if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
- return False
- return (self.years == other.years and
- self.months == other.months and
- self.days == other.days and
- self.hours == other.hours and
- self.minutes == other.minutes and
- self.seconds == other.seconds and
- self.microseconds == other.microseconds and
- self.leapdays == other.leapdays and
- self.year == other.year and
- self.month == other.month and
- self.day == other.day and
- self.hour == other.hour and
- self.minute == other.minute and
- self.second == other.second and
- self.microsecond == other.microsecond)
-
- __hash__ = None
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __div__(self, other):
- try:
- reciprocal = 1 / float(other)
- except TypeError:
- return NotImplemented
-
- return self.__mul__(reciprocal)
-
- __truediv__ = __div__
-
- def __repr__(self):
- l = []
- for attr in ["years", "months", "days", "leapdays",
- "hours", "minutes", "seconds", "microseconds"]:
- value = getattr(self, attr)
- if value:
- l.append("{attr}={value:+g}".format(attr=attr, value=value))
- for attr in ["year", "month", "day", "weekday",
- "hour", "minute", "second", "microsecond"]:
- value = getattr(self, attr)
- if value is not None:
- l.append("{attr}={value}".format(attr=attr, value=repr(value)))
- return "{classname}({attrs})".format(classname=self.__class__.__name__,
- attrs=", ".join(l))
-
-
-def _sign(x):
- return int(copysign(1, x))
-
-# vim:ts=4:sw=4:et
diff --git a/venv/Lib/site-packages/dateutil/rrule.py b/venv/Lib/site-packages/dateutil/rrule.py
deleted file mode 100644
index 429f8fc..0000000
--- a/venv/Lib/site-packages/dateutil/rrule.py
+++ /dev/null
@@ -1,1610 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-The rrule module offers a small, complete, and very fast implementation of
-the recurrence rules documented in the iCalendar RFC (RFC 2445),
-including support for caching of results.
-"""
-import itertools
-import datetime
-import calendar
-import sys
-
-try:
- from math import gcd
-except ImportError:
- from fractions import gcd
-
-from six import advance_iterator, integer_types
-from six.moves import _thread, range
-import heapq
-
-from ._common import weekday as weekdaybase
-
-# For warning about deprecation of until and count
-from warnings import warn
-
-__all__ = ["rrule", "rruleset", "rrulestr",
- "YEARLY", "MONTHLY", "WEEKLY", "DAILY",
- "HOURLY", "MINUTELY", "SECONDLY",
- "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
-
-# Every mask is 7 days longer to handle cross-year weekly periods.
-M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
- [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
-M365MASK = list(M366MASK)
-M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
-MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
-MDAY365MASK = list(MDAY366MASK)
-M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
-NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
-NMDAY365MASK = list(NMDAY366MASK)
-M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
-M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
-WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
-del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
-MDAY365MASK = tuple(MDAY365MASK)
-M365MASK = tuple(M365MASK)
-
-FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
-
-(YEARLY,
- MONTHLY,
- WEEKLY,
- DAILY,
- HOURLY,
- MINUTELY,
- SECONDLY) = list(range(7))
-
-# Imported on demand.
-easter = None
-parser = None
-
-
-class weekday(weekdaybase):
- """
- This version of weekday does not allow n = 0.
- """
- def __init__(self, wkday, n=None):
- if n == 0:
- raise ValueError("Can't create weekday with n==0")
-
- super(weekday, self).__init__(wkday, n)
-
-
-MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
-
-
-def _invalidates_cache(f):
- """
- Decorator for rruleset methods which may invalidate the
- cached length.
- """
- def inner_func(self, *args, **kwargs):
- rv = f(self, *args, **kwargs)
- self._invalidate_cache()
- return rv
-
- return inner_func
-
-
-class rrulebase(object):
- def __init__(self, cache=False):
- if cache:
- self._cache = []
- self._cache_lock = _thread.allocate_lock()
- self._invalidate_cache()
- else:
- self._cache = None
- self._cache_complete = False
- self._len = None
-
- def __iter__(self):
- if self._cache_complete:
- return iter(self._cache)
- elif self._cache is None:
- return self._iter()
- else:
- return self._iter_cached()
-
- def _invalidate_cache(self):
- if self._cache is not None:
- self._cache = []
- self._cache_complete = False
- self._cache_gen = self._iter()
-
- if self._cache_lock.locked():
- self._cache_lock.release()
-
- self._len = None
-
- def _iter_cached(self):
- i = 0
- gen = self._cache_gen
- cache = self._cache
- acquire = self._cache_lock.acquire
- release = self._cache_lock.release
- while gen:
- if i == len(cache):
- acquire()
- if self._cache_complete:
- break
- try:
- for j in range(10):
- cache.append(advance_iterator(gen))
- except StopIteration:
- self._cache_gen = gen = None
- self._cache_complete = True
- break
- release()
- yield cache[i]
- i += 1
- while i < self._len:
- yield cache[i]
- i += 1
-
- def __getitem__(self, item):
- if self._cache_complete:
- return self._cache[item]
- elif isinstance(item, slice):
- if item.step and item.step < 0:
- return list(iter(self))[item]
- else:
- return list(itertools.islice(self,
- item.start or 0,
- item.stop or sys.maxsize,
- item.step or 1))
- elif item >= 0:
- gen = iter(self)
- try:
- for i in range(item+1):
- res = advance_iterator(gen)
- except StopIteration:
- raise IndexError
- return res
- else:
- return list(iter(self))[item]
-
- def __contains__(self, item):
- if self._cache_complete:
- return item in self._cache
- else:
- for i in self:
- if i == item:
- return True
- elif i > item:
- return False
- return False
-
-    # __len__() introduces a large performance penalty.
- def count(self):
-        """ Returns the number of recurrences in this set. It will have to go
-        through the whole recurrence set, if this hasn't been done before. """
- if self._len is None:
- for x in self:
- pass
- return self._len
-
- def before(self, dt, inc=False):
- """ Returns the last recurrence before the given datetime instance. The
- inc keyword defines what happens if dt is an occurrence. With
- inc=True, if dt itself is an occurrence, it will be returned. """
- if self._cache_complete:
- gen = self._cache
- else:
- gen = self
- last = None
- if inc:
- for i in gen:
- if i > dt:
- break
- last = i
- else:
- for i in gen:
- if i >= dt:
- break
- last = i
- return last
-
- def after(self, dt, inc=False):
- """ Returns the first recurrence after the given datetime instance. The
- inc keyword defines what happens if dt is an occurrence. With
- inc=True, if dt itself is an occurrence, it will be returned. """
- if self._cache_complete:
- gen = self._cache
- else:
- gen = self
- if inc:
- for i in gen:
- if i >= dt:
- return i
- else:
- for i in gen:
- if i > dt:
- return i
- return None
-
- def xafter(self, dt, count=None, inc=False):
- """
-        Generator which yields up to `count` recurrences after the given
-        datetime instance, a generator equivalent of `after`.
-
- :param dt:
- The datetime at which to start generating recurrences.
-
- :param count:
- The maximum number of recurrences to generate. If `None` (default),
- dates are generated until the recurrence rule is exhausted.
-
- :param inc:
- If `dt` is an instance of the rule and `inc` is `True`, it is
- included in the output.
-
- :yields: Yields a sequence of `datetime` objects.
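-
-        For illustration (an assumed daily rule)::
-
-            >>> from dateutil.rrule import rrule, DAILY
-            >>> from datetime import datetime
-            >>> rr = rrule(DAILY, dtstart=datetime(2015, 1, 1))
-            >>> list(rr.xafter(datetime(2015, 1, 10), count=2))
-            [datetime.datetime(2015, 1, 11, 0, 0), datetime.datetime(2015, 1, 12, 0, 0)]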
- """
-
- if self._cache_complete:
- gen = self._cache
- else:
- gen = self
-
- # Select the comparison function
- if inc:
- comp = lambda dc, dtc: dc >= dtc
- else:
- comp = lambda dc, dtc: dc > dtc
-
- # Generate dates
- n = 0
- for d in gen:
- if comp(d, dt):
- if count is not None:
- n += 1
- if n > count:
- break
-
- yield d
-
- def between(self, after, before, inc=False, count=1):
- """ Returns all the occurrences of the rrule between after and before.
- The inc keyword defines what happens if after and/or before are
- themselves occurrences. With inc=True, they will be included in the
- list, if they are found in the recurrence set. """
- if self._cache_complete:
- gen = self._cache
- else:
- gen = self
- started = False
- l = []
- if inc:
- for i in gen:
- if i > before:
- break
- elif not started:
- if i >= after:
- started = True
- l.append(i)
- else:
- l.append(i)
- else:
- for i in gen:
- if i >= before:
- break
- elif not started:
- if i > after:
- started = True
- l.append(i)
- else:
- l.append(i)
- return l
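-
-    # Illustrative (an assumed daily rule rr starting 2015-01-01, all at
-    # midnight; a sketch of the three lookup methods above):
-    #   rr.before(datetime(2015, 1, 10))  -> 2015-01-09 00:00
-    #   rr.after(datetime(2015, 1, 10))   -> 2015-01-11 00:00
-    #   rr.between(datetime(2015, 1, 10), datetime(2015, 1, 13))
-    #       -> [2015-01-11 00:00, 2015-01-12 00:00]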
-
-
-class rrule(rrulebase):
- """
-    This is the base of the rrule operation. It accepts all the keywords
- defined in the RFC as its constructor parameters (except byday,
- which was renamed to byweekday) and more. The constructor prototype is::
-
- rrule(freq)
-
- Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
- or SECONDLY.
-
- .. note::
- Per RFC section 3.3.10, recurrence instances falling on invalid dates
- and times are ignored rather than coerced:
-
- Recurrence rules may generate recurrence instances with an invalid
- date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
- on a day where the local time is moved forward by an hour at 1:00
- AM). Such recurrence instances MUST be ignored and MUST NOT be
- counted as part of the recurrence set.
-
- This can lead to possibly surprising behavior when, for example, the
- start date occurs at the end of the month:
-
- >>> from dateutil.rrule import rrule, MONTHLY
- >>> from datetime import datetime
- >>> start_date = datetime(2014, 12, 31)
- >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
- ... # doctest: +NORMALIZE_WHITESPACE
- [datetime.datetime(2014, 12, 31, 0, 0),
- datetime.datetime(2015, 1, 31, 0, 0),
- datetime.datetime(2015, 3, 31, 0, 0),
- datetime.datetime(2015, 5, 31, 0, 0)]
-
- Additionally, it supports the following keyword arguments:
-
- :param cache:
- If given, it must be a boolean value specifying to enable or disable
- caching of results. If you will use the same rrule instance multiple
- times, enabling caching will improve the performance considerably.
- :param dtstart:
- The recurrence start. Besides being the base for the recurrence,
- missing parameters in the final recurrence instances will also be
- extracted from this date. If not given, datetime.now() will be used
- instead.
- :param interval:
- The interval between each freq iteration. For example, when using
- YEARLY, an interval of 2 means once every two years, but with HOURLY,
- it means once every two hours. The default interval is 1.
- :param wkst:
- The week start day. Must be one of the MO, TU, WE constants, or an
- integer, specifying the first day of the week. This will affect
-        recurrences based on weekly periods. The default week start is
-        retrieved from calendar.firstweekday(), and may be modified by
- calendar.setfirstweekday().
- :param count:
- How many occurrences will be generated.
-
- .. note::
- As of version 2.5.0, the use of the ``until`` keyword together
- with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
- :param until:
-        If given, this must be a datetime instance that will specify the
- limit of the recurrence. The last recurrence in the rule is the greatest
- datetime that is less than or equal to the value specified in the
- ``until`` parameter.
-
- .. note::
- As of version 2.5.0, the use of the ``until`` keyword together
- with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
- :param bysetpos:
- If given, it must be either an integer, or a sequence of integers,
- positive or negative. Each given integer will specify an occurrence
- number, corresponding to the nth occurrence of the rule inside the
- frequency period. For example, a bysetpos of -1 if combined with a
-        MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
-        result in the last work day of every month (see the example at the
-        end of this docstring).
- :param bymonth:
- If given, it must be either an integer, or a sequence of integers,
- meaning the months to apply the recurrence to.
- :param bymonthday:
- If given, it must be either an integer, or a sequence of integers,
- meaning the month days to apply the recurrence to.
- :param byyearday:
- If given, it must be either an integer, or a sequence of integers,
- meaning the year days to apply the recurrence to.
- :param byweekno:
- If given, it must be either an integer, or a sequence of integers,
- meaning the week numbers to apply the recurrence to. Week numbers
- have the meaning described in ISO8601, that is, the first week of
- the year is that containing at least four days of the new year.
- :param byweekday:
- If given, it must be either an integer (0 == MO), a sequence of
- integers, one of the weekday constants (MO, TU, etc), or a sequence
- of these constants. When given, these variables will define the
- weekdays where the recurrence will be applied. It's also possible to
- use an argument n for the weekday instances, which will mean the nth
- occurrence of this weekday in the period. For example, with MONTHLY,
- or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
-        first Friday of the month where the recurrence happens. Notice that in
- the RFC documentation, this is specified as BYDAY, but was renamed to
- avoid the ambiguity of that keyword.
- :param byhour:
- If given, it must be either an integer, or a sequence of integers,
- meaning the hours to apply the recurrence to.
- :param byminute:
- If given, it must be either an integer, or a sequence of integers,
- meaning the minutes to apply the recurrence to.
- :param bysecond:
- If given, it must be either an integer, or a sequence of integers,
- meaning the seconds to apply the recurrence to.
- :param byeaster:
- If given, it must be either an integer, or a sequence of integers,
- positive or negative. Each integer will define an offset from the
- Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
- Sunday itself. This is an extension to the RFC specification.
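-
-    For illustration, the "last work day of every month" rule mentioned
-    under ``bysetpos`` above could be written as::
-
-        >>> from dateutil.rrule import rrule, MONTHLY, MO, TU, WE, TH, FR
-        >>> from datetime import datetime
-        >>> list(rrule(MONTHLY, count=2, byweekday=(MO, TU, WE, TH, FR),
-        ...            bysetpos=-1, dtstart=datetime(2015, 1, 1)))
-        ... # doctest: +NORMALIZE_WHITESPACE
-        [datetime.datetime(2015, 1, 30, 0, 0),
-         datetime.datetime(2015, 2, 27, 0, 0)]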
- """
- def __init__(self, freq, dtstart=None,
- interval=1, wkst=None, count=None, until=None, bysetpos=None,
- bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
- byweekno=None, byweekday=None,
- byhour=None, byminute=None, bysecond=None,
- cache=False):
- super(rrule, self).__init__(cache)
- global easter
- if not dtstart:
- dtstart = datetime.datetime.now().replace(microsecond=0)
- elif not isinstance(dtstart, datetime.datetime):
- dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
- else:
- dtstart = dtstart.replace(microsecond=0)
- self._dtstart = dtstart
- self._tzinfo = dtstart.tzinfo
- self._freq = freq
- self._interval = interval
- self._count = count
-
- # Cache the original byxxx rules, if they are provided, as the _byxxx
- # attributes do not necessarily map to the inputs, and this can be
- # a problem in generating the strings. Only store things if they've
- # been supplied (the string retrieval will just use .get())
- self._original_rule = {}
-
- if until and not isinstance(until, datetime.datetime):
- until = datetime.datetime.fromordinal(until.toordinal())
- self._until = until
-
- if count is not None and until:
- warn("Using both 'count' and 'until' is inconsistent with RFC 2445"
- " and has been deprecated in dateutil. Future versions will "
- "raise an error.", DeprecationWarning)
-
- if wkst is None:
- self._wkst = calendar.firstweekday()
- elif isinstance(wkst, integer_types):
- self._wkst = wkst
- else:
- self._wkst = wkst.weekday
-
- if bysetpos is None:
- self._bysetpos = None
- elif isinstance(bysetpos, integer_types):
- if bysetpos == 0 or not (-366 <= bysetpos <= 366):
- raise ValueError("bysetpos must be between 1 and 366, "
- "or between -366 and -1")
- self._bysetpos = (bysetpos,)
- else:
- self._bysetpos = tuple(bysetpos)
- for pos in self._bysetpos:
- if pos == 0 or not (-366 <= pos <= 366):
- raise ValueError("bysetpos must be between 1 and 366, "
- "or between -366 and -1")
-
- if self._bysetpos:
- self._original_rule['bysetpos'] = self._bysetpos
-
- if (byweekno is None and byyearday is None and bymonthday is None and
- byweekday is None and byeaster is None):
- if freq == YEARLY:
- if bymonth is None:
- bymonth = dtstart.month
- self._original_rule['bymonth'] = None
- bymonthday = dtstart.day
- self._original_rule['bymonthday'] = None
- elif freq == MONTHLY:
- bymonthday = dtstart.day
- self._original_rule['bymonthday'] = None
- elif freq == WEEKLY:
- byweekday = dtstart.weekday()
- self._original_rule['byweekday'] = None
-
- # bymonth
- if bymonth is None:
- self._bymonth = None
- else:
- if isinstance(bymonth, integer_types):
- bymonth = (bymonth,)
-
- self._bymonth = tuple(sorted(set(bymonth)))
-
- if 'bymonth' not in self._original_rule:
- self._original_rule['bymonth'] = self._bymonth
-
- # byyearday
- if byyearday is None:
- self._byyearday = None
- else:
- if isinstance(byyearday, integer_types):
- byyearday = (byyearday,)
-
- self._byyearday = tuple(sorted(set(byyearday)))
- self._original_rule['byyearday'] = self._byyearday
-
- # byeaster
- if byeaster is not None:
- if not easter:
- from dateutil import easter
- if isinstance(byeaster, integer_types):
- self._byeaster = (byeaster,)
- else:
- self._byeaster = tuple(sorted(byeaster))
-
- self._original_rule['byeaster'] = self._byeaster
- else:
- self._byeaster = None
-
- # bymonthday
- if bymonthday is None:
- self._bymonthday = ()
- self._bynmonthday = ()
- else:
- if isinstance(bymonthday, integer_types):
- bymonthday = (bymonthday,)
-
- bymonthday = set(bymonthday) # Ensure it's unique
-
- self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
- self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
-
- # Storing positive numbers first, then negative numbers
- if 'bymonthday' not in self._original_rule:
- self._original_rule['bymonthday'] = tuple(
- itertools.chain(self._bymonthday, self._bynmonthday))
-
- # byweekno
- if byweekno is None:
- self._byweekno = None
- else:
- if isinstance(byweekno, integer_types):
- byweekno = (byweekno,)
-
- self._byweekno = tuple(sorted(set(byweekno)))
-
- self._original_rule['byweekno'] = self._byweekno
-
- # byweekday / bynweekday
- if byweekday is None:
- self._byweekday = None
- self._bynweekday = None
- else:
- # If it's one of the valid non-sequence types, convert to a
- # single-element sequence before the iterator that builds the
- # byweekday set.
- if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
- byweekday = (byweekday,)
-
- self._byweekday = set()
- self._bynweekday = set()
- for wday in byweekday:
- if isinstance(wday, integer_types):
- self._byweekday.add(wday)
- elif not wday.n or freq > MONTHLY:
- self._byweekday.add(wday.weekday)
- else:
- self._bynweekday.add((wday.weekday, wday.n))
-
- if not self._byweekday:
- self._byweekday = None
- elif not self._bynweekday:
- self._bynweekday = None
-
- if self._byweekday is not None:
- self._byweekday = tuple(sorted(self._byweekday))
- orig_byweekday = [weekday(x) for x in self._byweekday]
- else:
- orig_byweekday = tuple()
-
- if self._bynweekday is not None:
- self._bynweekday = tuple(sorted(self._bynweekday))
- orig_bynweekday = [weekday(*x) for x in self._bynweekday]
- else:
- orig_bynweekday = tuple()
-
- if 'byweekday' not in self._original_rule:
- self._original_rule['byweekday'] = tuple(itertools.chain(
- orig_byweekday, orig_bynweekday))
-
- # byhour
- if byhour is None:
- if freq < HOURLY:
- self._byhour = set((dtstart.hour,))
- else:
- self._byhour = None
- else:
- if isinstance(byhour, integer_types):
- byhour = (byhour,)
-
- if freq == HOURLY:
- self._byhour = self.__construct_byset(start=dtstart.hour,
- byxxx=byhour,
- base=24)
- else:
- self._byhour = set(byhour)
-
- self._byhour = tuple(sorted(self._byhour))
- self._original_rule['byhour'] = self._byhour
-
- # byminute
- if byminute is None:
- if freq < MINUTELY:
- self._byminute = set((dtstart.minute,))
- else:
- self._byminute = None
- else:
- if isinstance(byminute, integer_types):
- byminute = (byminute,)
-
- if freq == MINUTELY:
- self._byminute = self.__construct_byset(start=dtstart.minute,
- byxxx=byminute,
- base=60)
- else:
- self._byminute = set(byminute)
-
- self._byminute = tuple(sorted(self._byminute))
- self._original_rule['byminute'] = self._byminute
-
- # bysecond
- if bysecond is None:
- if freq < SECONDLY:
- self._bysecond = set((dtstart.second,))
- else:
- self._bysecond = None
- else:
- if isinstance(bysecond, integer_types):
- bysecond = (bysecond,)
-
- if freq == SECONDLY:
- self._bysecond = self.__construct_byset(start=dtstart.second,
- byxxx=bysecond,
- base=60)
- else:
- self._bysecond = set(bysecond)
-
- self._bysecond = tuple(sorted(self._bysecond))
- self._original_rule['bysecond'] = self._bysecond
-
- if self._freq >= HOURLY:
- self._timeset = None
- else:
- self._timeset = []
- for hour in self._byhour:
- for minute in self._byminute:
- for second in self._bysecond:
- self._timeset.append(
- datetime.time(hour, minute, second,
- tzinfo=self._tzinfo))
- self._timeset.sort()
- self._timeset = tuple(self._timeset)
-
- def __str__(self):
- """
- Output a string that would generate this RRULE if passed to rrulestr.
- This is mostly compatible with RFC2445, except for the
- dateutil-specific extension BYEASTER.
- """
-
- output = []
- if self._dtstart:
- output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
-
- parts = ['FREQ=' + FREQNAMES[self._freq]]
- if self._interval != 1:
- parts.append('INTERVAL=' + str(self._interval))
-
- if self._wkst:
- parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
-
- if self._count is not None:
- parts.append('COUNT=' + str(self._count))
-
- if self._until:
- parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
-
- if self._original_rule.get('byweekday') is not None:
- # The str() method on weekday objects doesn't generate
- # RFC2445-compliant strings, so we build compliant ones by hand here.
- original_rule = dict(self._original_rule)
- wday_strings = []
- for wday in original_rule['byweekday']:
- if wday.n:
- wday_strings.append('{n:+d}{wday}'.format(
- n=wday.n,
- wday=repr(wday)[0:2]))
- else:
- wday_strings.append(repr(wday))
-
- original_rule['byweekday'] = wday_strings
- else:
- original_rule = self._original_rule
-
- partfmt = '{name}={vals}'
- for name, key in [('BYSETPOS', 'bysetpos'),
- ('BYMONTH', 'bymonth'),
- ('BYMONTHDAY', 'bymonthday'),
- ('BYYEARDAY', 'byyearday'),
- ('BYWEEKNO', 'byweekno'),
- ('BYDAY', 'byweekday'),
- ('BYHOUR', 'byhour'),
- ('BYMINUTE', 'byminute'),
- ('BYSECOND', 'bysecond'),
- ('BYEASTER', 'byeaster')]:
- value = original_rule.get(key)
- if value:
- parts.append(partfmt.format(name=name, vals=(','.join(str(v)
- for v in value))))
-
- output.append(';'.join(parts))
- return '\n'.join(output)
-
- def replace(self, **kwargs):
- """Return new rrule with same attributes except for those attributes given new
- values by whichever keyword arguments are specified."""
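-
- # Sketch: derive a modified copy without mutating the original.
- #
- #     r = rrule(DAILY, count=3, dtstart=datetime.datetime(2020, 1, 1))
- #     r10 = r.replace(count=10)  # same freq/dtstart/interval, new count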
- new_kwargs = {"interval": self._interval,
- "count": self._count,
- "dtstart": self._dtstart,
- "freq": self._freq,
- "until": self._until,
- "wkst": self._wkst,
- "cache": False if self._cache is None else True }
- new_kwargs.update(self._original_rule)
- new_kwargs.update(kwargs)
- return rrule(**new_kwargs)
-
- def _iter(self):
- year, month, day, hour, minute, second, weekday, yearday, _ = \
- self._dtstart.timetuple()
-
- # Some local variables to speed things up a bit
- freq = self._freq
- interval = self._interval
- wkst = self._wkst
- until = self._until
- bymonth = self._bymonth
- byweekno = self._byweekno
- byyearday = self._byyearday
- byweekday = self._byweekday
- byeaster = self._byeaster
- bymonthday = self._bymonthday
- bynmonthday = self._bynmonthday
- bysetpos = self._bysetpos
- byhour = self._byhour
- byminute = self._byminute
- bysecond = self._bysecond
-
- ii = _iterinfo(self)
- ii.rebuild(year, month)
-
- getdayset = {YEARLY: ii.ydayset,
- MONTHLY: ii.mdayset,
- WEEKLY: ii.wdayset,
- DAILY: ii.ddayset,
- HOURLY: ii.ddayset,
- MINUTELY: ii.ddayset,
- SECONDLY: ii.ddayset}[freq]
-
- if freq < HOURLY:
- timeset = self._timeset
- else:
- gettimeset = {HOURLY: ii.htimeset,
- MINUTELY: ii.mtimeset,
- SECONDLY: ii.stimeset}[freq]
- if ((freq >= HOURLY and
- self._byhour and hour not in self._byhour) or
- (freq >= MINUTELY and
- self._byminute and minute not in self._byminute) or
- (freq >= SECONDLY and
- self._bysecond and second not in self._bysecond)):
- timeset = ()
- else:
- timeset = gettimeset(hour, minute, second)
-
- total = 0
- count = self._count
- while True:
- # Get dayset with the right frequency
- dayset, start, end = getdayset(year, month, day)
-
- # Do the "hard" work ;-)
- filtered = False
- for i in dayset[start:end]:
- if ((bymonth and ii.mmask[i] not in bymonth) or
- (byweekno and not ii.wnomask[i]) or
- (byweekday and ii.wdaymask[i] not in byweekday) or
- (ii.nwdaymask and not ii.nwdaymask[i]) or
- (byeaster and not ii.eastermask[i]) or
- ((bymonthday or bynmonthday) and
- ii.mdaymask[i] not in bymonthday and
- ii.nmdaymask[i] not in bynmonthday) or
- (byyearday and
- ((i < ii.yearlen and i+1 not in byyearday and
- -ii.yearlen+i not in byyearday) or
- (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
- -ii.nextyearlen+i-ii.yearlen not in byyearday)))):
- dayset[i] = None
- filtered = True
-
- # Output results
- if bysetpos and timeset:
- poslist = []
- for pos in bysetpos:
- if pos < 0:
- daypos, timepos = divmod(pos, len(timeset))
- else:
- daypos, timepos = divmod(pos-1, len(timeset))
- try:
- i = [x for x in dayset[start:end]
- if x is not None][daypos]
- time = timeset[timepos]
- except IndexError:
- pass
- else:
- date = datetime.date.fromordinal(ii.yearordinal+i)
- res = datetime.datetime.combine(date, time)
- if res not in poslist:
- poslist.append(res)
- poslist.sort()
- for res in poslist:
- if until and res > until:
- self._len = total
- return
- elif res >= self._dtstart:
- if count is not None:
- count -= 1
- if count < 0:
- self._len = total
- return
- total += 1
- yield res
- else:
- for i in dayset[start:end]:
- if i is not None:
- date = datetime.date.fromordinal(ii.yearordinal + i)
- for time in timeset:
- res = datetime.datetime.combine(date, time)
- if until and res > until:
- self._len = total
- return
- elif res >= self._dtstart:
- if count is not None:
- count -= 1
- if count < 0:
- self._len = total
- return
-
- total += 1
- yield res
-
- # Handle frequency and interval
- fixday = False
- if freq == YEARLY:
- year += interval
- if year > datetime.MAXYEAR:
- self._len = total
- return
- ii.rebuild(year, month)
- elif freq == MONTHLY:
- month += interval
- if month > 12:
- div, mod = divmod(month, 12)
- month = mod
- year += div
- if month == 0:
- month = 12
- year -= 1
- if year > datetime.MAXYEAR:
- self._len = total
- return
- ii.rebuild(year, month)
- elif freq == WEEKLY:
- if wkst > weekday:
- day += -(weekday+1+(6-wkst))+self._interval*7
- else:
- day += -(weekday-wkst)+self._interval*7
- weekday = wkst
- fixday = True
- elif freq == DAILY:
- day += interval
- fixday = True
- elif freq == HOURLY:
- if filtered:
- # Jump to one iteration before next day
- hour += ((23-hour)//interval)*interval
-
- if byhour:
- ndays, hour = self.__mod_distance(value=hour,
- byxxx=self._byhour,
- base=24)
- else:
- ndays, hour = divmod(hour+interval, 24)
-
- if ndays:
- day += ndays
- fixday = True
-
- timeset = gettimeset(hour, minute, second)
- elif freq == MINUTELY:
- if filtered:
- # Jump to one iteration before next day
- minute += ((1439-(hour*60+minute))//interval)*interval
-
- valid = False
- rep_rate = (24*60)
- for j in range(rep_rate // gcd(interval, rep_rate)):
- if byminute:
- nhours, minute = \
- self.__mod_distance(value=minute,
- byxxx=self._byminute,
- base=60)
- else:
- nhours, minute = divmod(minute+interval, 60)
-
- div, hour = divmod(hour+nhours, 24)
- if div:
- day += div
- fixday = True
- filtered = False
-
- if not byhour or hour in byhour:
- valid = True
- break
-
- if not valid:
- raise ValueError('Invalid combination of interval and ' +
- 'byhour resulting in empty rule.')
-
- timeset = gettimeset(hour, minute, second)
- elif freq == SECONDLY:
- if filtered:
- # Jump to one iteration before next day
- second += (((86399 - (hour * 3600 + minute * 60 + second))
- // interval) * interval)
-
- rep_rate = (24 * 3600)
- valid = False
- for j in range(0, rep_rate // gcd(interval, rep_rate)):
- if bysecond:
- nminutes, second = \
- self.__mod_distance(value=second,
- byxxx=self._bysecond,
- base=60)
- else:
- nminutes, second = divmod(second+interval, 60)
-
- div, minute = divmod(minute+nminutes, 60)
- if div:
- hour += div
- div, hour = divmod(hour, 24)
- if div:
- day += div
- fixday = True
-
- if ((not byhour or hour in byhour) and
- (not byminute or minute in byminute) and
- (not bysecond or second in bysecond)):
- valid = True
- break
-
- if not valid:
- raise ValueError('Invalid combination of interval, ' +
- 'byhour and byminute resulting in empty' +
- ' rule.')
-
- timeset = gettimeset(hour, minute, second)
-
- if fixday and day > 28:
- daysinmonth = calendar.monthrange(year, month)[1]
- if day > daysinmonth:
- while day > daysinmonth:
- day -= daysinmonth
- month += 1
- if month == 13:
- month = 1
- year += 1
- if year > datetime.MAXYEAR:
- self._len = total
- return
- daysinmonth = calendar.monthrange(year, month)[1]
- ii.rebuild(year, month)
-
- def __construct_byset(self, start, byxxx, base):
- """
- If a `BYXXX` sequence is passed to the constructor at the same level as
- `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
- specifications which cannot be reached given some starting conditions.
-
- This occurs whenever the interval is not coprime with the base of a
- given unit and the difference between the starting position and the
- ending position is not divisible by the greatest common divisor of
- the interval and the base. For example, with a FREQ of hourly
- starting at 17:00 and an interval of 4, the only valid values for
- BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
- coprime.
-
- :param start:
- Specifies the starting position.
- :param byxxx:
- An iterable containing the list of allowed values.
- :param base:
- The largest allowable value for the specified frequency (e.g.
- 24 hours, 60 minutes).
-
- This does not preserve the type of the iterable, returning a set
- instead, since the values should be unique and the order is
- irrelevant; this also speeds up later lookups.
-
- In the event of an empty set, raises a :exc:`ValueError`, as this
- results in an empty rrule.
- """
-
- cset = set()
-
- # Support a single byxxx value.
- if isinstance(byxxx, integer_types):
- byxxx = (byxxx, )
-
- for num in byxxx:
- i_gcd = gcd(self._interval, base)
- # Use divmod rather than % because we need to wrap negative nums.
- if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
- cset.add(num)
-
- if len(cset) == 0:
- raise ValueError("Invalid rrule byxxx generates an empty set.")
-
- return cset
-
- def __mod_distance(self, value, byxxx, base):
- """
- Calculates the next value in a sequence where the `FREQ` parameter is
- specified along with a `BYXXX` parameter at the same "level"
- (e.g. `HOURLY` specified with `BYHOUR`).
-
- :param value:
- The old value of the component.
- :param byxxx:
- The `BYXXX` set, which should have been generated by
- `rrule.__construct_byset`, or something else which checks that a
- valid rule is present.
- :param base:
- The largest allowable value for the specified frequency (e.g.
- 24 hours, 60 minutes).
-
- If a valid value is not found after `base` iterations (the maximum
- number before the sequence would start to repeat), this raises a
- :exc:`ValueError`, as no valid values were found.
-
- This returns a tuple of `divmod(n*interval, base)`, where `n` is the
- smallest number of `interval` repetitions until the next specified
- value in `byxxx` is found.
- """
- accumulator = 0
- for ii in range(1, base + 1):
- # Using divmod() over % to account for negative intervals
- div, value = divmod(value + self._interval, base)
- accumulator += div
- if value in byxxx:
- return (accumulator, value)
-
- # Defensive: the loop exhausted ``base`` iterations without hitting
- # ``byxxx``, so the rule would be empty (as the docstring promises).
- raise ValueError('Invalid combination of interval and byxxx '
- 'resulting in empty rule.')
-
-
-class _iterinfo(object):
- __slots__ = ["rrule", "lastyear", "lastmonth",
- "yearlen", "nextyearlen", "yearordinal", "yearweekday",
- "mmask", "mrange", "mdaymask", "nmdaymask",
- "wdaymask", "wnomask", "nwdaymask", "eastermask"]
-
- def __init__(self, rrule):
- for attr in self.__slots__:
- setattr(self, attr, None)
- self.rrule = rrule
-
- def rebuild(self, year, month):
- # Every mask is 7 days longer to handle cross-year weekly periods.
- rr = self.rrule
- if year != self.lastyear:
- self.yearlen = 365 + calendar.isleap(year)
- self.nextyearlen = 365 + calendar.isleap(year + 1)
- firstyday = datetime.date(year, 1, 1)
- self.yearordinal = firstyday.toordinal()
- self.yearweekday = firstyday.weekday()
-
- wday = datetime.date(year, 1, 1).weekday()
- if self.yearlen == 365:
- self.mmask = M365MASK
- self.mdaymask = MDAY365MASK
- self.nmdaymask = NMDAY365MASK
- self.wdaymask = WDAYMASK[wday:]
- self.mrange = M365RANGE
- else:
- self.mmask = M366MASK
- self.mdaymask = MDAY366MASK
- self.nmdaymask = NMDAY366MASK
- self.wdaymask = WDAYMASK[wday:]
- self.mrange = M366RANGE
-
- if not rr._byweekno:
- self.wnomask = None
- else:
- self.wnomask = [0]*(self.yearlen+7)
- # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
- no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
- if no1wkst >= 4:
- no1wkst = 0
- # Number of days in the year, plus the days we got
- # from last year.
- wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
- else:
- # Number of days in the year, minus the days we
- # left in last year.
- wyearlen = self.yearlen-no1wkst
- div, mod = divmod(wyearlen, 7)
- numweeks = div+mod//4
- for n in rr._byweekno:
- if n < 0:
- n += numweeks+1
- if not (0 < n <= numweeks):
- continue
- if n > 1:
- i = no1wkst+(n-1)*7
- if no1wkst != firstwkst:
- i -= 7-firstwkst
- else:
- i = no1wkst
- for j in range(7):
- self.wnomask[i] = 1
- i += 1
- if self.wdaymask[i] == rr._wkst:
- break
- if 1 in rr._byweekno:
- # Check week number 1 of next year as well
- # TODO: Check -numweeks for next year.
- i = no1wkst+numweeks*7
- if no1wkst != firstwkst:
- i -= 7-firstwkst
- if i < self.yearlen:
- # If week starts in next year, we
- # don't care about it.
- for j in range(7):
- self.wnomask[i] = 1
- i += 1
- if self.wdaymask[i] == rr._wkst:
- break
- if no1wkst:
- # Check last week number of last year as
- # well. If no1wkst is 0, either the year
- # started on week start, or week number 1
- # got days from last year, so there are no
- # days from last year's last week number in
- # this year.
- if -1 not in rr._byweekno:
- lyearweekday = datetime.date(year-1, 1, 1).weekday()
- lno1wkst = (7-lyearweekday+rr._wkst) % 7
- lyearlen = 365+calendar.isleap(year-1)
- if lno1wkst >= 4:
- lno1wkst = 0
- lnumweeks = 52+(lyearlen +
- (lyearweekday-rr._wkst) % 7) % 7//4
- else:
- lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
- else:
- lnumweeks = -1
- if lnumweeks in rr._byweekno:
- for i in range(no1wkst):
- self.wnomask[i] = 1
-
- if (rr._bynweekday and (month != self.lastmonth or
- year != self.lastyear)):
- ranges = []
- if rr._freq == YEARLY:
- if rr._bymonth:
- for month in rr._bymonth:
- ranges.append(self.mrange[month-1:month+1])
- else:
- ranges = [(0, self.yearlen)]
- elif rr._freq == MONTHLY:
- ranges = [self.mrange[month-1:month+1]]
- if ranges:
- # Weekly frequency won't get here, so we may not
- # care about cross-year weekly periods.
- self.nwdaymask = [0]*self.yearlen
- for first, last in ranges:
- last -= 1
- for wday, n in rr._bynweekday:
- if n < 0:
- i = last+(n+1)*7
- i -= (self.wdaymask[i]-wday) % 7
- else:
- i = first+(n-1)*7
- i += (7-self.wdaymask[i]+wday) % 7
- if first <= i <= last:
- self.nwdaymask[i] = 1
-
- if rr._byeaster:
- self.eastermask = [0]*(self.yearlen+7)
- eyday = easter.easter(year).toordinal()-self.yearordinal
- for offset in rr._byeaster:
- self.eastermask[eyday+offset] = 1
-
- self.lastyear = year
- self.lastmonth = month
-
- def ydayset(self, year, month, day):
- return list(range(self.yearlen)), 0, self.yearlen
-
- def mdayset(self, year, month, day):
- dset = [None]*self.yearlen
- start, end = self.mrange[month-1:month+1]
- for i in range(start, end):
- dset[i] = i
- return dset, start, end
-
- def wdayset(self, year, month, day):
- # We need to handle cross-year weeks here.
- dset = [None]*(self.yearlen+7)
- i = datetime.date(year, month, day).toordinal()-self.yearordinal
- start = i
- for j in range(7):
- dset[i] = i
- i += 1
- # if (not (0 <= i < self.yearlen) or
- # self.wdaymask[i] == self.rrule._wkst):
- # This will cross the year boundary, if necessary.
- if self.wdaymask[i] == self.rrule._wkst:
- break
- return dset, start, i
-
- def ddayset(self, year, month, day):
- dset = [None] * self.yearlen
- i = datetime.date(year, month, day).toordinal() - self.yearordinal
- dset[i] = i
- return dset, i, i + 1
-
- def htimeset(self, hour, minute, second):
- tset = []
- rr = self.rrule
- for minute in rr._byminute:
- for second in rr._bysecond:
- tset.append(datetime.time(hour, minute, second,
- tzinfo=rr._tzinfo))
- tset.sort()
- return tset
-
- def mtimeset(self, hour, minute, second):
- tset = []
- rr = self.rrule
- for second in rr._bysecond:
- tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
- tset.sort()
- return tset
-
- def stimeset(self, hour, minute, second):
- return (datetime.time(hour, minute, second,
- tzinfo=self.rrule._tzinfo),)
-
-
-class rruleset(rrulebase):
- """ The rruleset type allows more complex recurrence setups, mixing
- multiple rules, dates, exclusion rules, and exclusion dates. The type
- constructor takes the following keyword arguments:
-
- :param cache: If True, caching of results will be enabled, improving
- performance of multiple queries considerably. """
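-
- # Usage sketch (illustrative): daily occurrences with one date excluded.
- #
- #     rs = rruleset()
- #     rs.rrule(rrule(DAILY, count=4,
- #                    dtstart=datetime.datetime(2020, 1, 1)))
- #     rs.exdate(datetime.datetime(2020, 1, 2))
- #     list(rs)  # Jan 1, 3 and 4; Jan 2 is excluded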
-
- class _genitem(object):
- def __init__(self, genlist, gen):
- try:
- self.dt = advance_iterator(gen)
- genlist.append(self)
- except StopIteration:
- pass
- self.genlist = genlist
- self.gen = gen
-
- def __next__(self):
- try:
- self.dt = advance_iterator(self.gen)
- except StopIteration:
- if self.genlist[0] is self:
- heapq.heappop(self.genlist)
- else:
- self.genlist.remove(self)
- heapq.heapify(self.genlist)
-
- next = __next__
-
- def __lt__(self, other):
- return self.dt < other.dt
-
- def __gt__(self, other):
- return self.dt > other.dt
-
- def __eq__(self, other):
- return self.dt == other.dt
-
- def __ne__(self, other):
- return self.dt != other.dt
-
- def __init__(self, cache=False):
- super(rruleset, self).__init__(cache)
- self._rrule = []
- self._rdate = []
- self._exrule = []
- self._exdate = []
-
- @_invalidates_cache
- def rrule(self, rrule):
- """ Include the given :py:class:`rrule` instance in the recurrence set
- generation. """
- self._rrule.append(rrule)
-
- @_invalidates_cache
- def rdate(self, rdate):
- """ Include the given :py:class:`datetime` instance in the recurrence
- set generation. """
- self._rdate.append(rdate)
-
- @_invalidates_cache
- def exrule(self, exrule):
- """ Include the given rrule instance in the recurrence set exclusion
- list. Dates which are part of the given recurrence rules will not
- be generated, even if some inclusive rrule or rdate matches them.
- """
- self._exrule.append(exrule)
-
- @_invalidates_cache
- def exdate(self, exdate):
- """ Include the given datetime instance in the recurrence set
- exclusion list. Dates included that way will not be generated,
- even if some inclusive rrule or rdate matches them. """
- self._exdate.append(exdate)
-
- def _iter(self):
- rlist = []
- self._rdate.sort()
- self._genitem(rlist, iter(self._rdate))
- for gen in [iter(x) for x in self._rrule]:
- self._genitem(rlist, gen)
- exlist = []
- self._exdate.sort()
- self._genitem(exlist, iter(self._exdate))
- for gen in [iter(x) for x in self._exrule]:
- self._genitem(exlist, gen)
- lastdt = None
- total = 0
- heapq.heapify(rlist)
- heapq.heapify(exlist)
- while rlist:
- ritem = rlist[0]
- if not lastdt or lastdt != ritem.dt:
- while exlist and exlist[0] < ritem:
- exitem = exlist[0]
- advance_iterator(exitem)
- if exlist and exlist[0] is exitem:
- heapq.heapreplace(exlist, exitem)
- if not exlist or ritem != exlist[0]:
- total += 1
- yield ritem.dt
- lastdt = ritem.dt
- advance_iterator(ritem)
- if rlist and rlist[0] is ritem:
- heapq.heapreplace(rlist, ritem)
- self._len = total
-
-
-class _rrulestr(object):
-
- _freq_map = {"YEARLY": YEARLY,
- "MONTHLY": MONTHLY,
- "WEEKLY": WEEKLY,
- "DAILY": DAILY,
- "HOURLY": HOURLY,
- "MINUTELY": MINUTELY,
- "SECONDLY": SECONDLY}
-
- _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
- "FR": 4, "SA": 5, "SU": 6}
-
- def _handle_int(self, rrkwargs, name, value, **kwargs):
- rrkwargs[name.lower()] = int(value)
-
- def _handle_int_list(self, rrkwargs, name, value, **kwargs):
- rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
-
- _handle_INTERVAL = _handle_int
- _handle_COUNT = _handle_int
- _handle_BYSETPOS = _handle_int_list
- _handle_BYMONTH = _handle_int_list
- _handle_BYMONTHDAY = _handle_int_list
- _handle_BYYEARDAY = _handle_int_list
- _handle_BYEASTER = _handle_int_list
- _handle_BYWEEKNO = _handle_int_list
- _handle_BYHOUR = _handle_int_list
- _handle_BYMINUTE = _handle_int_list
- _handle_BYSECOND = _handle_int_list
-
- def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
- rrkwargs["freq"] = self._freq_map[value]
-
- def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
- global parser
- if not parser:
- from dateutil import parser
- try:
- rrkwargs["until"] = parser.parse(value,
- ignoretz=kwargs.get("ignoretz"),
- tzinfos=kwargs.get("tzinfos"))
- except ValueError:
- raise ValueError("invalid until date")
-
- def _handle_WKST(self, rrkwargs, name, value, **kwargs):
- rrkwargs["wkst"] = self._weekday_map[value]
-
- def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
- """
- Two ways to specify this: +1MO or MO(+1)
- """
- l = []
- for wday in value.split(','):
- if '(' in wday:
- # If it's of the form TH(+1), etc.
- splt = wday.split('(')
- w = splt[0]
- n = int(splt[1][:-1])
- elif len(wday):
- # If it's of the form +1MO
- for i in range(len(wday)):
- if wday[i] not in '+-0123456789':
- break
- n = wday[:i] or None
- w = wday[i:]
- if n:
- n = int(n)
- else:
- raise ValueError("Invalid (empty) BYDAY specification.")
-
- l.append(weekdays[self._weekday_map[w]](n))
- rrkwargs["byweekday"] = l
-
- _handle_BYDAY = _handle_BYWEEKDAY
-
- def _parse_rfc_rrule(self, line,
- dtstart=None,
- cache=False,
- ignoretz=False,
- tzinfos=None):
- if line.find(':') != -1:
- name, value = line.split(':', 1)
- if name != "RRULE":
- raise ValueError("unknown parameter name")
- else:
- value = line
- rrkwargs = {}
- for pair in value.split(';'):
- name, value = pair.split('=')
- name = name.upper()
- value = value.upper()
- try:
- getattr(self, "_handle_"+name)(rrkwargs, name, value,
- ignoretz=ignoretz,
- tzinfos=tzinfos)
- except AttributeError:
- raise ValueError("unknown parameter '%s'" % name)
- except (KeyError, ValueError):
- raise ValueError("invalid '%s': %s" % (name, value))
- return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
-
- def _parse_rfc(self, s,
- dtstart=None,
- cache=False,
- unfold=False,
- forceset=False,
- compatible=False,
- ignoretz=False,
- tzinfos=None):
- global parser
- if compatible:
- forceset = True
- unfold = True
- s = s.upper()
- if not s.strip():
- raise ValueError("empty string")
- if unfold:
- lines = s.splitlines()
- i = 0
- while i < len(lines):
- line = lines[i].rstrip()
- if not line:
- del lines[i]
- elif i > 0 and line[0] == " ":
- lines[i-1] += line[1:]
- del lines[i]
- else:
- i += 1
- else:
- lines = s.split()
- if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
- s.startswith('RRULE:'))):
- return self._parse_rfc_rrule(lines[0], cache=cache,
- dtstart=dtstart, ignoretz=ignoretz,
- tzinfos=tzinfos)
- else:
- rrulevals = []
- rdatevals = []
- exrulevals = []
- exdatevals = []
- for line in lines:
- if not line:
- continue
- if line.find(':') == -1:
- name = "RRULE"
- value = line
- else:
- name, value = line.split(':', 1)
- parms = name.split(';')
- if not parms:
- raise ValueError("empty property name")
- name = parms[0]
- parms = parms[1:]
- if name == "RRULE":
- for parm in parms:
- raise ValueError("unsupported RRULE parm: "+parm)
- rrulevals.append(value)
- elif name == "RDATE":
- for parm in parms:
- if parm != "VALUE=DATE-TIME":
- raise ValueError("unsupported RDATE parm: "+parm)
- rdatevals.append(value)
- elif name == "EXRULE":
- for parm in parms:
- raise ValueError("unsupported EXRULE parm: "+parm)
- exrulevals.append(value)
- elif name == "EXDATE":
- for parm in parms:
- if parm != "VALUE=DATE-TIME":
- raise ValueError("unsupported EXDATE parm: "+parm)
- exdatevals.append(value)
- elif name == "DTSTART":
- for parm in parms:
- raise ValueError("unsupported DTSTART parm: "+parm)
- if not parser:
- from dateutil import parser
- dtstart = parser.parse(value, ignoretz=ignoretz,
- tzinfos=tzinfos)
- else:
- raise ValueError("unsupported property: "+name)
- if (forceset or len(rrulevals) > 1 or rdatevals
- or exrulevals or exdatevals):
- if not parser and (rdatevals or exdatevals):
- from dateutil import parser
- rset = rruleset(cache=cache)
- for value in rrulevals:
- rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
- ignoretz=ignoretz,
- tzinfos=tzinfos))
- for value in rdatevals:
- for datestr in value.split(','):
- rset.rdate(parser.parse(datestr,
- ignoretz=ignoretz,
- tzinfos=tzinfos))
- for value in exrulevals:
- rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
- ignoretz=ignoretz,
- tzinfos=tzinfos))
- for value in exdatevals:
- for datestr in value.split(','):
- rset.exdate(parser.parse(datestr,
- ignoretz=ignoretz,
- tzinfos=tzinfos))
- if compatible and dtstart:
- rset.rdate(dtstart)
- return rset
- else:
- return self._parse_rfc_rrule(rrulevals[0],
- dtstart=dtstart,
- cache=cache,
- ignoretz=ignoretz,
- tzinfos=tzinfos)
-
- def __call__(self, s, **kwargs):
- return self._parse_rfc(s, **kwargs)
-
-
-rrulestr = _rrulestr()
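-
-# Usage sketch (illustrative): parse an RFC2445-style string back into
-# an rrule.
-#
-#     r = rrulestr("DTSTART:20200101T000000\n"
-#                  "RRULE:FREQ=DAILY;COUNT=3")
-#     list(r)  # Jan 1, 2 and 3 of 2020, at midnight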
-
-# vim:ts=4:sw=4:et
diff --git a/venv/Lib/site-packages/dateutil/tz/__init__.py b/venv/Lib/site-packages/dateutil/tz/__init__.py
deleted file mode 100644
index b0a5043..0000000
--- a/venv/Lib/site-packages/dateutil/tz/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .tz import *
-
-__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
- "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
- "enfold", "datetime_ambiguous", "datetime_exists"]
diff --git a/venv/Lib/site-packages/dateutil/tz/_common.py b/venv/Lib/site-packages/dateutil/tz/_common.py
deleted file mode 100644
index f1cf2af..0000000
--- a/venv/Lib/site-packages/dateutil/tz/_common.py
+++ /dev/null
@@ -1,394 +0,0 @@
-from six import PY3
-
-from functools import wraps
-
-from datetime import datetime, timedelta, tzinfo
-
-
-ZERO = timedelta(0)
-
-__all__ = ['tzname_in_python2', 'enfold']
-
-
-def tzname_in_python2(namefunc):
- """Change unicode output into bytestrings in Python 2
-
- The tzname() API changed in Python 3: it used to return bytes, but now
- returns unicode strings.
- """
- @wraps(namefunc)
- def adjust_encoding(*args, **kwargs):
- name = namefunc(*args, **kwargs)
- if name is not None and not PY3:
- name = name.encode()
-
- return name
-
- return adjust_encoding
-
-
-# The following is adapted from Alexander Belopolsky's tz library
-# https://github.com/abalkin/tz
-if hasattr(datetime, 'fold'):
- # Python 3.6+ supports the fold attribute natively
- def enfold(dt, fold=1):
- """
- Provides a unified interface for assigning the ``fold`` attribute to
- datetimes both before and after the implementation of PEP-495.
-
- :param fold:
- The value for the ``fold`` attribute in the returned datetime. This
- should be either 0 or 1.
-
- :return:
- Returns an object for which ``getattr(dt, 'fold', 0)`` returns
- ``fold`` for all versions of Python. In versions prior to
- Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
- subclass of :py:class:`datetime.datetime` with the ``fold``
- attribute added, if ``fold`` is 1.
-
- .. versionadded:: 2.6.0
- """
- return dt.replace(fold=fold)
-
-else:
- class _DatetimeWithFold(datetime):
- """
- This is a class designed to provide a PEP 495-compliant interface for
- Python versions before 3.6. It is used only for dates in a fold, so
- the ``fold`` attribute is fixed at ``1``.
-
- .. versionadded:: 2.6.0
- """
- __slots__ = ()
-
- @property
- def fold(self):
- return 1
-
- def enfold(dt, fold=1):
- """
- Provides a unified interface for assigning the ``fold`` attribute to
- datetimes both before and after the implementation of PEP-495.
-
- :param fold:
- The value for the ``fold`` attribute in the returned datetime. This
- should be either 0 or 1.
-
- :return:
- Returns an object for which ``getattr(dt, 'fold', 0)`` returns
- ``fold`` for all versions of Python. In versions prior to
- Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
- subclass of :py:class:`datetime.datetime` with the ``fold``
- attribute added, if ``fold`` is 1.
-
- .. versionadded:: 2.6.0
- """
- if getattr(dt, 'fold', 0) == fold:
- return dt
-
- args = dt.timetuple()[:6]
- args += (dt.microsecond, dt.tzinfo)
-
- if fold:
- return _DatetimeWithFold(*args)
- else:
- return datetime(*args)
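-
-# Usage sketch (illustrative): mark a wall time as the later of two
-# ambiguous readings; this works the same on all supported Pythons.
-#
-#     dt = enfold(datetime(2011, 11, 6, 1, 30), fold=1)
-#     getattr(dt, 'fold', 0)  # 1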
-
-
-def _validate_fromutc_inputs(f):
- """
- The CPython version of ``fromutc`` checks that the input is a ``datetime``
- object and that ``self`` is attached as its ``tzinfo``.
- """
- @wraps(f)
- def fromutc(self, dt):
- if not isinstance(dt, datetime):
- raise TypeError("fromutc() requires a datetime argument")
- if dt.tzinfo is not self:
- raise ValueError("dt.tzinfo is not self")
-
- return f(self, dt)
-
- return fromutc
-
-
-class _tzinfo(tzinfo):
- """
- Base class for all ``dateutil`` ``tzinfo`` objects.
- """
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
-
- dt = dt.replace(tzinfo=self)
-
- wall_0 = enfold(dt, fold=0)
- wall_1 = enfold(dt, fold=1)
-
- same_offset = wall_0.utcoffset() == wall_1.utcoffset()
- same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
-
- return same_dt and not same_offset
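-
- # Sketch: for a concrete US Eastern zone object ``tz`` (however it is
- # obtained), the 2011 fall-back transition behaves as:
- #
- #     tz.is_ambiguous(datetime(2011, 11, 6, 1, 30))  # True
- #     tz.is_ambiguous(datetime(2011, 11, 6, 3, 30))  # False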
-
- def _fold_status(self, dt_utc, dt_wall):
- """
- Determine the fold status of a "wall" datetime, given a representation
- of the same datetime as a (naive) UTC datetime. This is calculated based
- on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
- datetimes, and that this offset is the actual number of hours separating
- ``dt_utc`` and ``dt_wall``.
-
- :param dt_utc:
- Representation of the datetime as UTC
-
- :param dt_wall:
- Representation of the datetime as "wall time". This parameter must
- either have a `fold` attribute or have a fold-naive
- :class:`datetime.tzinfo` attached, otherwise the calculation may
- fail.
- """
- if self.is_ambiguous(dt_wall):
- delta_wall = dt_wall - dt_utc
- _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
- else:
- _fold = 0
-
- return _fold
-
- def _fold(self, dt):
- return getattr(dt, 'fold', 0)
-
- def _fromutc(self, dt):
- """
- Given a timezone-aware datetime in a given timezone, calculates a
- timezone-aware datetime in a new timezone.
-
- Since this is the one time that we *know* we have an unambiguous
- datetime object, we take this opportunity to determine whether the
- datetime is ambiguous and in a "fold" state (e.g. if it's the first
- occurrence, chronologically, of the ambiguous datetime).
-
- :param dt:
- A timezone-aware :class:`datetime.datetime` object.
- """
-
- # Re-implement the algorithm from Python's datetime.py
- dtoff = dt.utcoffset()
- if dtoff is None:
- raise ValueError("fromutc() requires a non-None utcoffset() "
- "result")
-
- # The original datetime.py code assumes that `dst()` defaults to
- # zero during ambiguous times. PEP 495 inverts this presumption, so
- # for pre-PEP 495 versions of python, we need to tweak the algorithm.
- dtdst = dt.dst()
- if dtdst is None:
- raise ValueError("fromutc() requires a non-None dst() result")
- delta = dtoff - dtdst
-
- dt += delta
- # Set fold=1 so we can default to being in the fold for
- # ambiguous dates.
- dtdst = enfold(dt, fold=1).dst()
- if dtdst is None:
- raise ValueError("fromutc(): dt.dst gave inconsistent "
- "results; cannot convert")
- return dt + dtdst
-
- @_validate_fromutc_inputs
- def fromutc(self, dt):
- """
- Given a timezone-aware datetime in a given timezone, calculates a
- timezone-aware datetime in a new timezone.
-
- Since this is the one time that we *know* we have an unambiguous
- datetime object, we take this opportunity to determine whether the
- datetime is ambiguous and in a "fold" state (e.g. if it's the first
- occurrence, chronologically, of the ambiguous datetime).
-
- :param dt:
- A timezone-aware :class:`datetime.datetime` object.
- """
- dt_wall = self._fromutc(dt)
-
- # Calculate the fold status given the two datetimes.
- _fold = self._fold_status(dt, dt_wall)
-
- # Set the default fold value for ambiguous dates
- return enfold(dt_wall, fold=_fold)
-
-
-class tzrangebase(_tzinfo):
- """
- This is an abstract base class for time zones represented by an annual
- transition into and out of DST. Child classes should implement the following
- methods:
-
- * ``__init__(self, *args, **kwargs)``
- * ``transitions(self, year)`` - this is expected to return a tuple of
- datetimes representing the DST on and off transitions in standard
- time.
-
- A fully initialized ``tzrangebase`` subclass should also provide the
- following attributes:
- * ``hasdst``: Boolean whether or not the zone uses DST.
- * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
- representing the respective UTC offsets.
- * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
- abbreviations in DST and STD, respectively.
- * ``_hasdst``: Whether or not the zone has DST.
-
- .. versionadded:: 2.6.0
- """
- def __init__(self):
- raise NotImplementedError('tzrangebase is an abstract base class')
-
- def utcoffset(self, dt):
- isdst = self._isdst(dt)
-
- if isdst is None:
- return None
- elif isdst:
- return self._dst_offset
- else:
- return self._std_offset
-
- def dst(self, dt):
- isdst = self._isdst(dt)
-
- if isdst is None:
- return None
- elif isdst:
- return self._dst_base_offset
- else:
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- if self._isdst(dt):
- return self._dst_abbr
- else:
- return self._std_abbr
-
- def fromutc(self, dt):
- """ Given a datetime in UTC, return local time """
- if not isinstance(dt, datetime):
- raise TypeError("fromutc() requires a datetime argument")
-
- if dt.tzinfo is not self:
- raise ValueError("dt.tzinfo is not self")
-
- # Get transitions - if there are none, fixed offset
- transitions = self.transitions(dt.year)
- if transitions is None:
- return dt + self.utcoffset(dt)
-
- # Get the transition times in UTC
- dston, dstoff = transitions
-
- dston -= self._std_offset
- dstoff -= self._std_offset
-
- utc_transitions = (dston, dstoff)
- dt_utc = dt.replace(tzinfo=None)
-
- isdst = self._naive_isdst(dt_utc, utc_transitions)
-
- if isdst:
- dt_wall = dt + self._dst_offset
- else:
- dt_wall = dt + self._std_offset
-
- _fold = int(not isdst and self.is_ambiguous(dt_wall))
-
- return enfold(dt_wall, fold=_fold)
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
- if not self.hasdst:
- return False
-
- start, end = self.transitions(dt.year)
-
- dt = dt.replace(tzinfo=None)
- return (end <= dt < end + self._dst_base_offset)
-
- def _isdst(self, dt):
- if not self.hasdst:
- return False
- elif dt is None:
- return None
-
- transitions = self.transitions(dt.year)
-
- if transitions is None:
- return False
-
- dt = dt.replace(tzinfo=None)
-
- isdst = self._naive_isdst(dt, transitions)
-
- # Handle ambiguous dates
- if not isdst and self.is_ambiguous(dt):
- return not self._fold(dt)
- else:
- return isdst
-
- def _naive_isdst(self, dt, transitions):
- dston, dstoff = transitions
-
- dt = dt.replace(tzinfo=None)
-
- if dston < dstoff:
- isdst = dston <= dt < dstoff
- else:
- isdst = not dstoff <= dt < dston
-
- return isdst
-
- @property
- def _dst_base_offset(self):
- return self._dst_offset - self._std_offset
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s(...)" % self.__class__.__name__
-
- __reduce__ = object.__reduce__
-
-
-def _total_seconds(td):
- # Python 2.6 doesn't have a total_seconds() method on timedelta objects
- return ((td.seconds + td.days * 86400) * 1000000 +
- td.microseconds) // 1000000
-
-
-_total_seconds = getattr(timedelta, 'total_seconds', _total_seconds)
diff --git a/venv/Lib/site-packages/dateutil/tz/tz.py b/venv/Lib/site-packages/dateutil/tz/tz.py
deleted file mode 100644
index 9468282..0000000
--- a/venv/Lib/site-packages/dateutil/tz/tz.py
+++ /dev/null
@@ -1,1511 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module offers timezone implementations subclassing the abstract
-:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
-files (usually found in :file:`/etc/localtime` or :file:`/usr/share/zoneinfo`),
-TZ environment strings (in all known formats), given ranges (with help from
-relative deltas), local machine timezone, fixed offset timezone, and UTC
-timezone.
-"""
-import datetime
-import struct
-import time
-import sys
-import os
-import bisect
-
-from six import string_types
-from ._common import tzname_in_python2, _tzinfo, _total_seconds
-from ._common import tzrangebase, enfold
-from ._common import _validate_fromutc_inputs
-
-try:
- from .win import tzwin, tzwinlocal
-except ImportError:
- tzwin = tzwinlocal = None
-
-ZERO = datetime.timedelta(0)
-EPOCH = datetime.datetime.utcfromtimestamp(0)
-EPOCHORDINAL = EPOCH.toordinal()
-
-
-class tzutc(datetime.tzinfo):
- """
- This is a tzinfo object that represents the UTC time zone.
- """
- def utcoffset(self, dt):
- return ZERO
-
- def dst(self, dt):
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- return "UTC"
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
- return False
-
- @_validate_fromutc_inputs
- def fromutc(self, dt):
- """
- Fast track version of fromutc() returns the original ``dt`` object for
- any valid :py:class:`datetime.datetime` object.
- """
- return dt
-
- def __eq__(self, other):
- if not isinstance(other, (tzutc, tzoffset)):
- return NotImplemented
-
- return (isinstance(other, tzutc) or
- (isinstance(other, tzoffset) and other._offset == ZERO))
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s()" % self.__class__.__name__
-
- __reduce__ = object.__reduce__
-
-
-class tzoffset(datetime.tzinfo):
- """
- A simple class for representing a fixed offset from UTC.
-
- :param name:
- The timezone name, to be returned when ``tzname()`` is called.
-
- :param offset:
- The time zone offset in seconds, or (since version 2.6.0) a
- :py:class:`datetime.timedelta` object representing the offset.
- """
- def __init__(self, name, offset):
- self._name = name
-
- try:
- # Allow a timedelta
- offset = _total_seconds(offset)
- except (TypeError, AttributeError):
- pass
- self._offset = datetime.timedelta(seconds=offset)
-
- def utcoffset(self, dt):
- return self._offset
-
- def dst(self, dt):
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- return self._name
-
- @_validate_fromutc_inputs
- def fromutc(self, dt):
- return dt + self._offset
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
- return False
-
- def __eq__(self, other):
- if not isinstance(other, tzoffset):
- return NotImplemented
-
- return self._offset == other._offset
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s(%s, %s)" % (self.__class__.__name__,
- repr(self._name),
- int(_total_seconds(self._offset)))
-
- __reduce__ = object.__reduce__
-
-
-class tzlocal(_tzinfo):
- """
- A :class:`tzinfo` subclass built around the ``time`` timezone functions.
- """
- def __init__(self):
- super(tzlocal, self).__init__()
-
- self._std_offset = datetime.timedelta(seconds=-time.timezone)
- if time.daylight:
- self._dst_offset = datetime.timedelta(seconds=-time.altzone)
- else:
- self._dst_offset = self._std_offset
-
- self._dst_saved = self._dst_offset - self._std_offset
- self._hasdst = bool(self._dst_saved)
-
- def utcoffset(self, dt):
- if dt is None and self._hasdst:
- return None
-
- if self._isdst(dt):
- return self._dst_offset
- else:
- return self._std_offset
-
- def dst(self, dt):
- if dt is None and self._hasdst:
- return None
-
- if self._isdst(dt):
- return self._dst_offset - self._std_offset
- else:
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- return time.tzname[self._isdst(dt)]
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
- naive_dst = self._naive_is_dst(dt)
- return (not naive_dst and
- (naive_dst != self._naive_is_dst(dt - self._dst_saved)))
-
- def _naive_is_dst(self, dt):
- timestamp = _datetime_to_timestamp(dt)
- return time.localtime(timestamp + time.timezone).tm_isdst
-
- def _isdst(self, dt, fold_naive=True):
- # We can't use mktime here. It is unstable when deciding if
- # the hour near to a change is DST or not.
- #
- # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
- # dt.minute, dt.second, dt.weekday(), 0, -1))
- # return time.localtime(timestamp).tm_isdst
- #
- # The code above yields the following result:
- #
- # >>> import tz, datetime
- # >>> t = tz.tzlocal()
- # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
- # 'BRDT'
- # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
- # 'BRST'
- # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
- # 'BRST'
- # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
- # 'BRDT'
- # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
- # 'BRDT'
- #
- # Here is a more stable implementation:
- #
- if not self._hasdst:
- return False
-
- # Check for ambiguous times:
- dstval = self._naive_is_dst(dt)
- fold = getattr(dt, 'fold', None)
-
- if self.is_ambiguous(dt):
- if fold is not None:
- return not self._fold(dt)
- else:
- return True
-
- return dstval
-
- def __eq__(self, other):
- if not isinstance(other, tzlocal):
- return NotImplemented
-
- return (self._std_offset == other._std_offset and
- self._dst_offset == other._dst_offset)
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s()" % self.__class__.__name__
-
- __reduce__ = object.__reduce__
-
-
-class _ttinfo(object):
- __slots__ = ["offset", "delta", "isdst", "abbr",
- "isstd", "isgmt", "dstoffset"]
-
- def __init__(self):
- for attr in self.__slots__:
- setattr(self, attr, None)
-
- def __repr__(self):
- l = []
- for attr in self.__slots__:
- value = getattr(self, attr)
- if value is not None:
- l.append("%s=%s" % (attr, repr(value)))
- return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
-
- def __eq__(self, other):
- if not isinstance(other, _ttinfo):
- return NotImplemented
-
- return (self.offset == other.offset and
- self.delta == other.delta and
- self.isdst == other.isdst and
- self.abbr == other.abbr and
- self.isstd == other.isstd and
- self.isgmt == other.isgmt and
- self.dstoffset == other.dstoffset)
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __getstate__(self):
- state = {}
- for name in self.__slots__:
- state[name] = getattr(self, name, None)
- return state
-
- def __setstate__(self, state):
- for name in self.__slots__:
- if name in state:
- setattr(self, name, state[name])
-
-
-class _tzfile(object):
- """
- Lightweight class for holding the relevant transition and time zone
- information read from binary tzfiles.
- """
- attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list',
- 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
-
- def __init__(self, **kwargs):
- for attr in self.attrs:
- setattr(self, attr, kwargs.get(attr, None))
-
-
-class tzfile(_tzinfo):
- """
- This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
- format timezone files to extract current and historical zone information.
-
- :param fileobj:
- This can be an opened file stream or a file name from which the time
- zone information can be read.
-
- :param filename:
- This is an optional parameter specifying the source of the time zone
- information in the event that ``fileobj`` is a file object. If omitted
- and ``fileobj`` is a file stream, this parameter will be set either to
- ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
-
- See `Sources for Time Zone and Daylight Saving Time Data
- <https://data.iana.org/time-zones/tz-link.html>`_ for more information.
- Time zone files can be compiled from the `IANA Time Zone database files
- <https://www.iana.org/time-zones>`_ with the `zic time zone compiler
- <https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_
- """
-
- def __init__(self, fileobj, filename=None):
- super(tzfile, self).__init__()
-
- file_opened_here = False
- if isinstance(fileobj, string_types):
- self._filename = fileobj
- fileobj = open(fileobj, 'rb')
- file_opened_here = True
- elif filename is not None:
- self._filename = filename
- elif hasattr(fileobj, "name"):
- self._filename = fileobj.name
- else:
- self._filename = repr(fileobj)
-
- if fileobj is not None:
- if not file_opened_here:
- fileobj = _ContextWrapper(fileobj)
-
- with fileobj as file_stream:
- tzobj = self._read_tzfile(file_stream)
-
- self._set_tzdata(tzobj)
-
- def _set_tzdata(self, tzobj):
- """ Set the time zone data of this object from a _tzfile object """
- # Copy the relevant attributes over as private attributes
- for attr in _tzfile.attrs:
- setattr(self, '_' + attr, getattr(tzobj, attr))
-
- def _read_tzfile(self, fileobj):
- out = _tzfile()
-
- # From tzfile(5):
- #
- # The time zone information files used by tzset(3)
- # begin with the magic characters "TZif" to identify
- # them as time zone information files, followed by
- # sixteen bytes reserved for future use, followed by
- # six four-byte values of type long, written in a
- # ``standard'' byte order (the high-order byte
- # of the value is written first).
- if fileobj.read(4).decode() != "TZif":
- raise ValueError("magic not found")
-
- fileobj.read(16)
-
- (
- # The number of UTC/local indicators stored in the file.
- ttisgmtcnt,
-
- # The number of standard/wall indicators stored in the file.
- ttisstdcnt,
-
- # The number of leap seconds for which data is
- # stored in the file.
- leapcnt,
-
- # The number of "transition times" for which data
- # is stored in the file.
- timecnt,
-
- # The number of "local time types" for which data
- # is stored in the file (must not be zero).
- typecnt,
-
- # The number of characters of "time zone
- # abbreviation strings" stored in the file.
- charcnt,
-
- ) = struct.unpack(">6l", fileobj.read(24))
-
- # The above header is followed by tzh_timecnt four-byte
- # values of type long, sorted in ascending order.
- # These values are written in ``standard'' byte order.
- # Each is used as a transition time (as returned by
- # time(2)) at which the rules for computing local time
- # change.
-
- if timecnt:
- out.trans_list_utc = list(struct.unpack(">%dl" % timecnt,
- fileobj.read(timecnt*4)))
- else:
- out.trans_list_utc = []
-
- # Next come tzh_timecnt one-byte values of type unsigned
- # char; each one tells which of the different types of
- # ``local time'' types described in the file is associated
- # with the same-indexed transition time. These values
- # serve as indices into an array of ttinfo structures that
- # appears next in the file.
-
- if timecnt:
- out.trans_idx = struct.unpack(">%dB" % timecnt,
- fileobj.read(timecnt))
- else:
- out.trans_idx = []
-
- # Each ttinfo structure is written as a four-byte value
- # for tt_gmtoff of type long, in a standard byte
- # order, followed by a one-byte value for tt_isdst
- # and a one-byte value for tt_abbrind. In each
- # structure, tt_gmtoff gives the number of
- # seconds to be added to UTC, tt_isdst tells whether
- # tm_isdst should be set by localtime(3), and
- # tt_abbrind serves as an index into the array of
- # time zone abbreviation characters that follow the
- # ttinfo structure(s) in the file.
-
- ttinfo = []
-
- for i in range(typecnt):
- ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
-
- abbr = fileobj.read(charcnt).decode()
-
- # Then there are tzh_leapcnt pairs of four-byte
- # values, written in standard byte order; the
- # first value of each pair gives the time (as
- # returned by time(2)) at which a leap second
- # occurs; the second gives the total number of
- # leap seconds to be applied after the given time.
- # The pairs of values are sorted in ascending order
- # by time.
-
- # Not used, for now (but seek for correct file position)
- if leapcnt:
- fileobj.seek(leapcnt * 8, os.SEEK_CUR)
-
- # Then there are tzh_ttisstdcnt standard/wall
- # indicators, each stored as a one-byte value;
- # they tell whether the transition times associated
- # with local time types were specified as standard
- # time or wall clock time, and are used when
- # a time zone file is used in handling POSIX-style
- # time zone environment variables.
-
- if ttisstdcnt:
- isstd = struct.unpack(">%db" % ttisstdcnt,
- fileobj.read(ttisstdcnt))
-
- # Finally, there are tzh_ttisgmtcnt UTC/local
- # indicators, each stored as a one-byte value;
- # they tell whether the transition times associated
- # with local time types were specified as UTC or
- # local time, and are used when a time zone file
- # is used in handling POSIX-style time zone
- # environment variables.
-
- if ttisgmtcnt:
- isgmt = struct.unpack(">%db" % ttisgmtcnt,
- fileobj.read(ttisgmtcnt))
-
- # Build ttinfo list
- out.ttinfo_list = []
- for i in range(typecnt):
- gmtoff, isdst, abbrind = ttinfo[i]
- # Round to full minutes if that's not the case. Python's
- # datetime doesn't accept sub-minute timezones. Check
- # http://python.org/sf/1447945 for some information.
- gmtoff = 60 * ((gmtoff + 30) // 60)
- tti = _ttinfo()
- tti.offset = gmtoff
- tti.dstoffset = datetime.timedelta(0)
- tti.delta = datetime.timedelta(seconds=gmtoff)
- tti.isdst = isdst
- tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
- tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
- tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
- out.ttinfo_list.append(tti)
-
- # Replace ttinfo indexes for ttinfo objects.
- out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]
-
- # Set standard, dst, and before ttinfos. before will be
- # used when a given time is before any transitions,
- # and will be set to the first non-dst ttinfo, or to
- # the first dst, if all of them are dst.
- out.ttinfo_std = None
- out.ttinfo_dst = None
- out.ttinfo_before = None
- if out.ttinfo_list:
- if not out.trans_list_utc:
- out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
- else:
- for i in range(timecnt-1, -1, -1):
- tti = out.trans_idx[i]
- if not out.ttinfo_std and not tti.isdst:
- out.ttinfo_std = tti
- elif not out.ttinfo_dst and tti.isdst:
- out.ttinfo_dst = tti
-
- if out.ttinfo_std and out.ttinfo_dst:
- break
- else:
- if out.ttinfo_dst and not out.ttinfo_std:
- out.ttinfo_std = out.ttinfo_dst
-
- for tti in out.ttinfo_list:
- if not tti.isdst:
- out.ttinfo_before = tti
- break
- else:
- out.ttinfo_before = out.ttinfo_list[0]
-
- # Now fix transition times to become relative to wall time.
- #
-        # I'm not sure about this. In my tests, the tz source file
-        # is set up for wall time, and in the binary file isstd and
-        # isgmt are off, so it should be in wall time. OTOH, it's
- # always in gmt time. Let me know if you have comments
- # about this.
- laststdoffset = None
- out.trans_list = []
- for i, tti in enumerate(out.trans_idx):
- if not tti.isdst:
- offset = tti.offset
- laststdoffset = offset
- else:
- if laststdoffset is not None:
- # Store the DST offset as well and update it in the list
- tti.dstoffset = tti.offset - laststdoffset
- out.trans_idx[i] = tti
-
- offset = laststdoffset or 0
-
- out.trans_list.append(out.trans_list_utc[i] + offset)
-
- # In case we missed any DST offsets on the way in for some reason, make
- # a second pass over the list, looking for the /next/ DST offset.
- laststdoffset = None
- for i in reversed(range(len(out.trans_idx))):
- tti = out.trans_idx[i]
- if tti.isdst:
- if not (tti.dstoffset or laststdoffset is None):
- tti.dstoffset = tti.offset - laststdoffset
- else:
- laststdoffset = tti.offset
-
- if not isinstance(tti.dstoffset, datetime.timedelta):
- tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)
-
- out.trans_idx[i] = tti
-
- out.trans_idx = tuple(out.trans_idx)
- out.trans_list = tuple(out.trans_list)
- out.trans_list_utc = tuple(out.trans_list_utc)
-
- return out
-
- def _find_last_transition(self, dt, in_utc=False):
- # If there's no list, there are no transitions to find
- if not self._trans_list:
- return None
-
- timestamp = _datetime_to_timestamp(dt)
-
- # Find where the timestamp fits in the transition list - if the
- # timestamp is a transition time, it's part of the "after" period.
- trans_list = self._trans_list_utc if in_utc else self._trans_list
- idx = bisect.bisect_right(trans_list, timestamp)
-
- # We want to know when the previous transition was, so subtract off 1
- return idx - 1
-
- def _get_ttinfo(self, idx):
- # For no list or after the last transition, default to _ttinfo_std
- if idx is None or (idx + 1) >= len(self._trans_list):
- return self._ttinfo_std
-
- # If there is a list and the time is before it, return _ttinfo_before
- if idx < 0:
- return self._ttinfo_before
-
- return self._trans_idx[idx]
-
- def _find_ttinfo(self, dt):
- idx = self._resolve_ambiguous_time(dt)
-
- return self._get_ttinfo(idx)
-
- def fromutc(self, dt):
- """
- The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.
-
- :param dt:
- A :py:class:`datetime.datetime` object.
-
- :raises TypeError:
- Raised if ``dt`` is not a :py:class:`datetime.datetime` object.
-
- :raises ValueError:
- Raised if this is called with a ``dt`` which does not have this
- ``tzinfo`` attached.
-
- :return:
- Returns a :py:class:`datetime.datetime` object representing the
- wall time in ``self``'s time zone.
- """
- # These isinstance checks are in datetime.tzinfo, so we'll preserve
- # them, even if we don't care about duck typing.
- if not isinstance(dt, datetime.datetime):
- raise TypeError("fromutc() requires a datetime argument")
-
- if dt.tzinfo is not self:
- raise ValueError("dt.tzinfo is not self")
-
- # First treat UTC as wall time and get the transition we're in.
- idx = self._find_last_transition(dt, in_utc=True)
- tti = self._get_ttinfo(idx)
-
- dt_out = dt + datetime.timedelta(seconds=tti.offset)
-
- fold = self.is_ambiguous(dt_out, idx=idx)
-
- return enfold(dt_out, fold=int(fold))
-
- def is_ambiguous(self, dt, idx=None):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
-            A :py:class:`datetime.datetime`, naive or time zone aware.
-
-        :param idx:
-            An optional transition index, as returned by
-            ``_find_last_transition``; if not provided, it is calculated
-            from ``dt``.
-
-        :return:
-            Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
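-
-        **Example** (illustrative; assumes a US Eastern tzfile can be
-        loaded on this system, in which 2011-11-06 01:30 fell inside
-        the fold):
-
-        .. doctest::
-
-            >>> import datetime
-            >>> from dateutil import tz
-            >>> NYC = tz.gettz('America/New_York')
-            >>> NYC.is_ambiguous(datetime.datetime(2011, 11, 6, 1, 30))
-            True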
- """
- if idx is None:
- idx = self._find_last_transition(dt)
-
- # Calculate the difference in offsets from current to previous
- timestamp = _datetime_to_timestamp(dt)
- tti = self._get_ttinfo(idx)
-
- if idx is None or idx <= 0:
- return False
-
- od = self._get_ttinfo(idx - 1).offset - tti.offset
- tt = self._trans_list[idx] # Transition time
-
- return timestamp < tt + od
-
- def _resolve_ambiguous_time(self, dt):
- idx = self._find_last_transition(dt)
-
- # If we have no transitions, return the index
- _fold = self._fold(dt)
- if idx is None or idx == 0:
- return idx
-
- # If it's ambiguous and we're in a fold, shift to a different index.
- idx_offset = int(not _fold and self.is_ambiguous(dt, idx))
-
- return idx - idx_offset
-
- def utcoffset(self, dt):
- if dt is None:
- return None
-
- if not self._ttinfo_std:
- return ZERO
-
- return self._find_ttinfo(dt).delta
-
- def dst(self, dt):
- if dt is None:
- return None
-
- if not self._ttinfo_dst:
- return ZERO
-
- tti = self._find_ttinfo(dt)
-
- if not tti.isdst:
- return ZERO
-
- # The documentation says that utcoffset()-dst() must
- # be constant for every dt.
- return tti.dstoffset
-
- @tzname_in_python2
- def tzname(self, dt):
- if not self._ttinfo_std or dt is None:
- return None
- return self._find_ttinfo(dt).abbr
-
- def __eq__(self, other):
- if not isinstance(other, tzfile):
- return NotImplemented
- return (self._trans_list == other._trans_list and
- self._trans_idx == other._trans_idx and
- self._ttinfo_list == other._ttinfo_list)
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
-
- def __reduce__(self):
- return self.__reduce_ex__(None)
-
- def __reduce_ex__(self, protocol):
- return (self.__class__, (None, self._filename), self.__dict__)
-
-
-class tzrange(tzrangebase):
- """
- The ``tzrange`` object is a time zone specified by a set of offsets and
- abbreviations, equivalent to the way the ``TZ`` variable can be specified
- in POSIX-like systems, but using Python delta objects to specify DST
- start, end and offsets.
-
- :param stdabbr:
- The abbreviation for standard time (e.g. ``'EST'``).
-
- :param stdoffset:
- An integer or :class:`datetime.timedelta` object or equivalent
- specifying the base offset from UTC.
-
- If unspecified, +00:00 is used.
-
- :param dstabbr:
- The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
-
-        If specified, with no other DST information, DST is assumed to occur
-        and the default behavior of ``dstoffset``, ``start`` and ``end`` is
-        used. If unspecified and no other DST information is specified, it
-        is assumed that this zone has no DST.
-
-        If this is unspecified and other DST information *is* specified,
-        DST occurs in the zone but the time zone abbreviation is left
-        unchanged.
-
-    :param dstoffset:
-        An integer or :class:`datetime.timedelta` object or equivalent
-        specifying the UTC offset during DST. If unspecified and any other DST
-        information is specified, it is assumed to be the STD offset +1 hour.
-
-    :param start:
-        A :class:`relativedelta.relativedelta` object or equivalent specifying
-        the time of day and day of the year on which daylight saving time
-        starts. To specify, for example, that DST starts at 2AM on the 2nd
-        Sunday in March, pass:
-
-        ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
-
-        If unspecified and any other DST information is specified, the default
-        value is 2 AM on the first Sunday in April.
-
-    :param end:
-        A :class:`relativedelta.relativedelta` object or equivalent representing
-        the time of day and day of the year on which daylight saving time ends,
-        specified in the same way as ``start``. Note that this should point to
-        the first time in the *standard* zone, so if a transition occurs at 2AM
-        in the DST zone and the clocks are set back 1 hour to 1AM, set the
-        ``hours`` parameter to +1.
-
-
- **Examples:**
-
- .. testsetup:: tzrange
-
- from dateutil.tz import tzrange, tzstr
-
- .. doctest:: tzrange
-
- >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
- True
-
- >>> from dateutil.relativedelta import *
- >>> range1 = tzrange("EST", -18000, "EDT")
- >>> range2 = tzrange("EST", -18000, "EDT", -14400,
- ... relativedelta(hours=+2, month=4, day=1,
- ... weekday=SU(+1)),
- ... relativedelta(hours=+1, month=10, day=31,
- ... weekday=SU(-1)))
- >>> tzstr('EST5EDT') == range1 == range2
- True
-
- """
- def __init__(self, stdabbr, stdoffset=None,
- dstabbr=None, dstoffset=None,
- start=None, end=None):
-
- global relativedelta
- from dateutil import relativedelta
-
- self._std_abbr = stdabbr
- self._dst_abbr = dstabbr
-
- try:
- stdoffset = _total_seconds(stdoffset)
- except (TypeError, AttributeError):
- pass
-
- try:
- dstoffset = _total_seconds(dstoffset)
- except (TypeError, AttributeError):
- pass
-
- if stdoffset is not None:
- self._std_offset = datetime.timedelta(seconds=stdoffset)
- else:
- self._std_offset = ZERO
-
- if dstoffset is not None:
- self._dst_offset = datetime.timedelta(seconds=dstoffset)
- elif dstabbr and stdoffset is not None:
- self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
- else:
- self._dst_offset = ZERO
-
- if dstabbr and start is None:
- self._start_delta = relativedelta.relativedelta(
- hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
- else:
- self._start_delta = start
-
- if dstabbr and end is None:
- self._end_delta = relativedelta.relativedelta(
- hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
- else:
- self._end_delta = end
-
- self._dst_base_offset_ = self._dst_offset - self._std_offset
- self.hasdst = bool(self._start_delta)
-
- def transitions(self, year):
- """
- For a given year, get the DST on and off transition times, expressed
- always on the standard time side. For zones with no transitions, this
- function returns ``None``.
-
- :param year:
- The year whose transitions you would like to query.
-
- :return:
- Returns a :class:`tuple` of :class:`datetime.datetime` objects,
- ``(dston, dstoff)`` for zones with an annual DST transition, or
- ``None`` for fixed offset zones.
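-
-        **Example** (illustrative, using the default US 1987-2006 rules
-        described in the class docstring):
-
-        .. doctest::
-
-            >>> tzrange("EST", -18000, "EDT").transitions(2003)
-            (datetime.datetime(2003, 4, 6, 2, 0), datetime.datetime(2003, 10, 26, 1, 0))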
- """
- if not self.hasdst:
- return None
-
- base_year = datetime.datetime(year, 1, 1)
-
- start = base_year + self._start_delta
- end = base_year + self._end_delta
-
- return (start, end)
-
- def __eq__(self, other):
- if not isinstance(other, tzrange):
- return NotImplemented
-
- return (self._std_abbr == other._std_abbr and
- self._dst_abbr == other._dst_abbr and
- self._std_offset == other._std_offset and
- self._dst_offset == other._dst_offset and
- self._start_delta == other._start_delta and
- self._end_delta == other._end_delta)
-
- @property
- def _dst_base_offset(self):
- return self._dst_base_offset_
-
-
-class tzstr(tzrange):
- """
- ``tzstr`` objects are time zone objects specified by a time-zone string as
- it would be passed to a ``TZ`` variable on POSIX-style systems (see
- the `GNU C Library: TZ Variable`_ for more details).
-
- There is one notable exception, which is that POSIX-style time zones use an
- inverted offset format, so normally ``GMT+3`` would be parsed as an offset
- 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
- offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
- behavior, pass a ``True`` value to ``posix_offset``.
-
-    The :class:`tzrange` object provides the same functionality, but is
-    specified using :class:`relativedelta.relativedelta` objects rather
-    than strings.
-
- :param s:
- A time zone string in ``TZ`` variable format. This can be a
- :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`)
- or a stream emitting unicode characters (e.g. :class:`StringIO`).
-
- :param posix_offset:
- Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
- ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
- POSIX standard.
-
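-    **Example** (illustrative; ``timedelta`` reprs shown as on CPython
-    versions before 3.7):
-
-    .. doctest::
-
-        >>> from datetime import datetime
-        >>> from dateutil.tz import tzstr
-        >>> tzstr('GMT+3').utcoffset(datetime(2017, 1, 1))
-        datetime.timedelta(0, 10800)
-        >>> tzstr('GMT+3', posix_offset=True).utcoffset(datetime(2017, 1, 1))
-        datetime.timedelta(-1, 75600)
-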
- .. _`GNU C Library: TZ Variable`:
- https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
- """
- def __init__(self, s, posix_offset=False):
- global parser
- from dateutil import parser
-
- self._s = s
-
- res = parser._parsetz(s)
- if res is None:
- raise ValueError("unknown string format")
-
- # Here we break the compatibility with the TZ variable handling.
- # GMT-3 actually *means* the timezone -3.
- if res.stdabbr in ("GMT", "UTC") and not posix_offset:
- res.stdoffset *= -1
-
- # We must initialize it first, since _delta() needs
- # _std_offset and _dst_offset set. Use False in start/end
- # to avoid building it two times.
- tzrange.__init__(self, res.stdabbr, res.stdoffset,
- res.dstabbr, res.dstoffset,
- start=False, end=False)
-
- if not res.dstabbr:
- self._start_delta = None
- self._end_delta = None
- else:
- self._start_delta = self._delta(res.start)
- if self._start_delta:
- self._end_delta = self._delta(res.end, isend=1)
-
- self.hasdst = bool(self._start_delta)
-
- def _delta(self, x, isend=0):
- from dateutil import relativedelta
- kwargs = {}
- if x.month is not None:
- kwargs["month"] = x.month
- if x.weekday is not None:
- kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
- if x.week > 0:
- kwargs["day"] = 1
- else:
- kwargs["day"] = 31
- elif x.day:
- kwargs["day"] = x.day
- elif x.yday is not None:
- kwargs["yearday"] = x.yday
- elif x.jyday is not None:
- kwargs["nlyearday"] = x.jyday
- if not kwargs:
- # Default is to start on first sunday of april, and end
- # on last sunday of october.
- if not isend:
- kwargs["month"] = 4
- kwargs["day"] = 1
- kwargs["weekday"] = relativedelta.SU(+1)
- else:
- kwargs["month"] = 10
- kwargs["day"] = 31
- kwargs["weekday"] = relativedelta.SU(-1)
- if x.time is not None:
- kwargs["seconds"] = x.time
- else:
- # Default is 2AM.
- kwargs["seconds"] = 7200
- if isend:
- # Convert to standard time, to follow the documented way
- # of working with the extra hour. See the documentation
- # of the tzinfo class.
- delta = self._dst_offset - self._std_offset
- kwargs["seconds"] -= delta.seconds + delta.days * 86400
- return relativedelta.relativedelta(**kwargs)
-
- def __repr__(self):
- return "%s(%s)" % (self.__class__.__name__, repr(self._s))
-
-
-class _tzicalvtzcomp(object):
- def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
- tzname=None, rrule=None):
- self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
- self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
- self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
- self.isdst = isdst
- self.tzname = tzname
- self.rrule = rrule
-
-
-class _tzicalvtz(_tzinfo):
- def __init__(self, tzid, comps=[]):
- super(_tzicalvtz, self).__init__()
-
- self._tzid = tzid
- self._comps = comps
- self._cachedate = []
- self._cachecomp = []
-
- def _find_comp(self, dt):
- if len(self._comps) == 1:
- return self._comps[0]
-
- dt = dt.replace(tzinfo=None)
-
- try:
- return self._cachecomp[self._cachedate.index((dt, self._fold(dt)))]
- except ValueError:
- pass
-
- lastcompdt = None
- lastcomp = None
-
- for comp in self._comps:
- compdt = self._find_compdt(comp, dt)
-
- if compdt and (not lastcompdt or lastcompdt < compdt):
- lastcompdt = compdt
- lastcomp = comp
-
- if not lastcomp:
- # RFC says nothing about what to do when a given
- # time is before the first onset date. We'll look for the
- # first standard component, or the first component, if
- # none is found.
- for comp in self._comps:
- if not comp.isdst:
- lastcomp = comp
- break
-            else:
-                lastcomp = self._comps[0]
-
- self._cachedate.insert(0, (dt, self._fold(dt)))
- self._cachecomp.insert(0, lastcomp)
-
- if len(self._cachedate) > 10:
- self._cachedate.pop()
- self._cachecomp.pop()
-
- return lastcomp
-
- def _find_compdt(self, comp, dt):
- if comp.tzoffsetdiff < ZERO and self._fold(dt):
- dt -= comp.tzoffsetdiff
-
- compdt = comp.rrule.before(dt, inc=True)
-
- return compdt
-
- def utcoffset(self, dt):
- if dt is None:
- return None
-
- return self._find_comp(dt).tzoffsetto
-
- def dst(self, dt):
- comp = self._find_comp(dt)
- if comp.isdst:
- return comp.tzoffsetdiff
- else:
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- return self._find_comp(dt).tzname
-
- def __repr__(self):
-        return "<tzicalvtz %s>" % repr(self._tzid)
-
- __reduce__ = object.__reduce__
-
-
-class tzical(object):
- """
- This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
- as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects.
-
- :param `fileobj`:
- A file or stream in iCalendar format, which should be UTF-8 encoded
- with CRLF endings.
-
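-    **Example** (illustrative; a minimal, single-component zone built in
-    memory):
-
-    .. doctest::
-
-        >>> from io import StringIO
-        >>> from dateutil.tz import tzical
-        >>> ics = '\r\n'.join(['BEGIN:VTIMEZONE', 'TZID:Example/EST',
-        ...                    'BEGIN:STANDARD', 'DTSTART:19671029T020000',
-        ...                    'TZOFFSETFROM:-0400', 'TZOFFSETTO:-0500',
-        ...                    'TZNAME:EST', 'END:STANDARD', 'END:VTIMEZONE'])
-        >>> tzical(StringIO(ics)).keys()
-        ['Example/EST']
-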
- .. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt
- """
- def __init__(self, fileobj):
- global rrule
- from dateutil import rrule
-
- if isinstance(fileobj, string_types):
- self._s = fileobj
- # ical should be encoded in UTF-8 with CRLF
- fileobj = open(fileobj, 'r')
- else:
- self._s = getattr(fileobj, 'name', repr(fileobj))
- fileobj = _ContextWrapper(fileobj)
-
- self._vtz = {}
-
- with fileobj as fobj:
- self._parse_rfc(fobj.read())
-
- def keys(self):
- """
- Retrieves the available time zones as a list.
- """
- return list(self._vtz.keys())
-
- def get(self, tzid=None):
- """
- Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
-
- :param tzid:
-            If there is exactly one time zone available, omitting ``tzid``
-            or passing a :py:const:`None` value returns it. Otherwise a valid
-            key (which can be retrieved from :func:`keys`) is required.
-
-        :raises ValueError:
-            Raised if ``tzid`` is not specified and there is not exactly one
-            zone defined.
-
- :returns:
- Returns either a :py:class:`datetime.tzinfo` object representing
- the relevant time zone or :py:const:`None` if the ``tzid`` was
- not found.
- """
- if tzid is None:
- if len(self._vtz) == 0:
- raise ValueError("no timezones defined")
- elif len(self._vtz) > 1:
- raise ValueError("more than one timezone available")
- tzid = next(iter(self._vtz))
-
- return self._vtz.get(tzid)
-
- def _parse_offset(self, s):
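-        # Parses RFC 2445 UTC offsets of the form [+-]HHMM or [+-]HHMMSS
-        # into seconds east of UTC, e.g. "-0500" -> -18000 and
-        # "+051730" -> 19050.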
- s = s.strip()
- if not s:
- raise ValueError("empty offset")
-        if s[0] in ('+', '-'):
-            signal = 1 if s[0] == '+' else -1
-            s = s[1:]
-        else:
-            signal = +1
- if len(s) == 4:
- return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
- elif len(s) == 6:
- return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
- else:
- raise ValueError("invalid offset: " + s)
-
- def _parse_rfc(self, s):
- lines = s.splitlines()
- if not lines:
- raise ValueError("empty string")
-
- # Unfold
- i = 0
- while i < len(lines):
- line = lines[i].rstrip()
- if not line:
- del lines[i]
- elif i > 0 and line[0] == " ":
- lines[i-1] += line[1:]
- del lines[i]
- else:
- i += 1
-
- tzid = None
- comps = []
- invtz = False
- comptype = None
- for line in lines:
- if not line:
- continue
- name, value = line.split(':', 1)
- parms = name.split(';')
- if not parms:
- raise ValueError("empty property name")
- name = parms[0].upper()
- parms = parms[1:]
- if invtz:
- if name == "BEGIN":
- if value in ("STANDARD", "DAYLIGHT"):
- # Process component
- pass
- else:
- raise ValueError("unknown component: "+value)
- comptype = value
- founddtstart = False
- tzoffsetfrom = None
- tzoffsetto = None
- rrulelines = []
- tzname = None
- elif name == "END":
- if value == "VTIMEZONE":
- if comptype:
- raise ValueError("component not closed: "+comptype)
- if not tzid:
- raise ValueError("mandatory TZID not found")
- if not comps:
- raise ValueError(
- "at least one component is needed")
- # Process vtimezone
- self._vtz[tzid] = _tzicalvtz(tzid, comps)
- invtz = False
- elif value == comptype:
- if not founddtstart:
- raise ValueError("mandatory DTSTART not found")
- if tzoffsetfrom is None:
- raise ValueError(
- "mandatory TZOFFSETFROM not found")
- if tzoffsetto is None:
- raise ValueError(
-                                "mandatory TZOFFSETTO not found")
- # Process component
- rr = None
- if rrulelines:
- rr = rrule.rrulestr("\n".join(rrulelines),
- compatible=True,
- ignoretz=True,
- cache=True)
- comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
- (comptype == "DAYLIGHT"),
- tzname, rr)
- comps.append(comp)
- comptype = None
- else:
- raise ValueError("invalid component end: "+value)
- elif comptype:
- if name == "DTSTART":
- rrulelines.append(line)
- founddtstart = True
- elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
- rrulelines.append(line)
- elif name == "TZOFFSETFROM":
- if parms:
- raise ValueError(
- "unsupported %s parm: %s " % (name, parms[0]))
- tzoffsetfrom = self._parse_offset(value)
- elif name == "TZOFFSETTO":
- if parms:
- raise ValueError(
- "unsupported TZOFFSETTO parm: "+parms[0])
- tzoffsetto = self._parse_offset(value)
- elif name == "TZNAME":
- if parms:
- raise ValueError(
- "unsupported TZNAME parm: "+parms[0])
- tzname = value
- elif name == "COMMENT":
- pass
- else:
- raise ValueError("unsupported property: "+name)
- else:
- if name == "TZID":
- if parms:
- raise ValueError(
- "unsupported TZID parm: "+parms[0])
- tzid = value
- elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
- pass
- else:
- raise ValueError("unsupported property: "+name)
- elif name == "BEGIN" and value == "VTIMEZONE":
- tzid = None
- comps = []
- invtz = True
-
- def __repr__(self):
- return "%s(%s)" % (self.__class__.__name__, repr(self._s))
-
-
-if sys.platform != "win32":
- TZFILES = ["/etc/localtime", "localtime"]
- TZPATHS = ["/usr/share/zoneinfo",
- "/usr/lib/zoneinfo",
- "/usr/share/lib/zoneinfo",
- "/etc/zoneinfo"]
-else:
- TZFILES = []
- TZPATHS = []
-
-
-def gettz(name=None):
- tz = None
- if not name:
- try:
- name = os.environ["TZ"]
- except KeyError:
- pass
- if name is None or name == ":":
- for filepath in TZFILES:
- if not os.path.isabs(filepath):
- filename = filepath
- for path in TZPATHS:
- filepath = os.path.join(path, filename)
- if os.path.isfile(filepath):
- break
- else:
- continue
- if os.path.isfile(filepath):
- try:
- tz = tzfile(filepath)
- break
- except (IOError, OSError, ValueError):
- pass
- else:
- tz = tzlocal()
- else:
-        if name.startswith(":"):
-            # POSIX allows TZ values of the form ":Area/Location";
-            # strip the leading colon.
-            name = name[1:]
- if os.path.isabs(name):
- if os.path.isfile(name):
- tz = tzfile(name)
- else:
- tz = None
- else:
- for path in TZPATHS:
- filepath = os.path.join(path, name)
- if not os.path.isfile(filepath):
- filepath = filepath.replace(' ', '_')
- if not os.path.isfile(filepath):
- continue
- try:
- tz = tzfile(filepath)
- break
- except (IOError, OSError, ValueError):
- pass
- else:
- tz = None
- if tzwin is not None:
- try:
- tz = tzwin(name)
- except WindowsError:
- tz = None
-
- if not tz:
- from dateutil.zoneinfo import get_zonefile_instance
- tz = get_zonefile_instance().get(name)
-
- if not tz:
- for c in name:
- # name must have at least one offset to be a tzstr
- if c in "0123456789":
- try:
- tz = tzstr(name)
- except ValueError:
- pass
- break
- else:
- if name in ("GMT", "UTC"):
- tz = tzutc()
- elif name in time.tzname:
- tz = tzlocal()
- return tz
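-
-# Illustrative lookups (results depend on the host's zoneinfo database; the
-# names shown are ordinary IANA identifiers):
-#
-#     gettz()                   # local zone, from $TZ or /etc/localtime
-#     gettz('America/Chicago')  # searched in TZPATHS, then the bundled db
-#     gettz('EST5EDT')          # has a digit, so tzstr() is the last resort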
-
-
-def datetime_exists(dt, tz=None):
- """
- Given a datetime and a time zone, determine whether or not a given datetime
- would fall in a gap.
-
- :param dt:
- A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
- is provided.)
-
- :param tz:
- A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
- ``None`` or not provided, the datetime's own time zone will be used.
-
-    :return:
-        Returns a boolean value indicating whether or not the "wall time"
-        exists in ``tz``.
- """
- if tz is None:
- if dt.tzinfo is None:
- raise ValueError('Datetime is naive and no time zone provided.')
- tz = dt.tzinfo
-
- dt = dt.replace(tzinfo=None)
-
- # This is essentially a test of whether or not the datetime can survive
- # a round trip to UTC.
- dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz)
- dt_rt = dt_rt.replace(tzinfo=None)
-
- return dt == dt_rt
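-
-# Illustrative (assumes an 'America/New_York' zone can be loaded; 02:30 on
-# 2011-03-13 was skipped there by the spring-forward transition):
-#
-#     >>> from datetime import datetime
-#     >>> NYC = gettz('America/New_York')
-#     >>> datetime_exists(datetime(2011, 3, 13, 2, 30), NYC)
-#     False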
-
-
-def datetime_ambiguous(dt, tz=None):
- """
-    Given a datetime and a time zone, determine whether or not a given datetime
-    is ambiguous (i.e. if there are two times differentiated only by their DST
-    status).
-
- :param dt:
- A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
- is provided.)
-
- :param tz:
- A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
- ``None`` or not provided, the datetime's own time zone will be used.
-
-    :return:
-        Returns a boolean value indicating whether or not the "wall time" is
-        ambiguous in ``tz``.
-
- .. versionadded:: 2.6.0
- """
- if tz is None:
- if dt.tzinfo is None:
- raise ValueError('Datetime is naive and no time zone provided.')
-
- tz = dt.tzinfo
-
-    # If a time zone defines its own "is_ambiguous" method, we'll use that.
-    is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
-    if is_ambiguous_fn is not None:
-        try:
-            return is_ambiguous_fn(dt)
-        except Exception:
-            pass
-
- # If it doesn't come out and tell us it's ambiguous, we'll just check if
- # the fold attribute has any effect on this particular date and time.
- dt = dt.replace(tzinfo=tz)
- wall_0 = enfold(dt, fold=0)
- wall_1 = enfold(dt, fold=1)
-
- same_offset = wall_0.utcoffset() == wall_1.utcoffset()
- same_dst = wall_0.dst() == wall_1.dst()
-
- return not (same_offset and same_dst)
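-
-# Illustrative (same assumption as above; 01:30 on 2011-11-06 occurred twice
-# in New York because of the fall-back transition):
-#
-#     >>> datetime_ambiguous(datetime(2011, 11, 6, 1, 30), NYC)
-#     True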
-
-
-def _datetime_to_timestamp(dt):
- """
- Convert a :class:`datetime.datetime` object to an epoch timestamp in seconds
- since January 1, 1970, ignoring the time zone.
- """
- return _total_seconds((dt.replace(tzinfo=None) - EPOCH))
-
-
-class _ContextWrapper(object):
- """
- Class for wrapping contexts so that they are passed through in a
- with statement.
- """
- def __init__(self, context):
- self.context = context
-
- def __enter__(self):
- return self.context
-
- def __exit__(*args, **kwargs):
- pass
-
-# vim:ts=4:sw=4:et
diff --git a/venv/Lib/site-packages/dateutil/tz/win.py b/venv/Lib/site-packages/dateutil/tz/win.py
deleted file mode 100644
index 36a1c26..0000000
--- a/venv/Lib/site-packages/dateutil/tz/win.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# This code was originally contributed by Jeffrey Harris.
-import datetime
-import struct
-
-from six.moves import winreg
-from six import text_type
-
-try:
- import ctypes
- from ctypes import wintypes
-except ValueError:
- # ValueError is raised on non-Windows systems for some horrible reason.
- raise ImportError("Running tzwin on non-Windows system")
-
-from ._common import tzrangebase
-
-__all__ = ["tzwin", "tzwinlocal", "tzres"]
-
-ONEWEEK = datetime.timedelta(7)
-
-TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
-TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
-TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
-
-
-def _settzkeyname():
- handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
- try:
- winreg.OpenKey(handle, TZKEYNAMENT).Close()
- TZKEYNAME = TZKEYNAMENT
- except WindowsError:
- TZKEYNAME = TZKEYNAME9X
- handle.Close()
- return TZKEYNAME
-
-
-TZKEYNAME = _settzkeyname()
-
-
-class tzres(object):
- """
- Class for accessing `tzres.dll`, which contains timezone name related
- resources.
-
- .. versionadded:: 2.5.0
- """
- p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char
-
- def __init__(self, tzres_loc='tzres.dll'):
- # Load the user32 DLL so we can load strings from tzres
- user32 = ctypes.WinDLL('user32')
-
- # Specify the LoadStringW function
- user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
- wintypes.UINT,
- wintypes.LPWSTR,
- ctypes.c_int)
-
- self.LoadStringW = user32.LoadStringW
- self._tzres = ctypes.WinDLL(tzres_loc)
- self.tzres_loc = tzres_loc
-
- def load_name(self, offset):
- """
- Load a timezone name from a DLL offset (integer).
-
- >>> from dateutil.tzwin import tzres
- >>> tzr = tzres()
- >>> print(tzr.load_name(112))
-        Eastern Standard Time
-
- :param offset:
- A positive integer value referring to a string from the tzres dll.
-
-        .. note::
-            Offsets found in the registry are generally of the form
-            ``@tzres.dll,-114``. The offset in this case is 114, not -114.
-
- """
- resource = self.p_wchar()
- lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
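-        # Passing a buffer length of 0 to LoadStringW is a documented Win32
-        # idiom: instead of copying the string, it writes a read-only pointer
-        # to the resource itself into lpBuffer and returns the string length.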
- nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
- return resource[:nchar]
-
- def name_from_string(self, tzname_str):
- """
- Parse strings as returned from the Windows registry into the time zone
- name as defined in the registry.
-
- >>> from dateutil.tzwin import tzres
- >>> tzr = tzres()
-        >>> print(tzr.name_from_string('@tzres.dll,-251'))
-        Dateline Daylight Time
-        >>> print(tzr.name_from_string('Eastern Standard Time'))
-        Eastern Standard Time
-
- :param tzname_str:
- A timezone name string as returned from a Windows registry key.
-
- :return:
- Returns the localized timezone string from tzres.dll if the string
- is of the form `@tzres.dll,-offset`, else returns the input string.
- """
- if not tzname_str.startswith('@'):
- return tzname_str
-
- name_splt = tzname_str.split(',-')
- try:
- offset = int(name_splt[1])
-        except (IndexError, ValueError):
- raise ValueError("Malformed timezone string.")
-
- return self.load_name(offset)
-
-
-class tzwinbase(tzrangebase):
- """tzinfo class based on win32's timezones available in the registry."""
- def __init__(self):
- raise NotImplementedError('tzwinbase is an abstract base class')
-
- def __eq__(self, other):
- # Compare on all relevant dimensions, including name.
- if not isinstance(other, tzwinbase):
- return NotImplemented
-
- return (self._std_offset == other._std_offset and
- self._dst_offset == other._dst_offset and
- self._stddayofweek == other._stddayofweek and
- self._dstdayofweek == other._dstdayofweek and
- self._stdweeknumber == other._stdweeknumber and
- self._dstweeknumber == other._dstweeknumber and
- self._stdhour == other._stdhour and
- self._dsthour == other._dsthour and
- self._stdminute == other._stdminute and
- self._dstminute == other._dstminute and
- self._std_abbr == other._std_abbr and
- self._dst_abbr == other._dst_abbr)
-
- @staticmethod
- def list():
- """Return a list of all time zones known to the system."""
- with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
- with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
- result = [winreg.EnumKey(tzkey, i)
- for i in range(winreg.QueryInfoKey(tzkey)[0])]
- return result
-
- def display(self):
- return self._display
-
- def transitions(self, year):
- """
- For a given year, get the DST on and off transition times, expressed
- always on the standard time side. For zones with no transitions, this
- function returns ``None``.
-
- :param year:
- The year whose transitions you would like to query.
-
- :return:
- Returns a :class:`tuple` of :class:`datetime.datetime` objects,
- ``(dston, dstoff)`` for zones with an annual DST transition, or
- ``None`` for fixed offset zones.
- """
-
- if not self.hasdst:
- return None
-
- dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
- self._dsthour, self._dstminute,
- self._dstweeknumber)
-
- dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
- self._stdhour, self._stdminute,
- self._stdweeknumber)
-
- # Ambiguous dates default to the STD side
- dstoff -= self._dst_base_offset
-
- return dston, dstoff
-
- def _get_hasdst(self):
- return self._dstmonth != 0
-
- @property
- def _dst_base_offset(self):
- return self._dst_base_offset_
-
-
-class tzwin(tzwinbase):
-
- def __init__(self, name):
- self._name = name
-
-        # multiple contexts in a single 'with' are only possible in Python
-        # 2.7 and 3.1+; we still support 2.6
- with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
- tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
- with winreg.OpenKey(handle, tzkeyname) as tzkey:
- keydict = valuestodict(tzkey)
-
- self._std_abbr = keydict["Std"]
- self._dst_abbr = keydict["Dlt"]
-
- self._display = keydict["Display"]
-
- # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
- tup = struct.unpack("=3l16h", keydict["TZI"])
- stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
- dstoffset = stdoffset-tup[2] # + DaylightBias * -1
- self._std_offset = datetime.timedelta(minutes=stdoffset)
- self._dst_offset = datetime.timedelta(minutes=dstoffset)
-
- # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
- # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
- (self._stdmonth,
- self._stddayofweek, # Sunday = 0
- self._stdweeknumber, # Last = 5
- self._stdhour,
- self._stdminute) = tup[4:9]
-
- (self._dstmonth,
- self._dstdayofweek, # Sunday = 0
- self._dstweeknumber, # Last = 5
- self._dsthour,
- self._dstminute) = tup[12:17]
-
- self._dst_base_offset_ = self._dst_offset - self._std_offset
- self.hasdst = self._get_hasdst()
-
- def __repr__(self):
- return "tzwin(%s)" % repr(self._name)
-
- def __reduce__(self):
- return (self.__class__, (self._name,))
-
-
-class tzwinlocal(tzwinbase):
- def __init__(self):
- with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
- with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
- keydict = valuestodict(tzlocalkey)
-
- self._std_abbr = keydict["StandardName"]
- self._dst_abbr = keydict["DaylightName"]
-
- try:
- tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
- sn=self._std_abbr)
- with winreg.OpenKey(handle, tzkeyname) as tzkey:
- _keydict = valuestodict(tzkey)
- self._display = _keydict["Display"]
- except OSError:
- self._display = None
-
- stdoffset = -keydict["Bias"]-keydict["StandardBias"]
- dstoffset = stdoffset-keydict["DaylightBias"]
-
- self._std_offset = datetime.timedelta(minutes=stdoffset)
- self._dst_offset = datetime.timedelta(minutes=dstoffset)
-
- # For reasons unclear, in this particular key, the day of week has been
- # moved to the END of the SYSTEMTIME structure.
- tup = struct.unpack("=8h", keydict["StandardStart"])
-
- (self._stdmonth,
- self._stdweeknumber, # Last = 5
- self._stdhour,
- self._stdminute) = tup[1:5]
-
- self._stddayofweek = tup[7]
-
- tup = struct.unpack("=8h", keydict["DaylightStart"])
-
- (self._dstmonth,
- self._dstweeknumber, # Last = 5
- self._dsthour,
- self._dstminute) = tup[1:5]
-
- self._dstdayofweek = tup[7]
-
- self._dst_base_offset_ = self._dst_offset - self._std_offset
- self.hasdst = self._get_hasdst()
-
- def __repr__(self):
- return "tzwinlocal()"
-
- def __str__(self):
- # str will return the standard name, not the daylight name.
- return "tzwinlocal(%s)" % repr(self._std_abbr)
-
- def __reduce__(self):
- return (self.__class__, ())
-
-
-def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
- """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
- first = datetime.datetime(year, month, 1, hour, minute)
-
-    # This works whether dayofweek is an ISO weekday (1-7) or
-    # Microsoft-style (0-6), because 7 % 7 == 0.
- weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
- wd = weekdayone + ((whichweek - 1) * ONEWEEK)
- if (wd.month != month):
- wd -= ONEWEEK
-
- return wd
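-
-# Illustrative (2011 calendar):
-#
-#     >>> picknthweekday(2011, 3, 0, 2, 0, 2)  # 2nd Sunday of March, 2 AM
-#     datetime.datetime(2011, 3, 13, 2, 0)
-#     >>> picknthweekday(2011, 3, 0, 2, 0, 5)  # whichweek=5 -> last Sunday
-#     datetime.datetime(2011, 3, 27, 2, 0)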
-
-
-def valuestodict(key):
- """Convert a registry key's values to a dictionary."""
- dout = {}
- size = winreg.QueryInfoKey(key)[1]
- tz_res = None
-
- for i in range(size):
- key_name, value, dtype = winreg.EnumValue(key, i)
- if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
- # If it's a DWORD (32-bit integer), it's stored as unsigned - convert
- # that to a proper signed integer
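-            # (e.g. a stored 0xFFFFFFB0, i.e. 4294967216, becomes -80)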
- if value & (1 << 31):
- value = value - (1 << 32)
- elif dtype == winreg.REG_SZ:
- # If it's a reference to the tzres DLL, load the actual string
- if value.startswith('@tzres'):
- tz_res = tz_res or tzres()
- value = tz_res.name_from_string(value)
-
- value = value.rstrip('\x00') # Remove trailing nulls
-
- dout[key_name] = value
-
- return dout
diff --git a/venv/Lib/site-packages/dateutil/tzwin.py b/venv/Lib/site-packages/dateutil/tzwin.py
deleted file mode 100644
index cebc673..0000000
--- a/venv/Lib/site-packages/dateutil/tzwin.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# tzwin has moved to dateutil.tz.win
-from .tz.win import *
diff --git a/venv/Lib/site-packages/dateutil/zoneinfo/__init__.py b/venv/Lib/site-packages/dateutil/zoneinfo/__init__.py
deleted file mode 100644
index a2ed4f9..0000000
--- a/venv/Lib/site-packages/dateutil/zoneinfo/__init__.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# -*- coding: utf-8 -*-
-import warnings
-import json
-
-from tarfile import TarFile
-from pkgutil import get_data
-from io import BytesIO
-from contextlib import closing
-
-from dateutil.tz import tzfile
-
-__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata", "rebuild"]
-
-ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
-METADATA_FN = 'METADATA'
-
-# python2.6 compatibility. Note that TarFile.__exit__ != TarFile.close, but
-# it's close enough for python2.6
-tar_open = TarFile.open
-if not hasattr(TarFile, '__exit__'):
- def tar_open(*args, **kwargs):
- return closing(TarFile.open(*args, **kwargs))
-
-
-class tzfile(tzfile):
- def __reduce__(self):
- return (gettz, (self._filename,))
-
-
-def getzoneinfofile_stream():
- try:
- return BytesIO(get_data(__name__, ZONEFILENAME))
- except IOError as e: # TODO switch to FileNotFoundError?
- warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
- return None
-
-
-class ZoneInfoFile(object):
- def __init__(self, zonefile_stream=None):
- if zonefile_stream is not None:
- with tar_open(fileobj=zonefile_stream, mode='r') as tf:
- # dict comprehension does not work on python2.6
- # TODO: get back to the nicer syntax when we ditch python2.6
- # self.zones = {zf.name: tzfile(tf.extractfile(zf),
- # filename = zf.name)
- # for zf in tf.getmembers() if zf.isfile()}
- self.zones = dict((zf.name, tzfile(tf.extractfile(zf),
- filename=zf.name))
- for zf in tf.getmembers()
- if zf.isfile() and zf.name != METADATA_FN)
- # deal with links: They'll point to their parent object. Less
- # waste of memory
- # links = {zl.name: self.zones[zl.linkname]
- # for zl in tf.getmembers() if zl.islnk() or zl.issym()}
- links = dict((zl.name, self.zones[zl.linkname])
- for zl in tf.getmembers() if
- zl.islnk() or zl.issym())
- self.zones.update(links)
- try:
- metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
- metadata_str = metadata_json.read().decode('UTF-8')
- self.metadata = json.loads(metadata_str)
- except KeyError:
- # no metadata in tar file
- self.metadata = None
- else:
- self.zones = dict()
- self.metadata = None
-
- def get(self, name, default=None):
- """
- Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
- for retrieving zones from the zone dictionary.
-
- :param name:
- The name of the zone to retrieve. (Generally IANA zone names)
-
- :param default:
- The value to return in the event of a missing key.
-
- .. versionadded:: 2.6.0
-
- """
- return self.zones.get(name, default)
-
-
-# The current API has gettz as a module function, although in fact it taps into
-# a stateful class. So as a workaround for now, without changing the API, we
-# will create a new "global" class instance the first time a user requests a
-# timezone. Ugly, but adheres to the API.
-#
-# TODO: Remove after deprecation period.
-_CLASS_ZONE_INSTANCE = list()
-
-
-def get_zonefile_instance(new_instance=False):
- """
- This is a convenience function which provides a :class:`ZoneInfoFile`
- instance using the data provided by the ``dateutil`` package. By default, it
- caches a single instance of the ZoneInfoFile object and returns that.
-
- :param new_instance:
- If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
- used as the cached instance for the next call. Otherwise, new instances
- are created only as necessary.
-
- :return:
- Returns a :class:`ZoneInfoFile` object.
-
- .. versionadded:: 2.6
- """
- if new_instance:
- zif = None
- else:
- zif = getattr(get_zonefile_instance, '_cached_instance', None)
-
- if zif is None:
- zif = ZoneInfoFile(getzoneinfofile_stream())
-
- get_zonefile_instance._cached_instance = zif
-
- return zif
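-
-# Typical use (illustrative; which zones are available depends on the
-# tarball shipped with this copy of dateutil):
-#
-#     >>> zif = get_zonefile_instance()
-#     >>> nyc = zif.get('America/New_York')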
-
-
-def gettz(name):
- """
- This retrieves a time zone from the local zoneinfo tarball that is packaged
- with dateutil.
-
- :param name:
- An IANA-style time zone name, as found in the zoneinfo file.
-
- :return:
- Returns a :class:`dateutil.tz.tzfile` time zone object.
-
- .. warning::
- It is generally inadvisable to use this function, and it is only
- provided for API compatibility with earlier versions. This is *not*
- equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
- time zone based on the inputs, favoring system zoneinfo. This is ONLY
- for accessing the dateutil-specific zoneinfo (which may be out of
- date compared to the system zoneinfo).
-
- .. deprecated:: 2.6
-        If you need to use a specific zoneinfo file over the system zoneinfo,
- instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
- :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.
-
- Use :func:`get_zonefile_instance` to retrieve an instance of the
- dateutil-provided zoneinfo.
- """
- warnings.warn("zoneinfo.gettz() will be removed in future versions, "
- "to use the dateutil-provided zoneinfo files, instantiate a "
- "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
- "instead. See the documentation for details.",
- DeprecationWarning)
-
- if len(_CLASS_ZONE_INSTANCE) == 0:
- _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
- return _CLASS_ZONE_INSTANCE[0].zones.get(name)
-
-
-def gettz_db_metadata():
- """ Get the zonefile metadata
-
- See `zonefile_metadata`_
-
- :returns:
- A dictionary with the database metadata
-
- .. deprecated:: 2.6
- See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
- query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
- """
- warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
- "versions, to use the dateutil-provided zoneinfo files, "
- "ZoneInfoFile object and query the 'metadata' attribute "
- "instead. See the documentation for details.",
- DeprecationWarning)
-
- if len(_CLASS_ZONE_INSTANCE) == 0:
- _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
- return _CLASS_ZONE_INSTANCE[0].metadata
diff --git a/venv/Lib/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/venv/Lib/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz
deleted file mode 100644
index 613c0ff..0000000
Binary files a/venv/Lib/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz and /dev/null differ
diff --git a/venv/Lib/site-packages/dateutil/zoneinfo/rebuild.py b/venv/Lib/site-packages/dateutil/zoneinfo/rebuild.py
deleted file mode 100644
index 9d53bb8..0000000
--- a/venv/Lib/site-packages/dateutil/zoneinfo/rebuild.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import logging
-import os
-import tempfile
-import shutil
-import json
-from subprocess import check_call
-
-from dateutil.zoneinfo import tar_open, METADATA_FN, ZONEFILENAME
-
-
-def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
- """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
-
- filename is the timezone tarball from ftp.iana.org/tz.
-
- """
- tmpdir = tempfile.mkdtemp()
- zonedir = os.path.join(tmpdir, "zoneinfo")
- moduledir = os.path.dirname(__file__)
- try:
- with tar_open(filename) as tf:
- for name in zonegroups:
- tf.extract(name, tmpdir)
- filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
- try:
- check_call(["zic", "-d", zonedir] + filepaths)
- except OSError as e:
- _print_on_nosuchfile(e)
- raise
- # write metadata file
- with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
- json.dump(metadata, f, indent=4, sort_keys=True)
- target = os.path.join(moduledir, ZONEFILENAME)
- with tar_open(target, "w:%s" % format) as tf:
- for entry in os.listdir(zonedir):
- entrypath = os.path.join(zonedir, entry)
- tf.add(entrypath, entry)
- finally:
- shutil.rmtree(tmpdir)
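-
-# Illustrative invocation (the filename and metadata are hypothetical; this
-# requires the 'zic' compiler on PATH and an IANA tzdata tarball):
-#
-#     rebuild('tzdata-2017b.tar.gz', zonegroups=['northamerica', 'europe'],
-#             metadata={'tzversion': '2017b'})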
-
-
-def _print_on_nosuchfile(e):
- """Print helpful troubleshooting message
-
- e is an exception raised by subprocess.check_call()
-
- """
- if e.errno == 2:
- logging.error(
- "Could not find zic. Perhaps you need to install "
- "libc-bin or some other package that provides it, "
- "or it's not in your PATH?")
diff --git a/venv/Lib/site-packages/easy_install.py b/venv/Lib/site-packages/easy_install.py
deleted file mode 100644
index d87e984..0000000
--- a/venv/Lib/site-packages/easy_install.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Run the EasyInstall command"""
-
-if __name__ == '__main__':
- from setuptools.command.easy_install import main
- main()
diff --git a/venv/Lib/site-packages/editor.py b/venv/Lib/site-packages/editor.py
deleted file mode 100644
index 54ee697..0000000
--- a/venv/Lib/site-packages/editor.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-"""Tools for invoking editors programmatically."""
-
-from __future__ import print_function
-
-import sys
-import locale
-import os.path
-import subprocess
-import tempfile
-from distutils.spawn import find_executable
-
-
-__all__ = [
- 'edit',
- 'get_editor',
- 'EditorError',
-]
-
-__version__ = '1.0.3'
-
-
-class EditorError(RuntimeError):
- pass
-
-
-def get_default_editors():
- # TODO: Make platform-specific
- return [
- 'editor',
- 'vim',
- 'emacs',
- 'nano',
- ]
-
-
-def get_editor_args(editor):
- if editor in ['vim', 'gvim', 'vim.basic', 'vim.tiny']:
- return ['-f', '-o']
-
- elif editor == 'emacs':
- return ['-nw']
-
- elif editor == 'gedit':
- return ['-w', '--new-window']
-
- elif editor == 'nano':
- return ['-R']
-
- else:
- return []
-
-
-def get_editor():
- # Get the editor from the environment. Prefer VISUAL to EDITOR
- editor = os.environ.get('VISUAL') or os.environ.get('EDITOR')
- if editor:
- return editor
-
- # None found in the environment. Fallback to platform-specific defaults.
- for ed in get_default_editors():
- path = find_executable(ed)
- if path is not None:
- return path
-
-    raise EditorError("Unable to find a viable editor on this system. "
-                      "Please consider setting your $EDITOR variable")
-
-
-def get_tty_filename():
- if sys.platform == 'win32':
- return 'CON:'
- return '/dev/tty'
-
-
-def edit(filename=None, contents=None, use_tty=None):
- editor = get_editor()
- args = [editor] + get_editor_args(os.path.basename(os.path.realpath(editor)))
-
- if use_tty is None:
- use_tty = sys.stdin.isatty() and not sys.stdout.isatty()
-
- if filename is None:
- tmp = tempfile.NamedTemporaryFile()
- filename = tmp.name
-
- if contents is not None:
- with open(filename, mode='wb') as f:
- f.write(contents)
-
- args += [filename]
-
- stdout = None
- if use_tty:
- stdout = open(get_tty_filename(), 'wb')
-
- proc = subprocess.Popen(args, close_fds=True, stdout=stdout)
- proc.communicate()
-
- with open(filename, mode='rb') as f:
- return f.read()
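-
-# Illustrative: this blocks until the user's $VISUAL/$EDITOR exits, then
-# returns the edited file's contents as bytes:
-#
-#     >>> text = edit(contents=b'# Replace this text!\n')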
-
-
-def _get_editor(ns):
- print(get_editor())
-
-
-def _edit(ns):
- contents = ns.contents
- if contents is not None:
- contents = contents.encode(locale.getpreferredencoding())
- print(edit(filename=ns.path, contents=contents))
-
-
-if __name__ == '__main__':
- import argparse
- ap = argparse.ArgumentParser()
- sp = ap.add_subparsers()
-
- cmd = sp.add_parser('get-editor')
- cmd.set_defaults(cmd=_get_editor)
-
- cmd = sp.add_parser('edit')
- cmd.set_defaults(cmd=_edit)
- cmd.add_argument('path', type=str, nargs='?')
- cmd.add_argument('--contents', type=str)
-
- ns = ap.parse_args()
- ns.cmd(ns)
diff --git a/venv/Lib/site-packages/flask/__init__.py b/venv/Lib/site-packages/flask/__init__.py
deleted file mode 100644
index a9a873f..0000000
--- a/venv/Lib/site-packages/flask/__init__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask
- ~~~~~
-
- A microframework based on Werkzeug. It's extensively documented
- and follows best practice patterns.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-__version__ = '0.12.2'
-
-# utilities we import from Werkzeug and Jinja2 that are unused
-# in the module but are exported as public interface.
-from werkzeug.exceptions import abort
-from werkzeug.utils import redirect
-from jinja2 import Markup, escape
-
-from .app import Flask, Request, Response
-from .config import Config
-from .helpers import url_for, flash, send_file, send_from_directory, \
- get_flashed_messages, get_template_attribute, make_response, safe_join, \
- stream_with_context
-from .globals import current_app, g, request, session, _request_ctx_stack, \
- _app_ctx_stack
-from .ctx import has_request_context, has_app_context, \
- after_this_request, copy_current_request_context
-from .blueprints import Blueprint
-from .templating import render_template, render_template_string
-
-# the signals
-from .signals import signals_available, template_rendered, request_started, \
- request_finished, got_request_exception, request_tearing_down, \
- appcontext_tearing_down, appcontext_pushed, \
- appcontext_popped, message_flashed, before_render_template
-
-# We're not exposing the actual json module but a convenient wrapper around
-# it.
-from . import json
-
-# This was the only thing that Flask used to export at one point and it had
-# a more generic name.
-jsonify = json.jsonify
-
-# backwards compat, goes away in 1.0
-from .sessions import SecureCookieSession as Session
-json_available = True
diff --git a/venv/Lib/site-packages/flask/__main__.py b/venv/Lib/site-packages/flask/__main__.py
deleted file mode 100644
index cbefccd..0000000
--- a/venv/Lib/site-packages/flask/__main__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.__main__
- ~~~~~~~~~~~~~~
-
- Alias for flask.run for the command line.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-
-if __name__ == '__main__':
- from .cli import main
- main(as_module=True)
diff --git a/venv/Lib/site-packages/flask/_compat.py b/venv/Lib/site-packages/flask/_compat.py
deleted file mode 100644
index 071628f..0000000
--- a/venv/Lib/site-packages/flask/_compat.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask._compat
- ~~~~~~~~~~~~~
-
- Some py2/py3 compatibility support based on a stripped down
- version of six so we don't have to depend on a specific version
- of it.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-import sys
-
-PY2 = sys.version_info[0] == 2
-_identity = lambda x: x
-
-
-if not PY2:
- text_type = str
- string_types = (str,)
- integer_types = (int,)
-
- iterkeys = lambda d: iter(d.keys())
- itervalues = lambda d: iter(d.values())
- iteritems = lambda d: iter(d.items())
-
- from io import StringIO
-
- def reraise(tp, value, tb=None):
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
- implements_to_string = _identity
-
-else:
- text_type = unicode
- string_types = (str, unicode)
- integer_types = (int, long)
-
- iterkeys = lambda d: d.iterkeys()
- itervalues = lambda d: d.itervalues()
- iteritems = lambda d: d.iteritems()
-
- from cStringIO import StringIO
-
- exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
-
- def implements_to_string(cls):
- cls.__unicode__ = cls.__str__
- cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
- return cls
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- # This requires a bit of explanation: the basic idea is to make a
- # dummy metaclass for one level of class instantiation that replaces
- # itself with the actual metaclass.
- class metaclass(type):
- def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
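-
-# Illustrative ('SomeMeta' and 'Base' are hypothetical names):
-#
-#     class MyClass(with_metaclass(SomeMeta, Base)):
-#         pass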
-
-
-# Certain versions of pypy have a bug where clearing the exception stack
-# breaks the __exit__ function in a very peculiar way. The second level of
-# exception blocks is necessary because pypy seems to forget to check if an
-# exception happened until the next bytecode instruction?
-#
-# Relevant PyPy bugfix commit:
-# https://bitbucket.org/pypy/pypy/commits/77ecf91c635a287e88e60d8ddb0f4e9df4003301
-# According to ronan on #pypy IRC, it is released in PyPy2 2.3 and later
-# versions.
-#
-# Ubuntu 14.04 has PyPy 2.2.1, which does exhibit this bug.
-BROKEN_PYPY_CTXMGR_EXIT = False
-if hasattr(sys, 'pypy_version_info'):
- class _Mgr(object):
- def __enter__(self):
- return self
- def __exit__(self, *args):
- if hasattr(sys, 'exc_clear'):
- # Python 3 (PyPy3) doesn't have exc_clear
- sys.exc_clear()
- try:
- try:
- with _Mgr():
- raise AssertionError()
- except:
- raise
- except TypeError:
- BROKEN_PYPY_CTXMGR_EXIT = True
- except AssertionError:
- pass
diff --git a/venv/Lib/site-packages/flask/app.py b/venv/Lib/site-packages/flask/app.py
deleted file mode 100644
index 1404e17..0000000
--- a/venv/Lib/site-packages/flask/app.py
+++ /dev/null
@@ -1,2003 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.app
- ~~~~~~~~~
-
- This module implements the central WSGI application object.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-import os
-import sys
-from threading import Lock
-from datetime import timedelta
-from itertools import chain
-from functools import update_wrapper
-
-from werkzeug.datastructures import ImmutableDict
-from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
-from werkzeug.exceptions import HTTPException, InternalServerError, \
- MethodNotAllowed, BadRequest, default_exceptions
-
-from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
- locked_cached_property, _endpoint_from_view_func, find_package, \
- get_debug_flag
-from . import json, cli
-from .wrappers import Request, Response
-from .config import ConfigAttribute, Config
-from .ctx import RequestContext, AppContext, _AppCtxGlobals
-from .globals import _request_ctx_stack, request, session, g
-from .sessions import SecureCookieSessionInterface
-from .templating import DispatchingJinjaLoader, Environment, \
- _default_template_ctx_processor
-from .signals import request_started, request_finished, got_request_exception, \
- request_tearing_down, appcontext_tearing_down
-from ._compat import reraise, string_types, text_type, integer_types
-
-# a lock used for logger initialization
-_logger_lock = Lock()
-
-# a singleton sentinel value for parameter defaults
-_sentinel = object()
-
-
-def _make_timedelta(value):
- if not isinstance(value, timedelta):
- return timedelta(seconds=value)
- return value
-
-
-def setupmethod(f):
- """Wraps a method so that it performs a check in debug mode if the
- first request was already handled.
- """
- def wrapper_func(self, *args, **kwargs):
- if self.debug and self._got_first_request:
- raise AssertionError('A setup function was called after the '
- 'first request was handled. This usually indicates a bug '
- 'in the application where a module was not imported '
- 'and decorators or other functionality was called too late.\n'
- 'To fix this make sure to import all your view modules, '
- 'database models and everything related at a central place '
- 'before the application starts serving requests.')
- return f(self, *args, **kwargs)
- return update_wrapper(wrapper_func, f)
-
-
-class Flask(_PackageBoundObject):
- """The flask object implements a WSGI application and acts as the central
- object. It is passed the name of the module or package of the
- application. Once it is created it will act as a central registry for
- the view functions, the URL rules, template configuration and much more.
-
- The name of the package is used to resolve resources from inside the
- package or the folder the module is contained in depending on if the
- package parameter resolves to an actual python package (a folder with
- an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
-
- For more information about resource loading, see :func:`open_resource`.
-
- Usually you create a :class:`Flask` instance in your main module or
- in the :file:`__init__.py` file of your package like this::
-
- from flask import Flask
- app = Flask(__name__)
-
- .. admonition:: About the First Parameter
-
- The idea of the first parameter is to give Flask an idea of what
- belongs to your application. This name is used to find resources
- on the filesystem, can be used by extensions to improve debugging
- information and a lot more.
-
- So it's important what you provide there. If you are using a single
- module, `__name__` is always the correct value. If, however, you are
- using a package, it's usually recommended to hardcode the name of
- your package there.
-
- For example if your application is defined in :file:`yourapplication/app.py`
- you should create it with one of the two versions below::
-
- app = Flask('yourapplication')
- app = Flask(__name__.split('.')[0])
-
- Why is that? The application will work even with `__name__`, thanks
- to how resources are looked up. However it will make debugging more
- painful. Certain extensions can make assumptions based on the
- import name of your application. For example the Flask-SQLAlchemy
- extension will look for the code in your application that triggered
- an SQL query in debug mode. If the import name is not properly set
- up, that debugging information is lost. (For example it would only
- pick up SQL queries in `yourapplication.app` and not
- `yourapplication.views.frontend`)
-
- .. versionadded:: 0.7
- The `static_url_path`, `static_folder`, and `template_folder`
- parameters were added.
-
- .. versionadded:: 0.8
- The `instance_path` and `instance_relative_config` parameters were
- added.
-
- .. versionadded:: 0.11
- The `root_path` parameter was added.
-
- :param import_name: the name of the application package
- :param static_url_path: can be used to specify a different path for the
- static files on the web. Defaults to the name
- of the `static_folder` folder.
- :param static_folder: the folder with static files that should be served
- at `static_url_path`. Defaults to the ``'static'``
- folder in the root path of the application.
- :param template_folder: the folder that contains the templates that should
- be used by the application. Defaults to
- ``'templates'`` folder in the root path of the
- application.
- :param instance_path: An alternative instance path for the application.
- By default the folder ``'instance'`` next to the
- package or module is assumed to be the instance
- path.
- :param instance_relative_config: if set to ``True`` relative filenames
- for loading the config are assumed to
- be relative to the instance path instead
- of the application root.
- :param root_path: Flask by default will automatically calculate the path
- to the root of the application. In certain situations
- this cannot be achieved (for instance if the package
- is a Python 3 namespace package) and needs to be
- manually defined.
- """
-
- #: The class that is used for request objects. See :class:`~flask.Request`
- #: for more information.
- request_class = Request
-
- #: The class that is used for response objects. See
- #: :class:`~flask.Response` for more information.
- response_class = Response
-
- #: The class that is used for the Jinja environment.
- #:
- #: .. versionadded:: 0.11
- jinja_environment = Environment
-
- #: The class that is used for the :data:`~flask.g` instance.
- #:
- #: Example use cases for a custom class:
- #:
- #: 1. Store arbitrary attributes on flask.g.
- #: 2. Add a property for lazy per-request database connectors.
- #: 3. Return None instead of AttributeError on unexpected attributes.
- #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
- #:
- #: In Flask 0.9 this property was called `request_globals_class` but it
- #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
- #: flask.g object is now application context scoped.
- #:
- #: .. versionadded:: 0.10
- app_ctx_globals_class = _AppCtxGlobals
-
- # Backwards compatibility support
- def _get_request_globals_class(self):
- return self.app_ctx_globals_class
- def _set_request_globals_class(self, value):
- from warnings import warn
- warn(DeprecationWarning('request_globals_class attribute is now '
- 'called app_ctx_globals_class'))
- self.app_ctx_globals_class = value
- request_globals_class = property(_get_request_globals_class,
- _set_request_globals_class)
- del _get_request_globals_class, _set_request_globals_class
-
- #: The class that is used for the ``config`` attribute of this app.
- #: Defaults to :class:`~flask.Config`.
- #:
- #: Example use cases for a custom class:
- #:
- #: 1. Default values for certain config options.
- #: 2. Access to config values through attributes in addition to keys.
- #:
- #: .. versionadded:: 0.11
- config_class = Config
-
- #: The debug flag. Set this to ``True`` to enable debugging of the
- #: application. In debug mode the debugger will kick in when an unhandled
- #: exception occurs and the integrated server will automatically reload
- #: the application if changes in the code are detected.
- #:
- #: This attribute can also be configured from the config with the ``DEBUG``
- #: configuration key. Defaults to ``False``.
- debug = ConfigAttribute('DEBUG')
-
- #: The testing flag. Set this to ``True`` to enable the test mode of
- #: Flask extensions (and in the future probably also Flask itself).
- #: For example this might activate unittest helpers that have an
- #: additional runtime cost which should not be enabled by default.
- #:
- #: If this is enabled and ``PROPAGATE_EXCEPTIONS`` is not changed from
- #: the default, it's implicitly enabled.
- #:
- #: This attribute can also be configured from the config with the
- #: ``TESTING`` configuration key. Defaults to ``False``.
- testing = ConfigAttribute('TESTING')
-
- #: If a secret key is set, cryptographic components can use this to
- #: sign cookies and other things. Set this to a complex random value
- #: when you want to use the secure cookie for instance.
- #:
- #: This attribute can also be configured from the config with the
- #: ``SECRET_KEY`` configuration key. Defaults to ``None``.
- secret_key = ConfigAttribute('SECRET_KEY')
-
- #: The secure cookie uses this for the name of the session cookie.
- #:
- #: This attribute can also be configured from the config with the
- #: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'``
- session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
-
- #: A :class:`~datetime.timedelta` which is used to set the expiration
- #: date of a permanent session. The default is 31 days which makes a
- #: permanent session survive for roughly one month.
- #:
- #: This attribute can also be configured from the config with the
- #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
- #: ``timedelta(days=31)``
- permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
- get_converter=_make_timedelta)
-
- #: A :class:`~datetime.timedelta` which is used as default cache_timeout
- #: for the :func:`send_file` functions. The default is 12 hours.
- #:
- #: This attribute can also be configured from the config with the
- #: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration
- #: variable can also be set with an integer value used as seconds.
- #: Defaults to ``timedelta(hours=12)``
- send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT',
- get_converter=_make_timedelta)
-
- #: Enable this if you want to use the X-Sendfile feature. Keep in
- #: mind that the server has to support this. This only affects files
- #: sent with the :func:`send_file` method.
- #:
- #: .. versionadded:: 0.2
- #:
- #: This attribute can also be configured from the config with the
- #: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``.
- use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
-
- #: The name of the logger to use. By default the logger name is the
- #: package name passed to the constructor.
- #:
- #: .. versionadded:: 0.4
- logger_name = ConfigAttribute('LOGGER_NAME')
-
- #: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
- #:
- #: .. versionadded:: 0.10
- json_encoder = json.JSONEncoder
-
- #: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
- #:
- #: .. versionadded:: 0.10
- json_decoder = json.JSONDecoder
-
- #: Options that are passed directly to the Jinja2 environment.
- jinja_options = ImmutableDict(
- extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
- )
-
- #: Default configuration parameters.
- default_config = ImmutableDict({
- 'DEBUG': get_debug_flag(default=False),
- 'TESTING': False,
- 'PROPAGATE_EXCEPTIONS': None,
- 'PRESERVE_CONTEXT_ON_EXCEPTION': None,
- 'SECRET_KEY': None,
- 'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
- 'USE_X_SENDFILE': False,
- 'LOGGER_NAME': None,
- 'LOGGER_HANDLER_POLICY': 'always',
- 'SERVER_NAME': None,
- 'APPLICATION_ROOT': None,
- 'SESSION_COOKIE_NAME': 'session',
- 'SESSION_COOKIE_DOMAIN': None,
- 'SESSION_COOKIE_PATH': None,
- 'SESSION_COOKIE_HTTPONLY': True,
- 'SESSION_COOKIE_SECURE': False,
- 'SESSION_REFRESH_EACH_REQUEST': True,
- 'MAX_CONTENT_LENGTH': None,
- 'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12),
- 'TRAP_BAD_REQUEST_ERRORS': False,
- 'TRAP_HTTP_EXCEPTIONS': False,
- 'EXPLAIN_TEMPLATE_LOADING': False,
- 'PREFERRED_URL_SCHEME': 'http',
- 'JSON_AS_ASCII': True,
- 'JSON_SORT_KEYS': True,
- 'JSONIFY_PRETTYPRINT_REGULAR': True,
- 'JSONIFY_MIMETYPE': 'application/json',
- 'TEMPLATES_AUTO_RELOAD': None,
- })
-
- #: The rule object to use for URL rules created. This is used by
- #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
- #:
- #: .. versionadded:: 0.7
- url_rule_class = Rule
-
- #: the test client class that is used when `test_client` is used.
- #:
- #: .. versionadded:: 0.7
- test_client_class = None
-
- #: the session interface to use. By default an instance of
- #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
- #:
- #: .. versionadded:: 0.8
- session_interface = SecureCookieSessionInterface()
-
- def __init__(self, import_name, static_path=None, static_url_path=None,
- static_folder='static', template_folder='templates',
- instance_path=None, instance_relative_config=False,
- root_path=None):
- _PackageBoundObject.__init__(self, import_name,
- template_folder=template_folder,
- root_path=root_path)
- if static_path is not None:
- from warnings import warn
- warn(DeprecationWarning('static_path is now called '
- 'static_url_path'), stacklevel=2)
- static_url_path = static_path
-
- if static_url_path is not None:
- self.static_url_path = static_url_path
- if static_folder is not None:
- self.static_folder = static_folder
- if instance_path is None:
- instance_path = self.auto_find_instance_path()
- elif not os.path.isabs(instance_path):
- raise ValueError('If an instance path is provided it must be '
- 'absolute. A relative path was given instead.')
-
- #: Holds the path to the instance folder.
- #:
- #: .. versionadded:: 0.8
- self.instance_path = instance_path
-
- #: The configuration dictionary as :class:`Config`. This behaves
- #: exactly like a regular dictionary but supports additional methods
- #: to load a config from files.
- self.config = self.make_config(instance_relative_config)
-
- # Prepare the deferred setup of the logger.
- self._logger = None
- self.logger_name = self.import_name
-
- #: A dictionary of all view functions registered. The keys will
- #: be function names which are also used to generate URLs and
- #: the values are the function objects themselves.
- #: To register a view function, use the :meth:`route` decorator.
- self.view_functions = {}
-
- # support for the now deprecated `error_handlers` attribute. The
- # :attr:`error_handler_spec` should be used instead.
- self._error_handlers = {}
-
- #: A dictionary of all registered error handlers. The key is ``None``
- #: for error handlers active on the application, otherwise the key is
- #: the name of the blueprint. Each key points to another dictionary
- #: where the key is the status code of the http exception. The
- #: special key ``None`` points to a list of tuples where the first item
- #: is the class for the instance check and the second the error handler
- #: function.
- #:
- #: To register an error handler, use the :meth:`errorhandler`
- #: decorator.
- self.error_handler_spec = {None: self._error_handlers}
-
- #: A list of functions that are called when :meth:`url_for` raises a
- #: :exc:`~werkzeug.routing.BuildError`. Each function registered here
- #: is called with `error`, `endpoint` and `values`. If a function
- #: returns ``None`` or raises a :exc:`BuildError` the next function is
- #: tried.
- #:
- #: .. versionadded:: 0.9
- self.url_build_error_handlers = []
-
- #: A dictionary with lists of functions that should be called at the
- #: beginning of the request. The key of the dictionary is the name of
- #: the blueprint this function is active for, ``None`` for all requests.
- #: This can for example be used to open database connections or
- #: get hold of the currently logged in user. To register a
- #: function here, use the :meth:`before_request` decorator.
- self.before_request_funcs = {}
-
- #: A list of functions that should be called at the beginning of the
- #: first request to this instance. To register a function here, use
- #: the :meth:`before_first_request` decorator.
- #:
- #: .. versionadded:: 0.8
- self.before_first_request_funcs = []
-
- #: A dictionary with lists of functions that should be called after
- #: each request. The key of the dictionary is the name of the blueprint
- #: this function is active for, ``None`` for all requests. This can for
- #: example be used to close database connections. To register a function
- #: here, use the :meth:`after_request` decorator.
- self.after_request_funcs = {}
-
- #: A dictionary with lists of functions that are called after
- #: each request, even if an exception has occurred. The key of the
- #: dictionary is the name of the blueprint this function is active for,
- #: ``None`` for all requests. These functions are not allowed to modify
- #: the request, and their return values are ignored. If an exception
- #: occurred while processing the request, it gets passed to each
- #: teardown_request function. To register a function here, use the
- #: :meth:`teardown_request` decorator.
- #:
- #: .. versionadded:: 0.7
- self.teardown_request_funcs = {}
-
- #: A list of functions that are called when the application context
- #: is destroyed. Since the application context is also torn down
- #: when the request ends, this is the place to put code that disconnects
- #: from databases.
- #:
- #: .. versionadded:: 0.9
- self.teardown_appcontext_funcs = []
-
- #: A dictionary with lists of functions that can be used as URL value
- #: preprocessors. Before a request is dispatched, these functions are
- #: called with the endpoint and the dictionary of URL values, which
- #: they may modify in place. The key ``None`` here is used for
- #: application wide callbacks, otherwise the key is the name of the
- #: blueprint.
- #:
- #: .. versionadded:: 0.7
- self.url_value_preprocessors = {}
-
- #: A dictionary with lists of functions that are called as URL default
- #: functions whenever a URL is built, giving each the chance to inject
- #: default values into the dictionary of values in place. The key
- #: ``None`` here is used for application wide callbacks, otherwise the
- #: key is the name of the blueprint. A function registered here is the
- #: natural counterpart of a :meth:`url_value_preprocessor`: it can
- #: automatically add back the parameters that the preprocessor removed.
- #:
- #: .. versionadded:: 0.7
- self.url_default_functions = {}
-
- #: A dictionary with lists of functions that are called without arguments
- #: to populate the template context. The key of the dictionary is the
- #: name of the blueprint this function is active for, ``None`` for all
- #: requests. Each returns a dictionary that the template context is
- #: updated with. To register a function here, use the
- #: :meth:`context_processor` decorator.
- self.template_context_processors = {
- None: [_default_template_ctx_processor]
- }
-
- #: A list of shell context processor functions that should be run
- #: when a shell context is created.
- #:
- #: .. versionadded:: 0.11
- self.shell_context_processors = []
-
- #: all the attached blueprints in a dictionary by name. Blueprints
- #: can be attached multiple times so this dictionary does not tell
- #: you how many times they were attached.
- #:
- #: .. versionadded:: 0.7
- self.blueprints = {}
- self._blueprint_order = []
-
- #: a place where extensions can store application specific state. For
- #: example this is where an extension could store database engines and
- #: similar things. For backwards compatibility extensions should register
- #: themselves like this::
- #:
- #: if not hasattr(app, 'extensions'):
- #: app.extensions = {}
- #: app.extensions['extensionname'] = SomeObject()
- #:
- #: The key must match the name of the extension module. For example in
- #: case of a "Flask-Foo" extension in `flask_foo`, the key would be
- #: ``'foo'``.
- #:
- #: .. versionadded:: 0.7
- self.extensions = {}
-
- #: The :class:`~werkzeug.routing.Map` for this instance. You can use
- #: this to change the routing converters after the class was created
- #: but before any routes are connected. Example::
- #:
- #: from werkzeug.routing import BaseConverter
- #:
- #: class ListConverter(BaseConverter):
- #: def to_python(self, value):
- #: return value.split(',')
- #: def to_url(self, values):
- #: return ','.join(super(ListConverter, self).to_url(value)
- #: for value in values)
- #:
- #: app = Flask(__name__)
- #: app.url_map.converters['list'] = ListConverter
- self.url_map = Map()
-
- # tracks internally if the application already handled at least one
- # request.
- self._got_first_request = False
- self._before_request_lock = Lock()
-
- # register the static folder for the application. Do that even
- # if the folder does not exist. First of all it might be created
- # while the server is running (usually happens during development)
- # but also because Google App Engine stores static files somewhere
- # else when mapped with the .yml file.
- if self.has_static_folder:
- self.add_url_rule(self.static_url_path + '/<path:filename>',
- endpoint='static',
- view_func=self.send_static_file)
-
- #: The click command line context for this application. Commands
- #: registered here show up in the :command:`flask` command once the
- #: application has been discovered. The default commands are
- #: provided by Flask itself and can be overridden.
- #:
- #: This is an instance of a :class:`click.Group` object.
- self.cli = cli.AppGroup(self.name)
-
- def _get_error_handlers(self):
- from warnings import warn
- warn(DeprecationWarning('error_handlers is deprecated, use the '
- 'new error_handler_spec attribute instead.'), stacklevel=1)
- return self._error_handlers
- def _set_error_handlers(self, value):
- self._error_handlers = value
- self.error_handler_spec[None] = value
- error_handlers = property(_get_error_handlers, _set_error_handlers)
- del _get_error_handlers, _set_error_handlers
-
- @locked_cached_property
- def name(self):
- """The name of the application. This is usually the import name
- with the difference that it's guessed from the run file if the
- import name is ``__main__``. This name is used as a display name when
- Flask needs the name of the application. It can be set and overridden
- to change the value.
-
- .. versionadded:: 0.8
- """
- if self.import_name == '__main__':
- fn = getattr(sys.modules['__main__'], '__file__', None)
- if fn is None:
- return '__main__'
- return os.path.splitext(os.path.basename(fn))[0]
- return self.import_name
-
- @property
- def propagate_exceptions(self):
- """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration
- value in case it's set, otherwise a sensible default is returned.
-
- .. versionadded:: 0.7
- """
- rv = self.config['PROPAGATE_EXCEPTIONS']
- if rv is not None:
- return rv
- return self.testing or self.debug
-
- @property
- def preserve_context_on_exception(self):
- """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``
- configuration value in case it's set, otherwise a sensible default
- is returned.
-
- .. versionadded:: 0.7
- """
- rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
- if rv is not None:
- return rv
- return self.debug
-
- @property
- def logger(self):
- """A :class:`logging.Logger` object for this application. The
- default configuration is to log to stderr if the application is
- in debug mode. This logger can be used to (surprise) log messages.
- Here some examples::
-
- app.logger.debug('A value for debugging')
- app.logger.warning('A warning occurred (%d apples)', 42)
- app.logger.error('An error occurred')
-
- .. versionadded:: 0.3
- """
- if self._logger and self._logger.name == self.logger_name:
- return self._logger
- with _logger_lock:
- if self._logger and self._logger.name == self.logger_name:
- return self._logger
- from flask.logging import create_logger
- self._logger = rv = create_logger(self)
- return rv
-
- @locked_cached_property
- def jinja_env(self):
- """The Jinja2 environment used to load templates."""
- return self.create_jinja_environment()
-
- @property
- def got_first_request(self):
- """This attribute is set to ``True`` if the application started
- handling the first request.
-
- .. versionadded:: 0.8
- """
- return self._got_first_request
-
- def make_config(self, instance_relative=False):
- """Used to create the config attribute by the Flask constructor.
- The `instance_relative` parameter is passed in from the constructor
- of Flask (there named `instance_relative_config`) and indicates if
- the config should be relative to the instance path or the root path
- of the application.
-
- .. versionadded:: 0.8
- """
- root_path = self.root_path
- if instance_relative:
- root_path = self.instance_path
- return self.config_class(root_path, self.default_config)
-
- def auto_find_instance_path(self):
- """Tries to locate the instance path if it was not provided to the
- constructor of the application class. It will basically calculate
- the path to a folder named ``instance`` next to your main file or
- the package.
-
- .. versionadded:: 0.8
- """
- prefix, package_path = find_package(self.import_name)
- if prefix is None:
- return os.path.join(package_path, 'instance')
- return os.path.join(prefix, 'var', self.name + '-instance')
-
- def open_instance_resource(self, resource, mode='rb'):
- """Opens a resource from the application's instance folder
- (:attr:`instance_path`). Otherwise works like
- :meth:`open_resource`. Instance resources can also be opened for
- writing.
-
- :param resource: the name of the resource. To access resources within
- subfolders use forward slashes as separator.
- :param mode: resource file opening mode, default is 'rb'.
- """
- return open(os.path.join(self.instance_path, resource), mode)
-
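- # Editor's note: an illustrative usage sketch of open_instance_resource,
- # not part of the original module; the file name 'application.cfg' is
- # hypothetical.
- #
- #     with app.open_instance_resource('application.cfg') as f:
- #         config_data = f.read()
-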
- def create_jinja_environment(self):
- """Creates the Jinja2 environment based on :attr:`jinja_options`
- and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
- the Jinja2 globals and filters after initialization. Override
- this function to customize the behavior.
-
- .. versionadded:: 0.5
- .. versionchanged:: 0.11
- ``Environment.auto_reload`` set in accordance with
- ``TEMPLATES_AUTO_RELOAD`` configuration option.
- """
- options = dict(self.jinja_options)
- if 'autoescape' not in options:
- options['autoescape'] = self.select_jinja_autoescape
- if 'auto_reload' not in options:
- if self.config['TEMPLATES_AUTO_RELOAD'] is not None:
- options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD']
- else:
- options['auto_reload'] = self.debug
- rv = self.jinja_environment(self, **options)
- rv.globals.update(
- url_for=url_for,
- get_flashed_messages=get_flashed_messages,
- config=self.config,
- # request, session and g are normally added with the
- # context processor for efficiency reasons but for imported
- # templates we also want the proxies in there.
- request=request,
- session=session,
- g=g
- )
- rv.filters['tojson'] = json.tojson_filter
- return rv
-
- def create_global_jinja_loader(self):
- """Creates the loader for the Jinja2 environment. Can be used to
- override just the loader while keeping the rest unchanged. It's
- discouraged to override this function. Instead one should override
- the :meth:`jinja_loader` function.
-
- The global loader dispatches between the loaders of the application
- and the individual blueprints.
-
- .. versionadded:: 0.7
- """
- return DispatchingJinjaLoader(self)
-
- def init_jinja_globals(self):
- """Deprecated. Used to initialize the Jinja2 globals.
-
- .. versionadded:: 0.5
- .. versionchanged:: 0.7
- This method is deprecated with 0.7. Override
- :meth:`create_jinja_environment` instead.
- """
-
- def select_jinja_autoescape(self, filename):
- """Returns ``True`` if autoescaping should be active for the given
- template name. If no template name is given, returns `True`.
-
- .. versionadded:: 0.5
- """
- if filename is None:
- return True
- return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
-
- def update_template_context(self, context):
- """Update the template context with some commonly used variables.
- This injects request, session, config and g into the template
- context as well as everything template context processors want
- to inject. Note that as of Flask 0.6, the original values
- in the context will not be overridden if a context processor
- decides to return a value with the same key.
-
- :param context: the context as a dictionary that is updated in place
- to add extra variables.
- """
- funcs = self.template_context_processors[None]
- reqctx = _request_ctx_stack.top
- if reqctx is not None:
- bp = reqctx.request.blueprint
- if bp is not None and bp in self.template_context_processors:
- funcs = chain(funcs, self.template_context_processors[bp])
- orig_ctx = context.copy()
- for func in funcs:
- context.update(func())
- # make sure the original values win. This makes it possible to
- # add new variables in context processors more easily without breaking
- # existing views.
- context.update(orig_ctx)
-
- def make_shell_context(self):
- """Returns the shell context for an interactive shell for this
- application. This runs all the registered shell context
- processors.
-
- .. versionadded:: 0.11
- """
- rv = {'app': self, 'g': g}
- for processor in self.shell_context_processors:
- rv.update(processor())
- return rv
-
- def run(self, host=None, port=None, debug=None, **options):
- """Runs the application on a local development server.
-
- Do not use ``run()`` in a production setting. It is not intended to
- meet security and performance requirements for a production server.
- Instead, see :ref:`deployment` for WSGI server recommendations.
-
- If the :attr:`debug` flag is set the server will automatically reload
- for code changes and show a debugger in case an exception happened.
-
- If you want to run the application in debug mode, but disable the
- code execution on the interactive debugger, you can pass
- ``use_evalex=False`` as parameter. This will keep the debugger's
- traceback screen active, but disable code execution.
-
- It is not recommended to use this function for development with
- automatic reloading as this is badly supported. Instead you should
- be using the :command:`flask` command line script's ``run`` support.
-
- .. admonition:: Keep in Mind
-
- Flask will suppress any server error with a generic error page
- unless it is in debug mode. As such to enable just the
- interactive debugger without the code reloading, you have to
- invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
- Setting ``use_debugger`` to ``True`` without being in debug mode
- won't catch any exceptions because there won't be any to
- catch.
-
- .. versionchanged:: 0.10
- The default port is now picked from the ``SERVER_NAME`` variable.
-
- :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
- have the server available externally as well. Defaults to
- ``'127.0.0.1'``.
- :param port: the port of the webserver. Defaults to ``5000`` or the
- port defined in the ``SERVER_NAME`` config variable if
- present.
- :param debug: if given, enable or disable debug mode.
- See :attr:`debug`.
- :param options: the options to be forwarded to the underlying
- Werkzeug server. See
- :func:`werkzeug.serving.run_simple` for more
- information.
- """
- from werkzeug.serving import run_simple
- if host is None:
- host = '127.0.0.1'
- if port is None:
- server_name = self.config['SERVER_NAME']
- if server_name and ':' in server_name:
- port = int(server_name.rsplit(':', 1)[1])
- else:
- port = 5000
- if debug is not None:
- self.debug = bool(debug)
- options.setdefault('use_reloader', self.debug)
- options.setdefault('use_debugger', self.debug)
- try:
- run_simple(host, port, self, **options)
- finally:
- # reset the first request information if the development server
- # shut down normally. This makes it possible to restart the server
- # without the reloader from an interactive shell.
- self._got_first_request = False
-
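- # Editor's note: a minimal usage sketch for run(), not part of the
- # original module; ``app`` is assumed to be a Flask instance.
- #
- #     if __name__ == '__main__':
- #         app.run(host='0.0.0.0', port=8080, debug=True)
-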
- def test_client(self, use_cookies=True, **kwargs):
- """Creates a test client for this application. For information
- about unit testing head over to :ref:`testing`.
-
- Note that if you are testing for assertions or exceptions in your
- application code, you must set ``app.testing = True`` in order for the
- exceptions to propagate to the test client. Otherwise, the exception
- will be handled by the application (not visible to the test client) and
- the only indication of an AssertionError or other exception will be a
- 500 status code response to the test client. See the :attr:`testing`
- attribute. For example::
-
- app.testing = True
- client = app.test_client()
-
- The test client can be used in a ``with`` block to defer the closing down
- of the context until the end of the ``with`` block. This is useful if
- you want to access the context locals for testing::
-
- with app.test_client() as c:
- rv = c.get('/?vodka=42')
- assert request.args['vodka'] == '42'
-
- Additionally, you may pass optional keyword arguments that will then
- be passed to the application's :attr:`test_client_class` constructor.
- For example::
-
- from flask.testing import FlaskClient
-
- class CustomClient(FlaskClient):
- def __init__(self, *args, **kwargs):
- self._authentication = kwargs.pop("authentication")
- super(CustomClient, self).__init__(*args, **kwargs)
-
- app.test_client_class = CustomClient
- client = app.test_client(authentication='Basic ....')
-
- See :class:`~flask.testing.FlaskClient` for more information.
-
- .. versionchanged:: 0.4
- added support for ``with`` block usage for the client.
-
- .. versionadded:: 0.7
- The `use_cookies` parameter was added as well as the ability
- to override the client to be used by setting the
- :attr:`test_client_class` attribute.
-
- .. versionchanged:: 0.11
- Added `**kwargs` to support passing additional keyword arguments to
- the constructor of :attr:`test_client_class`.
- """
- cls = self.test_client_class
- if cls is None:
- from flask.testing import FlaskClient as cls
- return cls(self, self.response_class, use_cookies=use_cookies, **kwargs)
-
- def open_session(self, request):
- """Creates or opens a new session. Default implementation stores all
- session data in a signed cookie. This requires that the
- :attr:`secret_key` is set. Instead of overriding this method
- we recommend replacing the :class:`session_interface`.
-
- :param request: an instance of :attr:`request_class`.
- """
- return self.session_interface.open_session(self, request)
-
- def save_session(self, session, response):
- """Saves the session if it needs updates. For the default
- implementation, check :meth:`open_session`. Instead of overriding this
- method we recommend replacing the :class:`session_interface`.
-
- :param session: the session to be saved (a
- :class:`~werkzeug.contrib.securecookie.SecureCookie`
- object)
- :param response: an instance of :attr:`response_class`
- """
- return self.session_interface.save_session(self, session, response)
-
- def make_null_session(self):
- """Creates a new instance of a missing session. Instead of overriding
- this method we recommend replacing the :class:`session_interface`.
-
- .. versionadded:: 0.7
- """
- return self.session_interface.make_null_session(self)
-
- @setupmethod
- def register_blueprint(self, blueprint, **options):
- """Registers a blueprint on the application.
-
- .. versionadded:: 0.7
- """
- first_registration = False
- if blueprint.name in self.blueprints:
- assert self.blueprints[blueprint.name] is blueprint, \
- 'A blueprint\'s name collision occurred between %r and ' \
- '%r. Both share the same name "%s". Blueprints that ' \
- 'are created on the fly need unique names.' % \
- (blueprint, self.blueprints[blueprint.name], blueprint.name)
- else:
- self.blueprints[blueprint.name] = blueprint
- self._blueprint_order.append(blueprint)
- first_registration = True
- blueprint.register(self, options, first_registration)
-
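- # Editor's note: an illustrative sketch of register_blueprint, not part
- # of the original module; ``admin_bp`` and its import path are hypothetical.
- #
- #     from yourapplication.admin import admin_bp
- #     app.register_blueprint(admin_bp, url_prefix='/admin')
-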
- def iter_blueprints(self):
- """Iterates over all blueprints by the order they were registered.
-
- .. versionadded:: 0.11
- """
- return iter(self._blueprint_order)
-
- @setupmethod
- def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
- """Connects a URL rule. Works exactly like the :meth:`route`
- decorator. If a view_func is provided it will be registered with the
- endpoint.
-
- Basically this example::
-
- @app.route('/')
- def index():
- pass
-
- Is equivalent to the following::
-
- def index():
- pass
- app.add_url_rule('/', 'index', index)
-
- If the view_func is not provided you will need to connect the endpoint
- to a view function like so::
-
- app.view_functions['index'] = index
-
- Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
- to customize the behavior via subclassing you only need to change
- this method.
-
- For more information refer to :ref:`url-route-registrations`.
-
- .. versionchanged:: 0.2
- `view_func` parameter added.
-
- .. versionchanged:: 0.6
- ``OPTIONS`` is added automatically as method.
-
- :param rule: the URL rule as string
- :param endpoint: the endpoint for the registered URL rule. Flask
- itself assumes the name of the view function as
- endpoint
- :param view_func: the function to call when serving a request to the
- provided endpoint
- :param options: the options to be forwarded to the underlying
- :class:`~werkzeug.routing.Rule` object. One difference
- from Werkzeug is the handling of the ``methods`` option:
- ``methods`` is a list of methods this rule should be limited
- to (``GET``, ``POST`` etc.). By default a rule
- just listens for ``GET`` (and implicitly ``HEAD``).
- Starting with Flask 0.6, ``OPTIONS`` is implicitly
- added and handled by the standard request handling.
- """
- if endpoint is None:
- endpoint = _endpoint_from_view_func(view_func)
- options['endpoint'] = endpoint
- methods = options.pop('methods', None)
-
- # if the methods are not given and the view_func object knows its
- # methods we can use that instead. If neither exists, we go with
- # a tuple of only ``GET`` as default.
- if methods is None:
- methods = getattr(view_func, 'methods', None) or ('GET',)
- if isinstance(methods, string_types):
- raise TypeError('Allowed methods have to be iterables of strings, '
- 'for example: @app.route(..., methods=["POST"])')
- methods = set(item.upper() for item in methods)
-
- # Methods that should always be added
- required_methods = set(getattr(view_func, 'required_methods', ()))
-
- # starting with Flask 0.8 the view_func object can disable and
- # force-enable the automatic options handling.
- provide_automatic_options = getattr(view_func,
- 'provide_automatic_options', None)
-
- if provide_automatic_options is None:
- if 'OPTIONS' not in methods:
- provide_automatic_options = True
- required_methods.add('OPTIONS')
- else:
- provide_automatic_options = False
-
- # Add the required methods now.
- methods |= required_methods
-
- rule = self.url_rule_class(rule, methods=methods, **options)
- rule.provide_automatic_options = provide_automatic_options
-
- self.url_map.add(rule)
- if view_func is not None:
- old_func = self.view_functions.get(endpoint)
- if old_func is not None and old_func != view_func:
- raise AssertionError('View function mapping is overwriting an '
- 'existing endpoint function: %s' % endpoint)
- self.view_functions[endpoint] = view_func
-
- def route(self, rule, **options):
- """A decorator that is used to register a view function for a
- given URL rule. This does the same thing as :meth:`add_url_rule`
- but is intended for decorator usage::
-
- @app.route('/')
- def index():
- return 'Hello World'
-
- For more information refer to :ref:`url-route-registrations`.
-
- :param rule: the URL rule as string
- :param endpoint: the endpoint for the registered URL rule. Flask
- itself assumes the name of the view function as
- endpoint
- :param options: the options to be forwarded to the underlying
- :class:`~werkzeug.routing.Rule` object. One difference
- from Werkzeug is the handling of the ``methods`` option:
- ``methods`` is a list of methods this rule should be limited
- to (``GET``, ``POST`` etc.). By default a rule
- just listens for ``GET`` (and implicitly ``HEAD``).
- Starting with Flask 0.6, ``OPTIONS`` is implicitly
- added and handled by the standard request handling.
- """
- def decorator(f):
- endpoint = options.pop('endpoint', None)
- self.add_url_rule(rule, endpoint, f, **options)
- return f
- return decorator
-
- @setupmethod
- def endpoint(self, endpoint):
- """A decorator to register a function as an endpoint.
- Example::
-
- @app.endpoint('example.endpoint')
- def example():
- return "example"
-
- :param endpoint: the name of the endpoint
- """
- def decorator(f):
- self.view_functions[endpoint] = f
- return f
- return decorator
-
- @staticmethod
- def _get_exc_class_and_code(exc_class_or_code):
- """Ensure that we register only exceptions as handler keys"""
- if isinstance(exc_class_or_code, integer_types):
- exc_class = default_exceptions[exc_class_or_code]
- else:
- exc_class = exc_class_or_code
-
- assert issubclass(exc_class, Exception)
-
- if issubclass(exc_class, HTTPException):
- return exc_class, exc_class.code
- else:
- return exc_class, None
-
- @setupmethod
- def errorhandler(self, code_or_exception):
- """A decorator that is used to register a function given an
- error code. Example::
-
- @app.errorhandler(404)
- def page_not_found(error):
- return 'This page does not exist', 404
-
- You can also register handlers for arbitrary exceptions::
-
- @app.errorhandler(DatabaseError)
- def special_exception_handler(error):
- return 'Database connection failed', 500
-
- You can also register a function as error handler without using
- the :meth:`errorhandler` decorator. The following example is
- equivalent to the one above::
-
- def page_not_found(error):
- return 'This page does not exist', 404
- app.error_handler_spec[None][404] = page_not_found
-
- Setting error handlers via assignments to :attr:`error_handler_spec`
- however is discouraged as it requires fiddling with nested dictionaries
- and the special case for arbitrary exception types.
-
- The first ``None`` refers to the active blueprint. If the error
- handler should be application wide, ``None`` should be used.
-
- .. versionadded:: 0.7
- Use :meth:`register_error_handler` instead of modifying
- :attr:`error_handler_spec` directly, for application wide error
- handlers.
-
- .. versionadded:: 0.7
- One can now additionally also register custom exception types
- that do not necessarily have to be a subclass of the
- :class:`~werkzeug.exceptions.HTTPException` class.
-
- :param code_or_exception: the code as integer for the handler, or
- an arbitrary exception
- """
- def decorator(f):
- self._register_error_handler(None, code_or_exception, f)
- return f
- return decorator
-
- def register_error_handler(self, code_or_exception, f):
- """Alternative error attach function to the :meth:`errorhandler`
- decorator that is more straightforward to use for non decorator
- usage.
-
- .. versionadded:: 0.7
- """
- self._register_error_handler(None, code_or_exception, f)
-
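- # Editor's note: an illustrative sketch of non-decorator error handler
- # registration, not part of the original module.
- #
- #     def page_not_found(error):
- #         return 'This page does not exist', 404
- #     app.register_error_handler(404, page_not_found)
-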
- @setupmethod
- def _register_error_handler(self, key, code_or_exception, f):
- """
- :type key: None|str
- :type code_or_exception: int|T<=Exception
- :type f: callable
- """
- if isinstance(code_or_exception, HTTPException): # old broken behavior
- raise ValueError(
- 'Tried to register a handler for an exception instance {0!r}. '
- 'Handlers can only be registered for exception classes or HTTP error codes.'
- .format(code_or_exception))
-
- exc_class, code = self._get_exc_class_and_code(code_or_exception)
-
- handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {})
- handlers[exc_class] = f
-
- @setupmethod
- def template_filter(self, name=None):
- """A decorator that is used to register custom template filter.
- You can specify a name for the filter, otherwise the function
- name will be used. Example::
-
- @app.template_filter()
- def reverse(s):
- return s[::-1]
-
- :param name: the optional name of the filter, otherwise the
- function name will be used.
- """
- def decorator(f):
- self.add_template_filter(f, name=name)
- return f
- return decorator
-
- @setupmethod
- def add_template_filter(self, f, name=None):
- """Register a custom template filter. Works exactly like the
- :meth:`template_filter` decorator.
-
- :param name: the optional name of the filter, otherwise the
- function name will be used.
- """
- self.jinja_env.filters[name or f.__name__] = f
-
- @setupmethod
- def template_test(self, name=None):
- """A decorator that is used to register custom template test.
- You can specify a name for the test, otherwise the function
- name will be used. Example::
-
- @app.template_test()
- def is_prime(n):
- if n == 2:
- return True
- for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
- if n % i == 0:
- return False
- return True
-
- .. versionadded:: 0.10
-
- :param name: the optional name of the test, otherwise the
- function name will be used.
- """
- def decorator(f):
- self.add_template_test(f, name=name)
- return f
- return decorator
-
- @setupmethod
- def add_template_test(self, f, name=None):
- """Register a custom template test. Works exactly like the
- :meth:`template_test` decorator.
-
- .. versionadded:: 0.10
-
- :param name: the optional name of the test, otherwise the
- function name will be used.
- """
- self.jinja_env.tests[name or f.__name__] = f
-
- @setupmethod
- def template_global(self, name=None):
- """A decorator that is used to register a custom template global function.
- You can specify a name for the global function, otherwise the function
- name will be used. Example::
-
- @app.template_global()
- def double(n):
- return 2 * n
-
- .. versionadded:: 0.10
-
- :param name: the optional name of the global function, otherwise the
- function name will be used.
- """
- def decorator(f):
- self.add_template_global(f, name=name)
- return f
- return decorator
-
- @setupmethod
- def add_template_global(self, f, name=None):
- """Register a custom template global function. Works exactly like the
- :meth:`template_global` decorator.
-
- .. versionadded:: 0.10
-
- :param name: the optional name of the global function, otherwise the
- function name will be used.
- """
- self.jinja_env.globals[name or f.__name__] = f
-
- @setupmethod
- def before_request(self, f):
- """Registers a function to run before each request.
-
- The function will be called without any arguments.
- If the function returns a non-None value, it's handled as
- if it was the return value from the view and further
- request handling is stopped.
- """
- self.before_request_funcs.setdefault(None, []).append(f)
- return f
-
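- # Editor's note: an illustrative sketch of before_request, not part of
- # the original module; the session key 'user_id' is hypothetical.
- #
- #     @app.before_request
- #     def load_current_user():
- #         g.user_id = session.get('user_id')
-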
- @setupmethod
- def before_first_request(self, f):
- """Registers a function to be run before the first request to this
- instance of the application.
-
- The function will be called without any arguments and its return
- value is ignored.
-
- .. versionadded:: 0.8
- """
- self.before_first_request_funcs.append(f)
- return f
-
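- # Editor's note: an illustrative sketch of before_first_request, not
- # part of the original module.
- #
- #     @app.before_first_request
- #     def initialize():
- #         app.logger.info('handling the first request')
-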
- @setupmethod
- def after_request(self, f):
- """Register a function to be run after each request.
-
- Your function must take one parameter, an instance of
- :attr:`response_class` and return a new response object or the
- same (see :meth:`process_response`).
-
- As of Flask 0.7 this function might not be executed at the end of the
- request in case an unhandled exception occurred.
- """
- self.after_request_funcs.setdefault(None, []).append(f)
- return f
-
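- # Editor's note: an illustrative sketch of after_request, not part of
- # the original module; the header value is only an example.
- #
- #     @app.after_request
- #     def set_frame_options(response):
- #         response.headers['X-Frame-Options'] = 'SAMEORIGIN'
- #         return response
-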
- @setupmethod
- def teardown_request(self, f):
- """Register a function to be run at the end of each request,
- regardless of whether there was an exception or not. These functions
- are executed when the request context is popped, even if an actual
- request was not performed.
-
- Example::
-
- ctx = app.test_request_context()
- ctx.push()
- ...
- ctx.pop()
-
- When ``ctx.pop()`` is executed in the above example, the teardown
- functions are called just before the request context moves from the
- stack of active contexts. This becomes relevant if you are using
- such constructs in tests.
-
- Generally teardown functions must take every necessary step to avoid
- failing. If they do execute code that might fail they will
- have to surround the execution of that code with try/except
- statements and log any errors that occur.
-
- When a teardown function was called because of an exception it will
- be passed an error object.
-
- The return values of teardown functions are ignored.
-
- .. admonition:: Debug Note
-
- In debug mode Flask will not tear down a request on an exception
- immediately. Instead it will keep it alive so that the interactive
- debugger can still access it. This behavior can be controlled
- by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
- """
- self.teardown_request_funcs.setdefault(None, []).append(f)
- return f
-
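- # Editor's note: an illustrative sketch of teardown_request, not part
- # of the original module; note the error argument may be ``None``.
- #
- #     @app.teardown_request
- #     def log_teardown(error):
- #         if error is not None:
- #             app.logger.error('request failed: %r', error)
-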
- @setupmethod
- def teardown_appcontext(self, f):
- """Registers a function to be called when the application context
- ends. These functions are typically also called when the request
- context is popped.
-
- Example::
-
- ctx = app.app_context()
- ctx.push()
- ...
- ctx.pop()
-
- When ``ctx.pop()`` is executed in the above example, the teardown
- functions are called just before the app context moves from the
- stack of active contexts. This becomes relevant if you are using
- such constructs in tests.
-
- Since a request context typically also manages an application
- context it would also be called when you pop a request context.
-
- When a teardown function was called because of an exception it will
- be passed an error object.
-
- The return values of teardown functions are ignored.
-
- .. versionadded:: 0.9
- """
- self.teardown_appcontext_funcs.append(f)
- return f
-
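- # Editor's note: an illustrative sketch of teardown_appcontext, not part
- # of the original module; a hypothetical per-context database connection
- # is assumed to be stored on ``g``.
- #
- #     @app.teardown_appcontext
- #     def close_db(error):
- #         db = g.pop('db', None)
- #         if db is not None:
- #             db.close()
-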
- @setupmethod
- def context_processor(self, f):
- """Registers a template context processor function."""
- self.template_context_processors[None].append(f)
- return f
-
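- # Editor's note: an illustrative sketch of context_processor, not part
- # of the original module; ``site_name`` is a hypothetical template variable.
- #
- #     @app.context_processor
- #     def inject_site_name():
- #         return {'site_name': 'Example Site'}
-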
- @setupmethod
- def shell_context_processor(self, f):
- """Registers a shell context processor function.
-
- .. versionadded:: 0.11
- """
- self.shell_context_processors.append(f)
- return f
-
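- # Editor's note: an illustrative sketch of shell_context_processor, not
- # part of the original module; ``db`` is a hypothetical extension object.
- #
- #     @app.shell_context_processor
- #     def make_shell_context():
- #         return {'db': db}
-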
- @setupmethod
- def url_value_preprocessor(self, f):
- """Registers a function as URL value preprocessor for all view
- functions of the application. It's called before the view functions
- are called and can modify the url values provided.
- """
- self.url_value_preprocessors.setdefault(None, []).append(f)
- return f
-
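- # Editor's note: an illustrative sketch of url_value_preprocessor, not
- # part of the original module; ``lang_code`` is a hypothetical URL value.
- #
- #     @app.url_value_preprocessor
- #     def pull_lang_code(endpoint, values):
- #         g.lang_code = values.pop('lang_code', None)
-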
- @setupmethod
- def url_defaults(self, f):
- """Callback function for URL defaults for all view functions of the
- application. It's called with the endpoint and values and should
- update the values passed in place.
- """
- self.url_default_functions.setdefault(None, []).append(f)
- return f
-
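- # Editor's note: an illustrative sketch of url_defaults, not part of the
- # original module; it re-injects the hypothetical ``lang_code`` value
- # removed by the preprocessor sketched above.
- #
- #     @app.url_defaults
- #     def add_lang_code(endpoint, values):
- #         values.setdefault('lang_code', getattr(g, 'lang_code', None))
-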
- def _find_error_handler(self, e):
- """Finds a registered error handler for the request’s blueprint.
- Otherwise falls back to the app, returns None if not a suitable
- handler is found.
- """
- exc_class, code = self._get_exc_class_and_code(type(e))
-
- def find_handler(handler_map):
- if not handler_map:
- return
- for cls in exc_class.__mro__:
- handler = handler_map.get(cls)
- if handler is not None:
- # cache for next time exc_class is raised
- handler_map[exc_class] = handler
- return handler
-
- # try blueprint handlers
- handler = find_handler(self.error_handler_spec
- .get(request.blueprint, {})
- .get(code))
- if handler is not None:
- return handler
-
- # fall back to app handlers
- return find_handler(self.error_handler_spec[None].get(code))
-
- def handle_http_exception(self, e):
- """Handles an HTTP exception. By default this will invoke the
- registered error handlers and fall back to returning the
- exception as response.
-
- .. versionadded:: 0.3
- """
- # Proxy exceptions don't have error codes. We want to always return
- # those unchanged as errors
- if e.code is None:
- return e
-
- handler = self._find_error_handler(e)
- if handler is None:
- return e
- return handler(e)
-
- def trap_http_exception(self, e):
- """Checks if an HTTP exception should be trapped or not. By default
- this will return ``False`` for all exceptions except for a bad request
- key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
- also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
-
- This is called for all HTTP exceptions raised by a view function.
- If it returns ``True`` for any exception the error handler for this
- exception is not called and it shows up as regular exception in the
- traceback. This is helpful for debugging implicitly raised HTTP
- exceptions.
-
- .. versionadded:: 0.8
- """
- if self.config['TRAP_HTTP_EXCEPTIONS']:
- return True
- if self.config['TRAP_BAD_REQUEST_ERRORS']:
- return isinstance(e, BadRequest)
- return False
-
- def handle_user_exception(self, e):
- """This method is called whenever an exception occurs that should be
- handled. A special case is
- :class:`~werkzeug.exceptions.HTTPException`\s, which are forwarded by
- this function to the :meth:`handle_http_exception` method. This
- function will either return a response value or reraise the
- exception with the same traceback.
-
- .. versionadded:: 0.7
- """
- exc_type, exc_value, tb = sys.exc_info()
- assert exc_value is e
-
- # ensure not to trash sys.exc_info() at that point in case someone
- # wants the traceback preserved in handle_http_exception. Of course
- # we cannot prevent users from trashing it themselves in a custom
- # trap_http_exception method so that's their fault then.
-
- if isinstance(e, HTTPException) and not self.trap_http_exception(e):
- return self.handle_http_exception(e)
-
- handler = self._find_error_handler(e)
-
- if handler is None:
- reraise(exc_type, exc_value, tb)
- return handler(e)
-
- def handle_exception(self, e):
- """Default exception handling that kicks in when an exception
- occurs that is not caught. In debug mode the exception will
- be re-raised immediately, otherwise it is logged and the handler
- for a 500 internal server error is used. If no such handler
- exists, a default 500 internal server error message is displayed.
-
- .. versionadded:: 0.3
- """
- exc_type, exc_value, tb = sys.exc_info()
-
- got_request_exception.send(self, exception=e)
- handler = self._find_error_handler(InternalServerError())
-
- if self.propagate_exceptions:
- # if we want to repropagate the exception, we can attempt to
- # raise it with the whole traceback in case we can do that
- # (the function was actually called from the except part)
- # otherwise, we just raise the error again
- if exc_value is e:
- reraise(exc_type, exc_value, tb)
- else:
- raise e
-
- self.log_exception((exc_type, exc_value, tb))
- if handler is None:
- return InternalServerError()
- return self.finalize_request(handler(e), from_error_handler=True)
-
- def log_exception(self, exc_info):
- """Logs an exception. This is called by :meth:`handle_exception`
- if debugging is disabled and right before the handler is called.
- The default implementation logs the exception as error on the
- :attr:`logger`.
-
- .. versionadded:: 0.8
- """
- self.logger.error('Exception on %s [%s]' % (
- request.path,
- request.method
- ), exc_info=exc_info)
-
- def raise_routing_exception(self, request):
- """Exceptions that are recording during routing are reraised with
- this method. During debug we are not reraising redirect requests
- for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
- a different error instead to help debug situations.
-
- :internal:
- """
- if not self.debug \
- or not isinstance(request.routing_exception, RequestRedirect) \
- or request.method in ('GET', 'HEAD', 'OPTIONS'):
- raise request.routing_exception
-
- from .debughelpers import FormDataRoutingRedirect
- raise FormDataRoutingRedirect(request)
-
- def dispatch_request(self):
- """Does the request dispatching. Matches the URL and returns the
- return value of the view or error handler. This does not have to
- be a response object. In order to convert the return value to a
- proper response object, call :func:`make_response`.
-
- .. versionchanged:: 0.7
- This no longer does the exception handling, this code was
- moved to the new :meth:`full_dispatch_request`.
- """
- req = _request_ctx_stack.top.request
- if req.routing_exception is not None:
- self.raise_routing_exception(req)
- rule = req.url_rule
- # if we provide automatic options for this URL and the
- # request came with the OPTIONS method, reply automatically
- if getattr(rule, 'provide_automatic_options', False) \
- and req.method == 'OPTIONS':
- return self.make_default_options_response()
- # otherwise dispatch to the handler for that endpoint
- return self.view_functions[rule.endpoint](**req.view_args)
-
- def full_dispatch_request(self):
- """Dispatches the request and on top of that performs request
- pre- and post-processing as well as HTTP exception catching and
- error handling.
-
- .. versionadded:: 0.7
- """
- self.try_trigger_before_first_request_functions()
- try:
- request_started.send(self)
- rv = self.preprocess_request()
- if rv is None:
- rv = self.dispatch_request()
- except Exception as e:
- rv = self.handle_user_exception(e)
- return self.finalize_request(rv)
-
- def finalize_request(self, rv, from_error_handler=False):
- """Given the return value from a view function this finalizes
- the request by converting it into a response and invoking the
- postprocessing functions. This is invoked for both normal
- request dispatching as well as error handlers.
-
- Because this means that it might be called as a result of a
- failure a special safe mode is available which can be enabled
- with the `from_error_handler` flag. If enabled, failures in
- response processing will be logged and otherwise ignored.
-
- :internal:
- """
- response = self.make_response(rv)
- try:
- response = self.process_response(response)
- request_finished.send(self, response=response)
- except Exception:
- if not from_error_handler:
- raise
- self.logger.exception('Request finalizing failed with an '
- 'error while handling an error')
- return response
-
- def try_trigger_before_first_request_functions(self):
- """Called before each request and will ensure that it triggers
- the :attr:`before_first_request_funcs` and only exactly once per
- application instance (which means process usually).
-
- :internal:
- """
- if self._got_first_request:
- return
- with self._before_request_lock:
- if self._got_first_request:
- return
- for func in self.before_first_request_funcs:
- func()
- self._got_first_request = True
-
- def make_default_options_response(self):
- """This method is called to create the default ``OPTIONS`` response.
- This can be changed through subclassing to change the default
- behavior of ``OPTIONS`` responses.
-
- .. versionadded:: 0.7
- """
- adapter = _request_ctx_stack.top.url_adapter
- if hasattr(adapter, 'allowed_methods'):
- methods = adapter.allowed_methods()
- else:
- # fallback for Werkzeug < 0.7
- methods = []
- try:
- adapter.match(method='--')
- except MethodNotAllowed as e:
- methods = e.valid_methods
- except HTTPException as e:
- pass
- rv = self.response_class()
- rv.allow.update(methods)
- return rv
-
- def should_ignore_error(self, error):
- """This is called to figure out if an error should be ignored
- or not as far as the teardown system is concerned. If this
- function returns ``True`` then the teardown handlers will not be
- passed the error.
-
- .. versionadded:: 0.10
- """
- return False
-
- def make_response(self, rv):
- """Converts the return value from a view function to a real
- response object that is an instance of :attr:`response_class`.
-
- The following types are allowed for `rv`:
-
- .. tabularcolumns:: |p{3.5cm}|p{9.5cm}|
-
- ======================= ===========================================
- :attr:`response_class` the object is returned unchanged
- :class:`str` a response object is created with the
- string as body
- :class:`unicode` a response object is created with the
- string encoded to utf-8 as body
- a WSGI function the function is called as WSGI application
- and buffered as response object
- :class:`tuple` A tuple in the form ``(response, status,
- headers)`` or ``(response, headers)``
- where `response` is any of the
- types defined here, `status` is a string
- or an integer and `headers` is a list or
- a dictionary with header values.
- ======================= ===========================================
-
- :param rv: the return value from the view function
-
- .. versionchanged:: 0.9
- Previously a tuple was interpreted as the arguments for the
- response object.
- """
- status_or_headers = headers = None
- if isinstance(rv, tuple):
- rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))
-
- if rv is None:
- raise ValueError('View function did not return a response')
-
- if isinstance(status_or_headers, (dict, list)):
- headers, status_or_headers = status_or_headers, None
-
- if not isinstance(rv, self.response_class):
- # When we create a response object directly, we let the constructor
- # set the headers and status. We do this because there can be
- # some extra logic involved when creating these objects with
- # specific values (like default content type selection).
- if isinstance(rv, (text_type, bytes, bytearray)):
- rv = self.response_class(rv, headers=headers,
- status=status_or_headers)
- headers = status_or_headers = None
- else:
- rv = self.response_class.force_type(rv, request.environ)
-
- if status_or_headers is not None:
- if isinstance(status_or_headers, string_types):
- rv.status = status_or_headers
- else:
- rv.status_code = status_or_headers
- if headers:
- rv.headers.extend(headers)
-
- return rv
-
- def create_url_adapter(self, request):
- """Creates a URL adapter for the given request. The URL adapter
- is created at a point where the request context is not yet set up
- so the request is passed explicitly.
-
- .. versionadded:: 0.6
-
- .. versionchanged:: 0.9
- This can now also be called without a request object when the
- URL adapter is created for the application context.
- """
- if request is not None:
- return self.url_map.bind_to_environ(request.environ,
- server_name=self.config['SERVER_NAME'])
- # We need at the very least the server name to be set for this
- # to work.
- if self.config['SERVER_NAME'] is not None:
- return self.url_map.bind(
- self.config['SERVER_NAME'],
- script_name=self.config['APPLICATION_ROOT'] or '/',
- url_scheme=self.config['PREFERRED_URL_SCHEME'])
-
- def inject_url_defaults(self, endpoint, values):
- """Injects the URL defaults for the given endpoint directly into
- the values dictionary passed. This is used internally and
- automatically called on URL building.
-
- .. versionadded:: 0.7
- """
- funcs = self.url_default_functions.get(None, ())
- if '.' in endpoint:
- bp = endpoint.rsplit('.', 1)[0]
- funcs = chain(funcs, self.url_default_functions.get(bp, ()))
- for func in funcs:
- func(endpoint, values)
-
- def handle_url_build_error(self, error, endpoint, values):
- """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
- """
- exc_type, exc_value, tb = sys.exc_info()
- for handler in self.url_build_error_handlers:
- try:
- rv = handler(error, endpoint, values)
- if rv is not None:
- return rv
- except BuildError as e:
- # make error available outside except block (py3)
- error = e
-
- # At this point we want to reraise the exception. If the error is
- # still the same one we can reraise it with the original traceback,
- # otherwise we raise it from here.
- if error is exc_value:
- reraise(exc_type, exc_value, tb)
- raise error
-
- def preprocess_request(self):
- """Called before the actual request dispatching and will
- call each :meth:`before_request` decorated function, passing no
- arguments.
- If any of these functions returns a value, it's handled as
- if it was the return value from the view and further
- request handling is stopped.
-
- This also triggers the :meth:`url_value_preprocessor` functions before
- the actual :meth:`before_request` functions are called.
- """
- bp = _request_ctx_stack.top.request.blueprint
-
- funcs = self.url_value_preprocessors.get(None, ())
- if bp is not None and bp in self.url_value_preprocessors:
- funcs = chain(funcs, self.url_value_preprocessors[bp])
- for func in funcs:
- func(request.endpoint, request.view_args)
-
- funcs = self.before_request_funcs.get(None, ())
- if bp is not None and bp in self.before_request_funcs:
- funcs = chain(funcs, self.before_request_funcs[bp])
- for func in funcs:
- rv = func()
- if rv is not None:
- return rv
-
- def process_response(self, response):
- """Can be overridden in order to modify the response object
- before it's sent to the WSGI server. By default this will
- call all the :meth:`after_request` decorated functions.
-
- .. versionchanged:: 0.5
- As of Flask 0.5 the functions registered for after request
- execution are called in reverse order of registration.
-
- :param response: a :attr:`response_class` object.
- :return: a new response object or the same, has to be an
- instance of :attr:`response_class`.
- """
- ctx = _request_ctx_stack.top
- bp = ctx.request.blueprint
- funcs = ctx._after_request_functions
- if bp is not None and bp in self.after_request_funcs:
- funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
- if None in self.after_request_funcs:
- funcs = chain(funcs, reversed(self.after_request_funcs[None]))
- for handler in funcs:
- response = handler(response)
- if not self.session_interface.is_null_session(ctx.session):
- self.save_session(ctx.session, response)
- return response
-
- def do_teardown_request(self, exc=_sentinel):
- """Called after the actual request dispatching and will
- call every as :meth:`teardown_request` decorated function. This is
- not actually called by the :class:`Flask` object itself but is always
- triggered when the request context is popped. That way we have a
- tighter control over certain resources under testing environments.
-
- .. versionchanged:: 0.9
- Added the `exc` argument. Previously this was always using the
- current exception information.
- """
- if exc is _sentinel:
- exc = sys.exc_info()[1]
- funcs = reversed(self.teardown_request_funcs.get(None, ()))
- bp = _request_ctx_stack.top.request.blueprint
- if bp is not None and bp in self.teardown_request_funcs:
- funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
- for func in funcs:
- func(exc)
- request_tearing_down.send(self, exc=exc)
-
- def do_teardown_appcontext(self, exc=_sentinel):
- """Called when an application context is popped. This works pretty
- much the same as :meth:`do_teardown_request` but for the application
- context.
-
- .. versionadded:: 0.9
- """
- if exc is _sentinel:
- exc = sys.exc_info()[1]
- for func in reversed(self.teardown_appcontext_funcs):
- func(exc)
- appcontext_tearing_down.send(self, exc=exc)
-
- def app_context(self):
- """Binds the application only. For as long as the application is bound
- to the current context the :data:`flask.current_app` points to that
- application. An application context is automatically created when a
- request context is pushed if necessary.
-
- Example usage::
-
- with app.app_context():
- ...
-
- .. versionadded:: 0.9
- """
- return AppContext(self)
-
- def request_context(self, environ):
- """Creates a :class:`~flask.ctx.RequestContext` from the given
- environment and binds it to the current context. This must be used in
- combination with the ``with`` statement because the request is only bound
- to the current context for the duration of the ``with`` block.
-
- Example usage::
-
- with app.request_context(environ):
- do_something_with(request)
-
- The object returned can also be used without the ``with`` statement
- which is useful for working in the shell. The example above is
- doing exactly the same as this code::
-
- ctx = app.request_context(environ)
- ctx.push()
- try:
- do_something_with(request)
- finally:
- ctx.pop()
-
- .. versionchanged:: 0.3
-           Added support for usage without the ``with`` statement, and the
-           ``with`` statement is now passed the ctx object.
-
- :param environ: a WSGI environment
- """
- return RequestContext(self, environ)
-
- def test_request_context(self, *args, **kwargs):
- """Creates a WSGI environment from the given values (see
-        :class:`werkzeug.test.EnvironBuilder` for more information; this
-        function accepts the same arguments).
- """
- from flask.testing import make_test_environ_builder
- builder = make_test_environ_builder(self, *args, **kwargs)
- try:
- return self.request_context(builder.get_environ())
- finally:
- builder.close()
-
- def wsgi_app(self, environ, start_response):
- """The actual WSGI application. This is not implemented in
- `__call__` so that middlewares can be applied without losing a
- reference to the class. So instead of doing this::
-
- app = MyMiddleware(app)
-
- It's a better idea to do this instead::
-
- app.wsgi_app = MyMiddleware(app.wsgi_app)
-
- Then you still have the original application object around and
- can continue to call methods on it.
-
- .. versionchanged:: 0.7
- The behavior of the before and after request callbacks was changed
- under error conditions and a new callback was added that will
-            always execute at the end of the request, regardless of whether
-            an error occurred. See :ref:`callbacks-and-errors`.
-
- :param environ: a WSGI environment
- :param start_response: a callable accepting a status code,
- a list of headers and an optional
- exception context to start the response
- """
- ctx = self.request_context(environ)
- ctx.push()
- error = None
- try:
- try:
- response = self.full_dispatch_request()
- except Exception as e:
- error = e
- response = self.handle_exception(e)
- except:
- error = sys.exc_info()[1]
- raise
- return response(environ, start_response)
- finally:
- if self.should_ignore_error(error):
- error = None
- ctx.auto_pop(error)
-
- def __call__(self, environ, start_response):
- """Shortcut for :attr:`wsgi_app`."""
- return self.wsgi_app(environ, start_response)
-
- def __repr__(self):
- return '<%s %r>' % (
- self.__class__.__name__,
- self.name,
- )
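The deleted app.py above documents two behaviors worth a quick illustration: make_response() accepts (body, status, headers) tuples, and wsgi_app() is kept separate from __call__ so middleware can wrap it without losing the application object. A minimal sketch under those assumptions (HeaderMiddleware and the X-* header names are illustrative, not part of Flask):

from flask import Flask

app = Flask(__name__)

@app.route('/')
def index():
    # make_response() turns this tuple into a response object:
    # body, integer status code, and a dict of extra headers.
    return 'Hello World!', 200, {'X-Example': 'tuple-return'}

class HeaderMiddleware(object):
    """Illustrative WSGI middleware that tags every response."""

    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        def tagged_start_response(status, headers, exc_info=None):
            headers.append(('X-Wrapped', 'yes'))
            return start_response(status, headers, exc_info)
        return self.wsgi_app(environ, tagged_start_response)

# Wrap only the WSGI callable, as the wsgi_app() docstring recommends,
# so methods on `app` remain reachable.
app.wsgi_app = HeaderMiddleware(app.wsgi_app)
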
diff --git a/venv/Lib/site-packages/flask/blueprints.py b/venv/Lib/site-packages/flask/blueprints.py
deleted file mode 100644
index 586a1b0..0000000
--- a/venv/Lib/site-packages/flask/blueprints.py
+++ /dev/null
@@ -1,413 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.blueprints
- ~~~~~~~~~~~~~~~~
-
- Blueprints are the recommended way to implement larger or more
- pluggable applications in Flask 0.7 and later.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-from functools import update_wrapper
-
-from .helpers import _PackageBoundObject, _endpoint_from_view_func
-
-
-class BlueprintSetupState(object):
- """Temporary holder object for registering a blueprint with the
- application. An instance of this class is created by the
- :meth:`~flask.Blueprint.make_setup_state` method and later passed
- to all register callback functions.
- """
-
- def __init__(self, blueprint, app, options, first_registration):
- #: a reference to the current application
- self.app = app
-
- #: a reference to the blueprint that created this setup state.
- self.blueprint = blueprint
-
- #: a dictionary with all options that were passed to the
- #: :meth:`~flask.Flask.register_blueprint` method.
- self.options = options
-
- #: as blueprints can be registered multiple times with the
- #: application and not everything wants to be registered
- #: multiple times on it, this attribute can be used to figure
- #: out if the blueprint was registered in the past already.
- self.first_registration = first_registration
-
- subdomain = self.options.get('subdomain')
- if subdomain is None:
- subdomain = self.blueprint.subdomain
-
- #: The subdomain that the blueprint should be active for, ``None``
- #: otherwise.
- self.subdomain = subdomain
-
- url_prefix = self.options.get('url_prefix')
- if url_prefix is None:
- url_prefix = self.blueprint.url_prefix
-
- #: The prefix that should be used for all URLs defined on the
- #: blueprint.
- self.url_prefix = url_prefix
-
- #: A dictionary with URL defaults that is added to each and every
- #: URL that was defined with the blueprint.
- self.url_defaults = dict(self.blueprint.url_values_defaults)
- self.url_defaults.update(self.options.get('url_defaults', ()))
-
- def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
- """A helper method to register a rule (and optionally a view function)
- to the application. The endpoint is automatically prefixed with the
- blueprint's name.
- """
- if self.url_prefix:
- rule = self.url_prefix + rule
- options.setdefault('subdomain', self.subdomain)
- if endpoint is None:
- endpoint = _endpoint_from_view_func(view_func)
- defaults = self.url_defaults
- if 'defaults' in options:
- defaults = dict(defaults, **options.pop('defaults'))
- self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
- view_func, defaults=defaults, **options)
-
-
-class Blueprint(_PackageBoundObject):
- """Represents a blueprint. A blueprint is an object that records
- functions that will be called with the
- :class:`~flask.blueprints.BlueprintSetupState` later to register functions
- or other things on the main application. See :ref:`blueprints` for more
- information.
-
- .. versionadded:: 0.7
- """
-
- warn_on_modifications = False
- _got_registered_once = False
-
- def __init__(self, name, import_name, static_folder=None,
- static_url_path=None, template_folder=None,
- url_prefix=None, subdomain=None, url_defaults=None,
- root_path=None):
- _PackageBoundObject.__init__(self, import_name, template_folder,
- root_path=root_path)
- self.name = name
- self.url_prefix = url_prefix
- self.subdomain = subdomain
- self.static_folder = static_folder
- self.static_url_path = static_url_path
- self.deferred_functions = []
- if url_defaults is None:
- url_defaults = {}
- self.url_values_defaults = url_defaults
-
- def record(self, func):
- """Registers a function that is called when the blueprint is
- registered on the application. This function is called with the
- state as argument as returned by the :meth:`make_setup_state`
- method.
- """
- if self._got_registered_once and self.warn_on_modifications:
- from warnings import warn
- warn(Warning('The blueprint was already registered once '
- 'but is getting modified now. These changes '
- 'will not show up.'))
- self.deferred_functions.append(func)
-
- def record_once(self, func):
- """Works like :meth:`record` but wraps the function in another
- function that will ensure the function is only called once. If the
- blueprint is registered a second time on the application, the
- function passed is not called.
- """
- def wrapper(state):
- if state.first_registration:
- func(state)
- return self.record(update_wrapper(wrapper, func))
-
- def make_setup_state(self, app, options, first_registration=False):
- """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
- object that is later passed to the register callback functions.
- Subclasses can override this to return a subclass of the setup state.
- """
- return BlueprintSetupState(self, app, options, first_registration)
-
- def register(self, app, options, first_registration=False):
- """Called by :meth:`Flask.register_blueprint` to register a blueprint
- on the application. This can be overridden to customize the register
- behavior. Keyword arguments from
- :func:`~flask.Flask.register_blueprint` are directly forwarded to this
- method in the `options` dictionary.
- """
- self._got_registered_once = True
- state = self.make_setup_state(app, options, first_registration)
- if self.has_static_folder:
-            state.add_url_rule(self.static_url_path + '/<path:filename>',
- view_func=self.send_static_file,
- endpoint='static')
-
- for deferred in self.deferred_functions:
- deferred(state)
-
- def route(self, rule, **options):
- """Like :meth:`Flask.route` but for a blueprint. The endpoint for the
- :func:`url_for` function is prefixed with the name of the blueprint.
- """
- def decorator(f):
- endpoint = options.pop("endpoint", f.__name__)
- self.add_url_rule(rule, endpoint, f, **options)
- return f
- return decorator
-
- def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
- """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
- the :func:`url_for` function is prefixed with the name of the blueprint.
- """
- if endpoint:
- assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
- self.record(lambda s:
- s.add_url_rule(rule, endpoint, view_func, **options))
-
- def endpoint(self, endpoint):
- """Like :meth:`Flask.endpoint` but for a blueprint. This does not
-        prefix the endpoint with the blueprint name; this has to be done
-        explicitly by the user of this method. If the endpoint is prefixed
- with a `.` it will be registered to the current blueprint, otherwise
- it's an application independent endpoint.
- """
- def decorator(f):
- def register_endpoint(state):
- state.app.view_functions[endpoint] = f
- self.record_once(register_endpoint)
- return f
- return decorator
-
- def app_template_filter(self, name=None):
- """Register a custom template filter, available application wide. Like
- :meth:`Flask.template_filter` but for a blueprint.
-
- :param name: the optional name of the filter, otherwise the
- function name will be used.
- """
- def decorator(f):
- self.add_app_template_filter(f, name=name)
- return f
- return decorator
-
- def add_app_template_filter(self, f, name=None):
- """Register a custom template filter, available application wide. Like
- :meth:`Flask.add_template_filter` but for a blueprint. Works exactly
- like the :meth:`app_template_filter` decorator.
-
- :param name: the optional name of the filter, otherwise the
- function name will be used.
- """
- def register_template(state):
- state.app.jinja_env.filters[name or f.__name__] = f
- self.record_once(register_template)
-
- def app_template_test(self, name=None):
- """Register a custom template test, available application wide. Like
- :meth:`Flask.template_test` but for a blueprint.
-
- .. versionadded:: 0.10
-
- :param name: the optional name of the test, otherwise the
- function name will be used.
- """
- def decorator(f):
- self.add_app_template_test(f, name=name)
- return f
- return decorator
-
- def add_app_template_test(self, f, name=None):
- """Register a custom template test, available application wide. Like
- :meth:`Flask.add_template_test` but for a blueprint. Works exactly
- like the :meth:`app_template_test` decorator.
-
- .. versionadded:: 0.10
-
- :param name: the optional name of the test, otherwise the
- function name will be used.
- """
- def register_template(state):
- state.app.jinja_env.tests[name or f.__name__] = f
- self.record_once(register_template)
-
- def app_template_global(self, name=None):
- """Register a custom template global, available application wide. Like
- :meth:`Flask.template_global` but for a blueprint.
-
- .. versionadded:: 0.10
-
- :param name: the optional name of the global, otherwise the
- function name will be used.
- """
- def decorator(f):
- self.add_app_template_global(f, name=name)
- return f
- return decorator
-
- def add_app_template_global(self, f, name=None):
- """Register a custom template global, available application wide. Like
- :meth:`Flask.add_template_global` but for a blueprint. Works exactly
- like the :meth:`app_template_global` decorator.
-
- .. versionadded:: 0.10
-
- :param name: the optional name of the global, otherwise the
- function name will be used.
- """
- def register_template(state):
- state.app.jinja_env.globals[name or f.__name__] = f
- self.record_once(register_template)
-
- def before_request(self, f):
- """Like :meth:`Flask.before_request` but for a blueprint. This function
- is only executed before each request that is handled by a function of
- that blueprint.
- """
- self.record_once(lambda s: s.app.before_request_funcs
- .setdefault(self.name, []).append(f))
- return f
-
- def before_app_request(self, f):
- """Like :meth:`Flask.before_request`. Such a function is executed
- before each request, even if outside of a blueprint.
- """
- self.record_once(lambda s: s.app.before_request_funcs
- .setdefault(None, []).append(f))
- return f
-
- def before_app_first_request(self, f):
- """Like :meth:`Flask.before_first_request`. Such a function is
- executed before the first request to the application.
- """
- self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
- return f
-
- def after_request(self, f):
- """Like :meth:`Flask.after_request` but for a blueprint. This function
- is only executed after each request that is handled by a function of
- that blueprint.
- """
- self.record_once(lambda s: s.app.after_request_funcs
- .setdefault(self.name, []).append(f))
- return f
-
- def after_app_request(self, f):
- """Like :meth:`Flask.after_request` but for a blueprint. Such a function
- is executed after each request, even if outside of the blueprint.
- """
- self.record_once(lambda s: s.app.after_request_funcs
- .setdefault(None, []).append(f))
- return f
-
- def teardown_request(self, f):
- """Like :meth:`Flask.teardown_request` but for a blueprint. This
- function is only executed when tearing down requests handled by a
- function of that blueprint. Teardown request functions are executed
- when the request context is popped, even when no actual request was
- performed.
- """
- self.record_once(lambda s: s.app.teardown_request_funcs
- .setdefault(self.name, []).append(f))
- return f
-
- def teardown_app_request(self, f):
- """Like :meth:`Flask.teardown_request` but for a blueprint. Such a
- function is executed when tearing down each request, even if outside of
- the blueprint.
- """
- self.record_once(lambda s: s.app.teardown_request_funcs
- .setdefault(None, []).append(f))
- return f
-
- def context_processor(self, f):
- """Like :meth:`Flask.context_processor` but for a blueprint. This
- function is only executed for requests handled by a blueprint.
- """
- self.record_once(lambda s: s.app.template_context_processors
- .setdefault(self.name, []).append(f))
- return f
-
- def app_context_processor(self, f):
- """Like :meth:`Flask.context_processor` but for a blueprint. Such a
-        function is executed for each request, even if outside of the blueprint.
- """
- self.record_once(lambda s: s.app.template_context_processors
- .setdefault(None, []).append(f))
- return f
-
- def app_errorhandler(self, code):
- """Like :meth:`Flask.errorhandler` but for a blueprint. This
- handler is used for all requests, even if outside of the blueprint.
- """
- def decorator(f):
- self.record_once(lambda s: s.app.errorhandler(code)(f))
- return f
- return decorator
-
- def url_value_preprocessor(self, f):
- """Registers a function as URL value preprocessor for this
- blueprint. It's called before the view functions are called and
- can modify the url values provided.
- """
- self.record_once(lambda s: s.app.url_value_preprocessors
- .setdefault(self.name, []).append(f))
- return f
-
- def url_defaults(self, f):
- """Callback function for URL defaults for this blueprint. It's called
- with the endpoint and values and should update the values passed
- in place.
- """
- self.record_once(lambda s: s.app.url_default_functions
- .setdefault(self.name, []).append(f))
- return f
-
- def app_url_value_preprocessor(self, f):
- """Same as :meth:`url_value_preprocessor` but application wide.
- """
- self.record_once(lambda s: s.app.url_value_preprocessors
- .setdefault(None, []).append(f))
- return f
-
- def app_url_defaults(self, f):
- """Same as :meth:`url_defaults` but application wide.
- """
- self.record_once(lambda s: s.app.url_default_functions
- .setdefault(None, []).append(f))
- return f
-
- def errorhandler(self, code_or_exception):
- """Registers an error handler that becomes active for this blueprint
-        only. Please be aware that routing does not happen locally to a
-        blueprint, so a 404 error handler is usually not invoked by a
-        blueprint unless the error is raised inside a view function. Another
-        special case is the 500 internal server error, which is always looked
-        up from the application.
-
- Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
- of the :class:`~flask.Flask` object.
- """
- def decorator(f):
- self.record_once(lambda s: s.app._register_error_handler(
- self.name, code_or_exception, f))
- return f
- return decorator
-
- def register_error_handler(self, code_or_exception, f):
- """Non-decorator version of the :meth:`errorhandler` error attach
- function, akin to the :meth:`~flask.Flask.register_error_handler`
- application-wide function of the :class:`~flask.Flask` object but
- for error handlers limited to this blueprint.
-
- .. versionadded:: 0.11
- """
- self.record_once(lambda s: s.app._register_error_handler(
- self.name, code_or_exception, f))
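The blueprint module above prefixes endpoints with the blueprint name and scopes hooks such as before_request to requests the blueprint handles. A minimal sketch (the 'admin' blueprint and its routes are hypothetical):

from flask import Flask, Blueprint, url_for

admin = Blueprint('admin', __name__, url_prefix='/admin')

@admin.route('/')
def dashboard():
    return 'admin dashboard'

@admin.before_request
def restrict_access():
    # Runs only before requests dispatched to this blueprint.
    pass

app = Flask(__name__)
app.register_blueprint(admin)

with app.test_request_context():
    # The endpoint is 'admin.dashboard', mapped under the url_prefix.
    assert url_for('admin.dashboard') == '/admin/'
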
diff --git a/venv/Lib/site-packages/flask/cli.py b/venv/Lib/site-packages/flask/cli.py
deleted file mode 100644
index 074ee76..0000000
--- a/venv/Lib/site-packages/flask/cli.py
+++ /dev/null
@@ -1,517 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.cli
- ~~~~~~~~~
-
- A simple command line application to run flask apps.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-import os
-import sys
-from threading import Lock, Thread
-from functools import update_wrapper
-
-import click
-
-from ._compat import iteritems, reraise
-from .helpers import get_debug_flag
-from . import __version__
-
-class NoAppException(click.UsageError):
- """Raised if an application cannot be found or loaded."""
-
-
-def find_best_app(module):
- """Given a module instance this tries to find the best possible
- application in the module or raises an exception.
- """
- from . import Flask
-
- # Search for the most common names first.
- for attr_name in 'app', 'application':
- app = getattr(module, attr_name, None)
- if app is not None and isinstance(app, Flask):
- return app
-
- # Otherwise find the only object that is a Flask instance.
- matches = [v for k, v in iteritems(module.__dict__)
- if isinstance(v, Flask)]
-
- if len(matches) == 1:
- return matches[0]
- raise NoAppException('Failed to find application in module "%s". Are '
- 'you sure it contains a Flask application? Maybe '
- 'you wrapped it in a WSGI middleware or you are '
- 'using a factory function.' % module.__name__)
-
-
-def prepare_exec_for_file(filename):
- """Given a filename this will try to calculate the python path, add it
- to the search path and return the actual module name that is expected.
- """
- module = []
-
- # Chop off file extensions or package markers
- if os.path.split(filename)[1] == '__init__.py':
- filename = os.path.dirname(filename)
- elif filename.endswith('.py'):
- filename = filename[:-3]
- else:
- raise NoAppException('The file provided (%s) does exist but is not a '
- 'valid Python file. This means that it cannot '
-                             'be used as an application. Please change the '
- 'extension to .py' % filename)
- filename = os.path.realpath(filename)
-
- dirpath = filename
- while 1:
- dirpath, extra = os.path.split(dirpath)
- module.append(extra)
- if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
- break
-
- sys.path.insert(0, dirpath)
- return '.'.join(module[::-1])
-
-
-def locate_app(app_id):
- """Attempts to locate the application."""
- __traceback_hide__ = True
- if ':' in app_id:
- module, app_obj = app_id.split(':', 1)
- else:
- module = app_id
- app_obj = None
-
- try:
- __import__(module)
- except ImportError:
- # Reraise the ImportError if it occurred within the imported module.
- # Determine this by checking whether the trace has a depth > 1.
- if sys.exc_info()[-1].tb_next:
- raise
- else:
- raise NoAppException('The file/path provided (%s) does not appear'
- ' to exist. Please verify the path is '
- 'correct. If app is not on PYTHONPATH, '
- 'ensure the extension is .py' % module)
-
- mod = sys.modules[module]
- if app_obj is None:
- app = find_best_app(mod)
- else:
- app = getattr(mod, app_obj, None)
- if app is None:
- raise RuntimeError('Failed to find application in module "%s"'
- % module)
-
- return app
-
-
-def find_default_import_path():
- app = os.environ.get('FLASK_APP')
- if app is None:
- return
- if os.path.isfile(app):
- return prepare_exec_for_file(app)
- return app
-
-
-def get_version(ctx, param, value):
- if not value or ctx.resilient_parsing:
- return
- message = 'Flask %(version)s\nPython %(python_version)s'
- click.echo(message % {
- 'version': __version__,
- 'python_version': sys.version,
- }, color=ctx.color)
- ctx.exit()
-
-version_option = click.Option(['--version'],
- help='Show the flask version',
- expose_value=False,
- callback=get_version,
- is_flag=True, is_eager=True)
-
-class DispatchingApp(object):
- """Special application that dispatches to a Flask application which
-    is imported by name in a background thread. If an error happens,
-    it is recorded and shown as part of the WSGI handling, which in the
-    case of the Werkzeug debugger means that it shows up in the browser.
- """
-
- def __init__(self, loader, use_eager_loading=False):
- self.loader = loader
- self._app = None
- self._lock = Lock()
- self._bg_loading_exc_info = None
- if use_eager_loading:
- self._load_unlocked()
- else:
- self._load_in_background()
-
- def _load_in_background(self):
- def _load_app():
- __traceback_hide__ = True
- with self._lock:
- try:
- self._load_unlocked()
- except Exception:
- self._bg_loading_exc_info = sys.exc_info()
- t = Thread(target=_load_app, args=())
- t.start()
-
- def _flush_bg_loading_exception(self):
- __traceback_hide__ = True
- exc_info = self._bg_loading_exc_info
- if exc_info is not None:
- self._bg_loading_exc_info = None
- reraise(*exc_info)
-
- def _load_unlocked(self):
- __traceback_hide__ = True
- self._app = rv = self.loader()
- self._bg_loading_exc_info = None
- return rv
-
- def __call__(self, environ, start_response):
- __traceback_hide__ = True
- if self._app is not None:
- return self._app(environ, start_response)
- self._flush_bg_loading_exception()
- with self._lock:
- if self._app is not None:
- rv = self._app
- else:
- rv = self._load_unlocked()
- return rv(environ, start_response)
-
-
-class ScriptInfo(object):
- """Help object to deal with Flask applications. This is usually not
- necessary to interface with as it's used internally in the dispatching
- to click. In future versions of Flask this object will most likely play
- a bigger role. Typically it's created automatically by the
- :class:`FlaskGroup` but you can also manually create it and pass it
- onwards as click object.
- """
-
- def __init__(self, app_import_path=None, create_app=None):
- if create_app is None:
- if app_import_path is None:
- app_import_path = find_default_import_path()
- self.app_import_path = app_import_path
- else:
- app_import_path = None
-
- #: Optionally the import path for the Flask application.
- self.app_import_path = app_import_path
- #: Optionally a function that is passed the script info to create
- #: the instance of the application.
- self.create_app = create_app
- #: A dictionary with arbitrary data that can be associated with
- #: this script info.
- self.data = {}
- self._loaded_app = None
-
- def load_app(self):
- """Loads the Flask app (if not yet loaded) and returns it. Calling
- this multiple times will just result in the already loaded app to
- be returned.
- """
- __traceback_hide__ = True
- if self._loaded_app is not None:
- return self._loaded_app
- if self.create_app is not None:
- rv = self.create_app(self)
- else:
- if not self.app_import_path:
- raise NoAppException(
- 'Could not locate Flask application. You did not provide '
- 'the FLASK_APP environment variable.\n\nFor more '
- 'information see '
- 'http://flask.pocoo.org/docs/latest/quickstart/')
- rv = locate_app(self.app_import_path)
- debug = get_debug_flag()
- if debug is not None:
- rv.debug = debug
- self._loaded_app = rv
- return rv
-
-
-pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
-
-
-def with_appcontext(f):
- """Wraps a callback so that it's guaranteed to be executed with the
- script's application context. If callbacks are registered directly
- to the ``app.cli`` object then they are wrapped with this function
- by default unless it's disabled.
- """
- @click.pass_context
- def decorator(__ctx, *args, **kwargs):
- with __ctx.ensure_object(ScriptInfo).load_app().app_context():
- return __ctx.invoke(f, *args, **kwargs)
- return update_wrapper(decorator, f)
-
-
-class AppGroup(click.Group):
- """This works similar to a regular click :class:`~click.Group` but it
- changes the behavior of the :meth:`command` decorator so that it
- automatically wraps the functions in :func:`with_appcontext`.
-
- Not to be confused with :class:`FlaskGroup`.
- """
-
- def command(self, *args, **kwargs):
- """This works exactly like the method of the same name on a regular
- :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
- unless it's disabled by passing ``with_appcontext=False``.
- """
- wrap_for_ctx = kwargs.pop('with_appcontext', True)
- def decorator(f):
- if wrap_for_ctx:
- f = with_appcontext(f)
- return click.Group.command(self, *args, **kwargs)(f)
- return decorator
-
- def group(self, *args, **kwargs):
- """This works exactly like the method of the same name on a regular
- :class:`click.Group` but it defaults the group class to
- :class:`AppGroup`.
- """
- kwargs.setdefault('cls', AppGroup)
- return click.Group.group(self, *args, **kwargs)
-
-
-class FlaskGroup(AppGroup):
- """Special subclass of the :class:`AppGroup` group that supports
- loading more commands from the configured Flask app. Normally a
- developer does not have to interface with this class but there are
- some very advanced use cases for which it makes sense to create an
- instance of this.
-
-    For information on why this is useful, see :ref:`custom-scripts`.
-
- :param add_default_commands: if this is True then the default run and
-        shell commands will be added.
- :param add_version_option: adds the ``--version`` option.
- :param create_app: an optional callback that is passed the script info
- and returns the loaded app.
- """
-
- def __init__(self, add_default_commands=True, create_app=None,
- add_version_option=True, **extra):
- params = list(extra.pop('params', None) or ())
-
- if add_version_option:
- params.append(version_option)
-
- AppGroup.__init__(self, params=params, **extra)
- self.create_app = create_app
-
- if add_default_commands:
- self.add_command(run_command)
- self.add_command(shell_command)
-
- self._loaded_plugin_commands = False
-
- def _load_plugin_commands(self):
- if self._loaded_plugin_commands:
- return
- try:
- import pkg_resources
- except ImportError:
- self._loaded_plugin_commands = True
- return
-
- for ep in pkg_resources.iter_entry_points('flask.commands'):
- self.add_command(ep.load(), ep.name)
- self._loaded_plugin_commands = True
-
- def get_command(self, ctx, name):
- self._load_plugin_commands()
-
- # We load built-in commands first as these should always be the
- # same no matter what the app does. If the app does want to
- # override this it needs to make a custom instance of this group
- # and not attach the default commands.
- #
- # This also means that the script stays functional in case the
- # application completely fails.
- rv = AppGroup.get_command(self, ctx, name)
- if rv is not None:
- return rv
-
- info = ctx.ensure_object(ScriptInfo)
- try:
- rv = info.load_app().cli.get_command(ctx, name)
- if rv is not None:
- return rv
- except NoAppException:
- pass
-
- def list_commands(self, ctx):
- self._load_plugin_commands()
-
-        # The available commands are the application's commands (if the
-        # app is available) plus the builtin commands.
- rv = set(click.Group.list_commands(self, ctx))
- info = ctx.ensure_object(ScriptInfo)
- try:
- rv.update(info.load_app().cli.list_commands(ctx))
- except Exception:
- # Here we intentionally swallow all exceptions as we don't
- # want the help page to break if the app does not exist.
- # If someone attempts to use the command we try to create
- # the app again and this will give us the error.
- pass
- return sorted(rv)
-
- def main(self, *args, **kwargs):
- obj = kwargs.get('obj')
- if obj is None:
- obj = ScriptInfo(create_app=self.create_app)
- kwargs['obj'] = obj
- kwargs.setdefault('auto_envvar_prefix', 'FLASK')
- return AppGroup.main(self, *args, **kwargs)
-
-
-@click.command('run', short_help='Runs a development server.')
-@click.option('--host', '-h', default='127.0.0.1',
- help='The interface to bind to.')
-@click.option('--port', '-p', default=5000,
- help='The port to bind to.')
-@click.option('--reload/--no-reload', default=None,
- help='Enable or disable the reloader. By default the reloader '
- 'is active if debug is enabled.')
-@click.option('--debugger/--no-debugger', default=None,
- help='Enable or disable the debugger. By default the debugger '
- 'is active if debug is enabled.')
-@click.option('--eager-loading/--lazy-loader', default=None,
- help='Enable or disable eager loading. By default eager '
- 'loading is enabled if the reloader is disabled.')
-@click.option('--with-threads/--without-threads', default=False,
- help='Enable or disable multithreading.')
-@pass_script_info
-def run_command(info, host, port, reload, debugger, eager_loading,
- with_threads):
- """Runs a local development server for the Flask application.
-
- This local server is recommended for development purposes only but it
- can also be used for simple intranet deployments. By default it will
- not support any sort of concurrency at all to simplify debugging. This
- can be changed with the --with-threads option which will enable basic
- multithreading.
-
- The reloader and debugger are by default enabled if the debug flag of
- Flask is enabled and disabled otherwise.
- """
- from werkzeug.serving import run_simple
-
- debug = get_debug_flag()
- if reload is None:
- reload = bool(debug)
- if debugger is None:
- debugger = bool(debug)
- if eager_loading is None:
- eager_loading = not reload
-
- app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
-
- # Extra startup messages. This depends a bit on Werkzeug internals to
- # not double execute when the reloader kicks in.
- if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
- # If we have an import path we can print it out now which can help
- # people understand what's being served. If we do not have an
- # import path because the app was loaded through a callback then
- # we won't print anything.
- if info.app_import_path is not None:
- print(' * Serving Flask app "%s"' % info.app_import_path)
- if debug is not None:
- print(' * Forcing debug mode %s' % (debug and 'on' or 'off'))
-
- run_simple(host, port, app, use_reloader=reload,
- use_debugger=debugger, threaded=with_threads)
-
-
-@click.command('shell', short_help='Runs a shell in the app context.')
-@with_appcontext
-def shell_command():
- """Runs an interactive Python shell in the context of a given
- Flask application. The application will populate the default
-    namespace of this shell according to its configuration.
-
- This is useful for executing small snippets of management code
-    without having to manually configure the application.
- """
- import code
- from flask.globals import _app_ctx_stack
- app = _app_ctx_stack.top.app
- banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
- sys.version,
- sys.platform,
- app.import_name,
- app.debug and ' [debug]' or '',
- app.instance_path,
- )
- ctx = {}
-
- # Support the regular Python interpreter startup script if someone
- # is using it.
- startup = os.environ.get('PYTHONSTARTUP')
- if startup and os.path.isfile(startup):
- with open(startup, 'r') as f:
- eval(compile(f.read(), startup, 'exec'), ctx)
-
- ctx.update(app.make_shell_context())
-
- code.interact(banner=banner, local=ctx)
-
-
-cli = FlaskGroup(help="""\
-This shell command acts as a general utility script for Flask applications.
-
-It loads the application configured (through the FLASK_APP environment
-variable) and then provides commands either provided by the application or
-Flask itself.
-
-The most useful commands are the "run" and "shell" commands.
-
-Example usage:
-
-\b
- %(prefix)s%(cmd)s FLASK_APP=hello.py
- %(prefix)s%(cmd)s FLASK_DEBUG=1
- %(prefix)sflask run
-""" % {
- 'cmd': os.name == 'posix' and 'export' or 'set',
- 'prefix': os.name == 'posix' and '$ ' or '',
-})
-
-
-def main(as_module=False):
- this_module = __package__ + '.cli'
- args = sys.argv[1:]
-
- if as_module:
- if sys.version_info >= (2, 7):
- name = 'python -m ' + this_module.rsplit('.', 1)[0]
- else:
- name = 'python -m ' + this_module
-
- # This module is always executed as "python -m flask.run" and as such
- # we need to ensure that we restore the actual command line so that
- # the reloader can properly operate.
- sys.argv = ['-m', this_module] + sys.argv[1:]
- else:
- name = None
-
- cli.main(args=args, prog_name=name)
-
-
-if __name__ == '__main__':
- main(as_module=True)
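The CLI module above wires commands registered on app.cli into the flask script, wrapping them in with_appcontext by default so the application context is available. A minimal sketch (the initdb command is hypothetical):

import click
from flask import Flask, current_app

app = Flask(__name__)

@app.cli.command()
def initdb():
    """Hypothetical maintenance command."""
    # with_appcontext is applied automatically, so current_app works here.
    click.echo('Initializing the database for %s.' % current_app.name)

Invoked the way the group help above describes:

    $ export FLASK_APP=hello.py
    $ flask initdb
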
diff --git a/venv/Lib/site-packages/flask/config.py b/venv/Lib/site-packages/flask/config.py
deleted file mode 100644
index 697add7..0000000
--- a/venv/Lib/site-packages/flask/config.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.config
- ~~~~~~~~~~~~
-
- Implements the configuration related objects.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-import os
-import types
-import errno
-
-from werkzeug.utils import import_string
-from ._compat import string_types, iteritems
-from . import json
-
-
-class ConfigAttribute(object):
- """Makes an attribute forward to the config"""
-
- def __init__(self, name, get_converter=None):
- self.__name__ = name
- self.get_converter = get_converter
-
- def __get__(self, obj, type=None):
- if obj is None:
- return self
- rv = obj.config[self.__name__]
- if self.get_converter is not None:
- rv = self.get_converter(rv)
- return rv
-
- def __set__(self, obj, value):
- obj.config[self.__name__] = value
-
-
-class Config(dict):
- """Works exactly like a dict but provides ways to fill it from files
- or special dictionaries. There are two common patterns to populate the
- config.
-
- Either you can fill the config from a config file::
-
- app.config.from_pyfile('yourconfig.cfg')
-
- Or alternatively you can define the configuration options in the
- module that calls :meth:`from_object` or provide an import path to
- a module that should be loaded. It is also possible to tell it to
- use the same module and with that provide the configuration values
- just before the call::
-
- DEBUG = True
- SECRET_KEY = 'development key'
- app.config.from_object(__name__)
-
- In both cases (loading from any Python file or loading from modules),
- only uppercase keys are added to the config. This makes it possible to use
- lowercase values in the config file for temporary values that are not added
- to the config or to define the config keys in the same file that implements
- the application.
-
- Probably the most interesting way to load configurations is from an
- environment variable pointing to a file::
-
- app.config.from_envvar('YOURAPPLICATION_SETTINGS')
-
- In this case before launching the application you have to set this
- environment variable to the file you want to use. On Linux and OS X
- use the export statement::
-
- export YOURAPPLICATION_SETTINGS='/path/to/config/file'
-
-    On Windows use `set` instead.
-
- :param root_path: path to which files are read relative from. When the
- config object is created by the application, this is
- the application's :attr:`~flask.Flask.root_path`.
- :param defaults: an optional dictionary of default values
- """
-
- def __init__(self, root_path, defaults=None):
- dict.__init__(self, defaults or {})
- self.root_path = root_path
-
- def from_envvar(self, variable_name, silent=False):
- """Loads a configuration from an environment variable pointing to
- a configuration file. This is basically just a shortcut with nicer
- error messages for this line of code::
-
- app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
-
- :param variable_name: name of the environment variable
- :param silent: set to ``True`` if you want silent failure for missing
- files.
- :return: bool. ``True`` if able to load config, ``False`` otherwise.
- """
- rv = os.environ.get(variable_name)
- if not rv:
- if silent:
- return False
- raise RuntimeError('The environment variable %r is not set '
- 'and as such configuration could not be '
- 'loaded. Set this variable and make it '
- 'point to a configuration file' %
- variable_name)
- return self.from_pyfile(rv, silent=silent)
-
- def from_pyfile(self, filename, silent=False):
- """Updates the values in the config from a Python file. This function
-        behaves as if the file was imported as a module with the
- :meth:`from_object` function.
-
- :param filename: the filename of the config. This can either be an
- absolute filename or a filename relative to the
- root path.
- :param silent: set to ``True`` if you want silent failure for missing
- files.
-
- .. versionadded:: 0.7
- `silent` parameter.
- """
- filename = os.path.join(self.root_path, filename)
- d = types.ModuleType('config')
- d.__file__ = filename
- try:
- with open(filename, mode='rb') as config_file:
- exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
- except IOError as e:
- if silent and e.errno in (errno.ENOENT, errno.EISDIR):
- return False
- e.strerror = 'Unable to load configuration file (%s)' % e.strerror
- raise
- self.from_object(d)
- return True
-
- def from_object(self, obj):
- """Updates the values from the given object. An object can be of one
- of the following two types:
-
- - a string: in this case the object with that name will be imported
- - an actual object reference: that object is used directly
-
- Objects are usually either modules or classes. :meth:`from_object`
- loads only the uppercase attributes of the module/class. A ``dict``
- object will not work with :meth:`from_object` because the keys of a
- ``dict`` are not attributes of the ``dict`` class.
-
- Example of module-based configuration::
-
- app.config.from_object('yourapplication.default_config')
- from yourapplication import default_config
- app.config.from_object(default_config)
-
- You should not use this function to load the actual configuration but
- rather configuration defaults. The actual config should be loaded
- with :meth:`from_pyfile` and ideally from a location not within the
- package because the package might be installed system wide.
-
- See :ref:`config-dev-prod` for an example of class-based configuration
- using :meth:`from_object`.
-
- :param obj: an import name or object
- """
- if isinstance(obj, string_types):
- obj = import_string(obj)
- for key in dir(obj):
- if key.isupper():
- self[key] = getattr(obj, key)
-
- def from_json(self, filename, silent=False):
- """Updates the values in the config from a JSON file. This function
- behaves as if the JSON object was a dictionary and passed to the
- :meth:`from_mapping` function.
-
- :param filename: the filename of the JSON file. This can either be an
- absolute filename or a filename relative to the
- root path.
- :param silent: set to ``True`` if you want silent failure for missing
- files.
-
- .. versionadded:: 0.11
- """
- filename = os.path.join(self.root_path, filename)
-
- try:
- with open(filename) as json_file:
- obj = json.loads(json_file.read())
- except IOError as e:
- if silent and e.errno in (errno.ENOENT, errno.EISDIR):
- return False
- e.strerror = 'Unable to load configuration file (%s)' % e.strerror
- raise
- return self.from_mapping(obj)
-
- def from_mapping(self, *mapping, **kwargs):
- """Updates the config like :meth:`update` ignoring items with non-upper
- keys.
-
- .. versionadded:: 0.11
- """
- mappings = []
- if len(mapping) == 1:
- if hasattr(mapping[0], 'items'):
- mappings.append(mapping[0].items())
- else:
- mappings.append(mapping[0])
- elif len(mapping) > 1:
- raise TypeError(
- 'expected at most 1 positional argument, got %d' % len(mapping)
- )
- mappings.append(kwargs.items())
- for mapping in mappings:
- for (key, value) in mapping:
- if key.isupper():
- self[key] = value
- return True
-
- def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
- """Returns a dictionary containing a subset of configuration options
- that match the specified namespace/prefix. Example usage::
-
- app.config['IMAGE_STORE_TYPE'] = 'fs'
- app.config['IMAGE_STORE_PATH'] = '/var/app/images'
- app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
- image_store_config = app.config.get_namespace('IMAGE_STORE_')
-
- The resulting dictionary `image_store_config` would look like::
-
- {
- 'type': 'fs',
- 'path': '/var/app/images',
- 'base_url': 'http://img.website.com'
- }
-
- This is often useful when configuration options map directly to
- keyword arguments in functions or class constructors.
-
- :param namespace: a configuration namespace
- :param lowercase: a flag indicating if the keys of the resulting
- dictionary should be lowercase
- :param trim_namespace: a flag indicating if the keys of the resulting
- dictionary should not include the namespace
-
- .. versionadded:: 0.11
- """
- rv = {}
- for k, v in iteritems(self):
- if not k.startswith(namespace):
- continue
- if trim_namespace:
- key = k[len(namespace):]
- else:
- key = k
- if lowercase:
- key = key.lower()
- rv[key] = v
- return rv
-
- def __repr__(self):
- return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
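The config module above loads only uppercase keys and supports layering class-based defaults under deployment overrides. A minimal sketch (DefaultConfig and YOURAPPLICATION_SETTINGS are illustrative names):

from flask import Flask

app = Flask(__name__)

class DefaultConfig(object):
    IMAGE_STORE_TYPE = 'fs'
    IMAGE_STORE_PATH = '/var/app/images'
    lowercase_key = 'ignored: only uppercase attributes are loaded'

app.config.from_object(DefaultConfig)

# Overlay deployment values from a file named by an environment variable;
# silent=True makes this a no-op when the variable is unset.
app.config.from_envvar('YOURAPPLICATION_SETTINGS', silent=True)

# Collapse a prefix into keyword-argument-style keys.
image_store = app.config.get_namespace('IMAGE_STORE_')
assert image_store == {'type': 'fs', 'path': '/var/app/images'}
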
diff --git a/venv/Lib/site-packages/flask/ctx.py b/venv/Lib/site-packages/flask/ctx.py
deleted file mode 100644
index 480d9c5..0000000
--- a/venv/Lib/site-packages/flask/ctx.py
+++ /dev/null
@@ -1,410 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.ctx
- ~~~~~~~~~
-
- Implements the objects required to keep the context.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-import sys
-from functools import update_wrapper
-
-from werkzeug.exceptions import HTTPException
-
-from .globals import _request_ctx_stack, _app_ctx_stack
-from .signals import appcontext_pushed, appcontext_popped
-from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise
-
-
-# a singleton sentinel value for parameter defaults
-_sentinel = object()
-
-
-class _AppCtxGlobals(object):
- """A plain object."""
-
- def get(self, name, default=None):
- return self.__dict__.get(name, default)
-
- def pop(self, name, default=_sentinel):
- if default is _sentinel:
- return self.__dict__.pop(name)
- else:
- return self.__dict__.pop(name, default)
-
- def setdefault(self, name, default=None):
- return self.__dict__.setdefault(name, default)
-
- def __contains__(self, item):
- return item in self.__dict__
-
- def __iter__(self):
- return iter(self.__dict__)
-
- def __repr__(self):
- top = _app_ctx_stack.top
- if top is not None:
-            return '<flask.g of %r>' % top.app.name
- return object.__repr__(self)
-
-
-def after_this_request(f):
- """Executes a function after this request. This is useful to modify
- response objects. The function is passed the response object and has
- to return the same or a new one.
-
- Example::
-
- @app.route('/')
- def index():
- @after_this_request
- def add_header(response):
- response.headers['X-Foo'] = 'Parachute'
- return response
- return 'Hello World!'
-
- This is more useful if a function other than the view function wants to
- modify a response. For instance think of a decorator that wants to add
- some headers without converting the return value into a response object.
-
- .. versionadded:: 0.9
- """
- _request_ctx_stack.top._after_request_functions.append(f)
- return f
-
-
-def copy_current_request_context(f):
- """A helper function that decorates a function to retain the current
- request context. This is useful when working with greenlets. The moment
- the function is decorated a copy of the request context is created and
- then pushed when the function is called.
-
- Example::
-
- import gevent
- from flask import copy_current_request_context
-
- @app.route('/')
- def index():
- @copy_current_request_context
- def do_some_work():
- # do some work here, it can access flask.request like you
- # would otherwise in the view function.
- ...
- gevent.spawn(do_some_work)
- return 'Regular response'
-
- .. versionadded:: 0.10
- """
- top = _request_ctx_stack.top
- if top is None:
- raise RuntimeError('This decorator can only be used at local scopes '
- 'when a request context is on the stack. For instance within '
- 'view functions.')
- reqctx = top.copy()
- def wrapper(*args, **kwargs):
- with reqctx:
- return f(*args, **kwargs)
- return update_wrapper(wrapper, f)
-
-
-def has_request_context():
- """If you have code that wants to test if a request context is there or
- not this function can be used. For instance, you may want to take advantage
- of request information if the request object is available, but fail
- silently if it is unavailable.
-
- ::
-
- class User(db.Model):
-
- def __init__(self, username, remote_addr=None):
- self.username = username
- if remote_addr is None and has_request_context():
- remote_addr = request.remote_addr
- self.remote_addr = remote_addr
-
- Alternatively you can also just test any of the context bound objects
-    (such as :class:`request` or :class:`g`) for truthiness::
-
- class User(db.Model):
-
- def __init__(self, username, remote_addr=None):
- self.username = username
- if remote_addr is None and request:
- remote_addr = request.remote_addr
- self.remote_addr = remote_addr
-
- .. versionadded:: 0.7
- """
- return _request_ctx_stack.top is not None
-
-
-def has_app_context():
- """Works like :func:`has_request_context` but for the application
- context. You can also just do a boolean check on the
- :data:`current_app` object instead.
-
- .. versionadded:: 0.9
- """
- return _app_ctx_stack.top is not None
-
-
-class AppContext(object):
- """The application context binds an application object implicitly
- to the current thread or greenlet, similar to how the
- :class:`RequestContext` binds request information. The application
- context is also implicitly created if a request context is created
- but the application is not on top of the individual application
- context.
- """
-
- def __init__(self, app):
- self.app = app
- self.url_adapter = app.create_url_adapter(None)
- self.g = app.app_ctx_globals_class()
-
-        # Like request contexts, app contexts can be pushed multiple times,
-        # but for them a basic "refcount" is enough to track them.
- self._refcnt = 0
-
- def push(self):
- """Binds the app context to the current context."""
- self._refcnt += 1
- if hasattr(sys, 'exc_clear'):
- sys.exc_clear()
- _app_ctx_stack.push(self)
- appcontext_pushed.send(self.app)
-
- def pop(self, exc=_sentinel):
- """Pops the app context."""
- try:
- self._refcnt -= 1
- if self._refcnt <= 0:
- if exc is _sentinel:
- exc = sys.exc_info()[1]
- self.app.do_teardown_appcontext(exc)
- finally:
- rv = _app_ctx_stack.pop()
- assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
- % (rv, self)
- appcontext_popped.send(self.app)
-
- def __enter__(self):
- self.push()
- return self
-
- def __exit__(self, exc_type, exc_value, tb):
- self.pop(exc_value)
-
- if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
- reraise(exc_type, exc_value, tb)
-
-
-class RequestContext(object):
- """The request context contains all request relevant information. It is
- created at the beginning of the request and pushed to the
- `_request_ctx_stack` and removed at the end of it. It will create the
- URL adapter and request object for the WSGI environment provided.
-
- Do not attempt to use this class directly, instead use
- :meth:`~flask.Flask.test_request_context` and
- :meth:`~flask.Flask.request_context` to create this object.
-
- When the request context is popped, it will evaluate all the
- functions registered on the application for teardown execution
- (:meth:`~flask.Flask.teardown_request`).
-
- The request context is automatically popped at the end of the request
- for you. In debug mode the request context is kept around if
- exceptions happen so that interactive debuggers have a chance to
- introspect the data. With 0.4 this can also be forced for requests
- that did not fail and outside of ``DEBUG`` mode. By setting
- ``'flask._preserve_context'`` to ``True`` on the WSGI environment the
- context will not pop itself at the end of the request. This is used by
- the :meth:`~flask.Flask.test_client` for example to implement the
- deferred cleanup functionality.
-
- You might find this helpful for unittests where you need the
- information from the context local around for a little longer. Make
- sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
- that situation, otherwise your unittests will leak memory.
- """
-
- def __init__(self, app, environ, request=None):
- self.app = app
- if request is None:
- request = app.request_class(environ)
- self.request = request
- self.url_adapter = app.create_url_adapter(self.request)
- self.flashes = None
- self.session = None
-
- # Request contexts can be pushed multiple times and interleaved with
- # other request contexts. They are only truly removed once the last
- # level is popped. Additionally, if an application context is missing,
- # one is created implicitly, so for each level we record that fact.
- self._implicit_app_ctx_stack = []
-
- # indicator if the context was preserved. Next time another context
- # is pushed the preserved context is popped.
- self.preserved = False
-
- # remembers the exception for pop if there is one in case the context
- # preservation kicks in.
- self._preserved_exc = None
-
- # Functions that should be executed after the request on the response
- # object. These will be called before the regular "after_request"
- # functions.
- self._after_request_functions = []
-
- self.match_request()
-
- def _get_g(self):
- return _app_ctx_stack.top.g
- def _set_g(self, value):
- _app_ctx_stack.top.g = value
- g = property(_get_g, _set_g)
- del _get_g, _set_g
-
- def copy(self):
- """Creates a copy of this request context with the same request object.
- This can be used to move a request context to a different greenlet.
- Because the actual request object is the same this cannot be used to
- move a request context to a different thread unless access to the
- request object is locked.
-
- .. versionadded:: 0.10
- """
- return self.__class__(self.app,
- environ=self.request.environ,
- request=self.request
- )
-
- def match_request(self):
- """Can be overridden by a subclass to hook into the matching
- of the request.
- """
- try:
- url_rule, self.request.view_args = \
- self.url_adapter.match(return_rule=True)
- self.request.url_rule = url_rule
- except HTTPException as e:
- self.request.routing_exception = e
-
- def push(self):
- """Binds the request context to the current context."""
- # If an exception occurs in debug mode or if context preservation is
- # activated under exception situations exactly one context stays
- # on the stack. The rationale is that you want to access that
- information under debug situations. However if someone forgets to
- pop that context again we want to make sure that on the next push
- it's invalidated, otherwise we run the risk of leaking memory. This
- is usually only a problem in test suites since this functionality
- is not active in production environments.
- top = _request_ctx_stack.top
- if top is not None and top.preserved:
- top.pop(top._preserved_exc)
-
- # Before we push the request context we have to ensure that there
- # is an application context.
- app_ctx = _app_ctx_stack.top
- if app_ctx is None or app_ctx.app != self.app:
- app_ctx = self.app.app_context()
- app_ctx.push()
- self._implicit_app_ctx_stack.append(app_ctx)
- else:
- self._implicit_app_ctx_stack.append(None)
-
- if hasattr(sys, 'exc_clear'):
- sys.exc_clear()
-
- _request_ctx_stack.push(self)
-
- # Open the session at the moment that the request context is
- # available. This allows a custom open_session method to use the
- # request context (e.g. code that accesses database information
- # stored on `g` instead of the appcontext).
- self.session = self.app.open_session(self.request)
- if self.session is None:
- self.session = self.app.make_null_session()
-
- def pop(self, exc=_sentinel):
- """Pops the request context and unbinds it by doing that. This will
- also trigger the execution of functions registered by the
- :meth:`~flask.Flask.teardown_request` decorator.
-
- .. versionchanged:: 0.9
- Added the `exc` argument.
- """
- app_ctx = self._implicit_app_ctx_stack.pop()
-
- try:
- clear_request = False
- if not self._implicit_app_ctx_stack:
- self.preserved = False
- self._preserved_exc = None
- if exc is _sentinel:
- exc = sys.exc_info()[1]
- self.app.do_teardown_request(exc)
-
- # If this interpreter supports clearing the exception information
- # we do that now. This will only go into effect on Python 2.x,
- # on 3.x it disappears automatically at the end of the exception
- # stack.
- if hasattr(sys, 'exc_clear'):
- sys.exc_clear()
-
- request_close = getattr(self.request, 'close', None)
- if request_close is not None:
- request_close()
- clear_request = True
- finally:
- rv = _request_ctx_stack.pop()
-
- # get rid of circular dependencies at the end of the request
- # so that we don't require the GC to be active.
- if clear_request:
- rv.request.environ['werkzeug.request'] = None
-
- # Get rid of the app as well if necessary.
- if app_ctx is not None:
- app_ctx.pop(exc)
-
- assert rv is self, 'Popped wrong request context. ' \
- '(%r instead of %r)' % (rv, self)
-
- def auto_pop(self, exc):
- if self.request.environ.get('flask._preserve_context') or \
- (exc is not None and self.app.preserve_context_on_exception):
- self.preserved = True
- self._preserved_exc = exc
- else:
- self.pop(exc)
-
- def __enter__(self):
- self.push()
- return self
-
- def __exit__(self, exc_type, exc_value, tb):
- # do not pop the request stack if we are in debug mode and an
- # exception happened. This will allow the debugger to still
- # access the request object in the interactive shell. Furthermore
- # the context can be force kept alive for the test client.
- # See flask.testing for how this works.
- self.auto_pop(exc_value)
-
- if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
- reraise(exc_type, exc_value, tb)
-
- def __repr__(self):
- return '<%s \'%s\' [%s] of %s>' % (
- self.__class__.__name__,
- self.request.url,
- self.request.method,
- self.app.name,
- )
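
The usual way to exercise a RequestContext outside a real request is Flask's test_request_context, which drives push() and auto_pop() through the context-manager protocol shown above. A minimal sketch:

    from flask import Flask, request

    app = Flask(__name__)

    with app.test_request_context('/hello?name=World'):
        # push() has run: the URL was matched and the session opened.
        assert request.args['name'] == 'World'
    # auto_pop() ran on exit and executed any teardown_request functions.
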
diff --git a/venv/Lib/site-packages/flask/debughelpers.py b/venv/Lib/site-packages/flask/debughelpers.py
deleted file mode 100644
index 90710dd..0000000
--- a/venv/Lib/site-packages/flask/debughelpers.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.debughelpers
- ~~~~~~~~~~~~~~~~~~
-
- Various helpers to make the development experience better.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-from ._compat import implements_to_string, text_type
-from .app import Flask
-from .blueprints import Blueprint
-from .globals import _request_ctx_stack
-
-
-class UnexpectedUnicodeError(AssertionError, UnicodeError):
- """Raised in places where we want some better error reporting for
- unexpected unicode or binary data.
- """
-
-
-@implements_to_string
-class DebugFilesKeyError(KeyError, AssertionError):
- """Raised from request.files during debugging. The idea is that it can
- provide a better error message than just a generic KeyError/BadRequest.
- """
-
- def __init__(self, request, key):
- form_matches = request.form.getlist(key)
- buf = ['You tried to access the file "%s" in the request.files '
- 'dictionary but it does not exist. The mimetype for the request '
- 'is "%s" instead of "multipart/form-data" which means that no '
- 'file contents were transmitted. To fix this error you should '
- 'provide enctype="multipart/form-data" in your form.' %
- (key, request.mimetype)]
- if form_matches:
- buf.append('\n\nThe browser instead transmitted some file names. '
- 'This was submitted: %s' % ', '.join('"%s"' % x
- for x in form_matches))
- self.msg = ''.join(buf)
-
- def __str__(self):
- return self.msg
-
-
-class FormDataRoutingRedirect(AssertionError):
- """This exception is raised by Flask in debug mode if it detects a
- redirect caused by the routing system when the request method is not
- GET, HEAD or OPTIONS. Reasoning: form data will be dropped.
- """
-
- def __init__(self, request):
- exc = request.routing_exception
- buf = ['A request was sent to this URL (%s) but a redirect was '
- 'issued automatically by the routing system to "%s".'
- % (request.url, exc.new_url)]
-
- # In case just a slash was appended we can be extra helpful
- if request.base_url + '/' == exc.new_url.split('?')[0]:
- buf.append(' The URL was defined with a trailing slash so '
- 'Flask will automatically redirect to the URL '
- 'with the trailing slash if it was accessed '
- 'without one.')
-
- buf.append(' Make sure to directly send your %s-request to this URL '
- 'since we can\'t make browsers or HTTP clients redirect '
- 'with form data reliably or without user interaction.' %
- request.method)
- buf.append('\n\nNote: this exception is only raised in debug mode')
- AssertionError.__init__(self, ''.join(buf).encode('utf-8'))
-
-
-def attach_enctype_error_multidict(request):
- """Since Flask 0.8 we're monkeypatching the files object in case a
- request is detected that does not use multipart form data but the files
- object is accessed.
- """
- oldcls = request.files.__class__
- class newcls(oldcls):
- def __getitem__(self, key):
- try:
- return oldcls.__getitem__(self, key)
- except KeyError:
- if key not in request.form:
- raise
- raise DebugFilesKeyError(request, key)
- newcls.__name__ = oldcls.__name__
- newcls.__module__ = oldcls.__module__
- request.files.__class__ = newcls
-
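
A sketch of the situation this patch targets: in debug mode, a form posted without the multipart enctype makes request.files raise the richer error. This assumes the Flask test client and debug-mode exception propagation; the route and field names are illustrative.

    from flask import Flask, request
    from flask.debughelpers import DebugFilesKeyError

    app = Flask(__name__)
    app.debug = True   # the files multidict is only patched in debug mode

    @app.route('/upload', methods=['POST'])
    def upload():
        return request.files['avatar'].filename

    try:
        # No enctype='multipart/form-data': only the file *name* is sent.
        app.test_client().post('/upload', data={'avatar': 'photo.png'})
    except DebugFilesKeyError as exc:
        print(exc)     # explains the missing enctype and mentions 'photo.png'
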
-
-def _dump_loader_info(loader):
- yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__)
- for key, value in sorted(loader.__dict__.items()):
- if key.startswith('_'):
- continue
- if isinstance(value, (tuple, list)):
- if not all(isinstance(x, (str, text_type)) for x in value):
- continue
- yield '%s:' % key
- for item in value:
- yield ' - %s' % item
- continue
- elif not isinstance(value, (str, text_type, int, float, bool)):
- continue
- yield '%s: %r' % (key, value)
-
-
-def explain_template_loading_attempts(app, template, attempts):
- """This should help developers understand what failed"""
- info = ['Locating template "%s":' % template]
- total_found = 0
- blueprint = None
- reqctx = _request_ctx_stack.top
- if reqctx is not None and reqctx.request.blueprint is not None:
- blueprint = reqctx.request.blueprint
-
- for idx, (loader, srcobj, triple) in enumerate(attempts):
- if isinstance(srcobj, Flask):
- src_info = 'application "%s"' % srcobj.import_name
- elif isinstance(srcobj, Blueprint):
- src_info = 'blueprint "%s" (%s)' % (srcobj.name,
- srcobj.import_name)
- else:
- src_info = repr(srcobj)
-
- info.append('% 5d: trying loader of %s' % (
- idx + 1, src_info))
-
- for line in _dump_loader_info(loader):
- info.append(' %s' % line)
-
- if triple is None:
- detail = 'no match'
- else:
- detail = 'found (%r)' % (triple[1] or '')
- total_found += 1
- info.append(' -> %s' % detail)
-
- seems_fishy = False
- if total_found == 0:
- info.append('Error: the template could not be found.')
- seems_fishy = True
- elif total_found > 1:
- info.append('Warning: multiple loaders returned a match for the template.')
- seems_fishy = True
-
- if blueprint is not None and seems_fishy:
- info.append(' The template was looked up from an endpoint that '
- 'belongs to the blueprint "%s".' % blueprint)
- info.append(' Maybe you did not place a template in the right folder?')
- info.append(' See http://flask.pocoo.org/docs/blueprints/#templates')
-
- app.logger.info('\n'.join(info))
diff --git a/venv/Lib/site-packages/flask/ext/__init__.py b/venv/Lib/site-packages/flask/ext/__init__.py
deleted file mode 100644
index 051f44a..0000000
--- a/venv/Lib/site-packages/flask/ext/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.ext
- ~~~~~~~~~
-
- Redirect imports for extensions. This module basically makes it possible
- for us to transition from flaskext.foo to flask_foo without having to
- force all extensions to upgrade at the same time.
-
- When a user does ``from flask.ext.foo import bar`` it will attempt to
- import ``from flask_foo import bar`` first and when that fails it will
- try to import ``from flaskext.foo import bar``.
-
- We're switching from namespace packages because it was just too painful for
- everybody involved.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-
-def setup():
- from ..exthook import ExtensionImporter
- importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__)
- importer.install()
-
-
-setup()
-del setup
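
In practice the redirect is invisible to the importing code. A sketch, assuming the Flask-WTF package from this virtualenv is installed:

    # Deprecated spelling; the import hook redirects it and emits an
    # ExtDeprecationWarning:
    from flask.ext.wtf import FlaskForm

    # Equivalent modern import that the hook resolves to:
    from flask_wtf import FlaskForm
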
diff --git a/venv/Lib/site-packages/flask/exthook.py b/venv/Lib/site-packages/flask/exthook.py
deleted file mode 100644
index d884280..0000000
--- a/venv/Lib/site-packages/flask/exthook.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.exthook
- ~~~~~~~~~~~~~
-
- Redirect imports for extensions. This module basically makes it possible
- for us to transition from flaskext.foo to flask_foo without having to
- force all extensions to upgrade at the same time.
-
- When a user does ``from flask.ext.foo import bar`` it will attempt to
- import ``from flask_foo import bar`` first and when that fails it will
- try to import ``from flaskext.foo import bar``.
-
- We're switching from namespace packages because it was just too painful for
- everybody involved.
-
- This is used by `flask.ext`.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-import sys
-import os
-import warnings
-from ._compat import reraise
-
-
-class ExtDeprecationWarning(DeprecationWarning):
- pass
-
-warnings.simplefilter('always', ExtDeprecationWarning)
-
-
-class ExtensionImporter(object):
- """This importer redirects imports from this submodule to other locations.
- This makes it possible to transition from the old flaskext.name to the
- newer flask_name without people having a hard time.
- """
-
- def __init__(self, module_choices, wrapper_module):
- self.module_choices = module_choices
- self.wrapper_module = wrapper_module
- self.prefix = wrapper_module + '.'
- self.prefix_cutoff = wrapper_module.count('.') + 1
-
- def __eq__(self, other):
- return self.__class__.__module__ == other.__class__.__module__ and \
- self.__class__.__name__ == other.__class__.__name__ and \
- self.wrapper_module == other.wrapper_module and \
- self.module_choices == other.module_choices
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def install(self):
- sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
-
- def find_module(self, fullname, path=None):
- if fullname.startswith(self.prefix) and \
- fullname != 'flask.ext.ExtDeprecationWarning':
- return self
-
- def load_module(self, fullname):
- if fullname in sys.modules:
- return sys.modules[fullname]
-
- modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
-
- warnings.warn(
- "Importing flask.ext.{x} is deprecated, use flask_{x} instead."
- .format(x=modname), ExtDeprecationWarning, stacklevel=2
- )
-
- for path in self.module_choices:
- realname = path % modname
- try:
- __import__(realname)
- except ImportError:
- exc_type, exc_value, tb = sys.exc_info()
- # since we only establish the entry in sys.modules at the
- # very end this seems to be redundant, but if recursive imports
- # happen we will call into the same import a second time.
- # On the second invocation we still don't have an entry for
- # fullname in sys.modules, but we will end up with the same
- # fake module name and that import will succeed since this
- # one already has a temporary entry in the modules dict.
- # Since this one "succeeded" temporarily that second
- # invocation now will have created a fullname entry in
- # sys.modules which we have to kill.
- sys.modules.pop(fullname, None)
-
- # If it's an important traceback we reraise it, otherwise
- # we swallow it and try the next choice. The skipped frame
- # is the one from __import__ above which we don't care about
- if self.is_important_traceback(realname, tb):
- reraise(exc_type, exc_value, tb.tb_next)
- continue
- module = sys.modules[fullname] = sys.modules[realname]
- if '.' not in modname:
- setattr(sys.modules[self.wrapper_module], modname, module)
-
- if realname.startswith('flaskext.'):
- warnings.warn(
- "Detected extension named flaskext.{x}, please rename it "
- "to flask_{x}. The old form is deprecated."
- .format(x=modname), ExtDeprecationWarning
- )
-
- return module
- raise ImportError('No module named %s' % fullname)
-
- def is_important_traceback(self, important_module, tb):
- """Walks a traceback's frames and checks if any of the frames
- originated in the given important module. If that is the case then we
- were able to import the module itself but apparently something went
- wrong when the module was imported. (E.g. an import inside the module failed.)
- """
- while tb is not None:
- if self.is_important_frame(important_module, tb):
- return True
- tb = tb.tb_next
- return False
-
- def is_important_frame(self, important_module, tb):
- """Checks a single frame if it's important."""
- g = tb.tb_frame.f_globals
- if '__name__' not in g:
- return False
-
- module_name = g['__name__']
-
- # Python 2.7 Behavior. Modules are cleaned up late so the
- # name shows up properly here. Success!
- if module_name == important_module:
- return True
-
- # Some python versions will clean up modules so early that the
- # module name at that point is no longer set. Try guessing from
- # the filename then.
- filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
- test_string = os.path.sep + important_module.replace('.', os.path.sep)
- return test_string + '.py' in filename or \
- test_string + os.path.sep + '__init__.py' in filename
diff --git a/venv/Lib/site-packages/flask/globals.py b/venv/Lib/site-packages/flask/globals.py
deleted file mode 100644
index 0b70a3e..0000000
--- a/venv/Lib/site-packages/flask/globals.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.globals
- ~~~~~~~~~~~~~
-
- Defines all the global objects that are proxies to the current
- active context.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-from functools import partial
-from werkzeug.local import LocalStack, LocalProxy
-
-
-_request_ctx_err_msg = '''\
-Working outside of request context.
-
-This typically means that you attempted to use functionality that needed
-an active HTTP request. Consult the documentation on testing for
-information about how to avoid this problem.\
-'''
-_app_ctx_err_msg = '''\
-Working outside of application context.
-
-This typically means that you attempted to use functionality that needed
-to interface with the current application object in some way. To solve
-this, set up an application context with app.app_context(). See the
-documentation for more information.\
-'''
-
-
-def _lookup_req_object(name):
- top = _request_ctx_stack.top
- if top is None:
- raise RuntimeError(_request_ctx_err_msg)
- return getattr(top, name)
-
-
-def _lookup_app_object(name):
- top = _app_ctx_stack.top
- if top is None:
- raise RuntimeError(_app_ctx_err_msg)
- return getattr(top, name)
-
-
-def _find_app():
- top = _app_ctx_stack.top
- if top is None:
- raise RuntimeError(_app_ctx_err_msg)
- return top.app
-
-
-# context locals
-_request_ctx_stack = LocalStack()
-_app_ctx_stack = LocalStack()
-current_app = LocalProxy(_find_app)
-request = LocalProxy(partial(_lookup_req_object, 'request'))
-session = LocalProxy(partial(_lookup_req_object, 'session'))
-g = LocalProxy(partial(_lookup_app_object, 'g'))
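
A sketch of how these proxies resolve against the stacks, using only the public API: inside a pushed context they forward to the real objects; outside, they raise the RuntimeError messages defined above.

    from flask import Flask, current_app, g

    app = Flask(__name__)

    with app.app_context():
        g.db = 'fake connection'          # stored on the top AppContext's g
        assert current_app.name == app.name

    try:
        current_app.name                  # no context is pushed any more
    except RuntimeError as exc:
        print(exc)   # "Working outside of application context. ..."
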
diff --git a/venv/Lib/site-packages/flask/helpers.py b/venv/Lib/site-packages/flask/helpers.py
deleted file mode 100644
index 4bb1d1c..0000000
--- a/venv/Lib/site-packages/flask/helpers.py
+++ /dev/null
@@ -1,966 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.helpers
- ~~~~~~~~~~~~~
-
- Implements various helpers.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-import os
-import sys
-import pkgutil
-import posixpath
-import mimetypes
-from time import time
-from zlib import adler32
-from threading import RLock
-from werkzeug.routing import BuildError
-from functools import update_wrapper
-
-try:
- from werkzeug.urls import url_quote
-except ImportError:
- from urlparse import quote as url_quote
-
-from werkzeug.datastructures import Headers, Range
-from werkzeug.exceptions import BadRequest, NotFound, \
- RequestedRangeNotSatisfiable
-
-# this was moved in 0.7
-try:
- from werkzeug.wsgi import wrap_file
-except ImportError:
- from werkzeug.utils import wrap_file
-
-from jinja2 import FileSystemLoader
-
-from .signals import message_flashed
-from .globals import session, _request_ctx_stack, _app_ctx_stack, \
- current_app, request
-from ._compat import string_types, text_type
-
-
-# sentinel
-_missing = object()
-
-
-# what separators does this operating system provide that are not a slash?
-# this is used by the send_from_directory function to ensure that nobody is
-# able to access files from outside the intended directory.
-_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
- if sep not in (None, '/'))
-
-
-def get_debug_flag(default=None):
- val = os.environ.get('FLASK_DEBUG')
- if not val:
- return default
- return val not in ('0', 'false', 'no')
-
-
-def _endpoint_from_view_func(view_func):
- """Internal helper that returns the default endpoint for a given
- function. This is always the function name.
- """
- assert view_func is not None, 'expected view func if endpoint ' \
- 'is not provided.'
- return view_func.__name__
-
-
-def stream_with_context(generator_or_function):
- """Request contexts disappear when the response is started on the server.
- This is done for efficiency reasons and to make it less likely to encounter
- memory leaks with badly written WSGI middlewares. The downside is that if
- you are using streamed responses, the generator cannot access request bound
- information any more.
-
- This function however can help you keep the context around for longer::
-
- from flask import stream_with_context, request, Response
-
- @app.route('/stream')
- def streamed_response():
- @stream_with_context
- def generate():
- yield 'Hello '
- yield request.args['name']
- yield '!'
- return Response(generate())
-
- Alternatively it can also be used around a specific generator::
-
- from flask import stream_with_context, request, Response
-
- @app.route('/stream')
- def streamed_response():
- def generate():
- yield 'Hello '
- yield request.args['name']
- yield '!'
- return Response(stream_with_context(generate()))
-
- .. versionadded:: 0.9
- """
- try:
- gen = iter(generator_or_function)
- except TypeError:
- def decorator(*args, **kwargs):
- gen = generator_or_function(*args, **kwargs)
- return stream_with_context(gen)
- return update_wrapper(decorator, generator_or_function)
-
- def generator():
- ctx = _request_ctx_stack.top
- if ctx is None:
- raise RuntimeError('Attempted to stream with context but '
- 'there was no context in the first place to keep around.')
- with ctx:
- # Dummy sentinel. Has to be inside the context block or we're
- # not actually keeping the context around.
- yield None
-
- # The try/finally is here so that if someone passes a WSGI level
- # iterator in we're still running the cleanup logic. Generators
- # don't need that because they are closed on their destruction
- # automatically.
- try:
- for item in gen:
- yield item
- finally:
- if hasattr(gen, 'close'):
- gen.close()
-
- # The trick is to start the generator. Then the code execution runs until
- # the first dummy None is yielded at which point the context was already
- # pushed. This item is discarded. Then when the iteration continues the
- # real generator is executed.
- wrapped_g = generator()
- next(wrapped_g)
- return wrapped_g
-
-
-def make_response(*args):
- """Sometimes it is necessary to set additional headers in a view. Because
- views do not have to return response objects but can return a value that
- is converted into a response object by Flask itself, it becomes tricky to
- add headers to it. This function can be called instead of using a return
- and you will get a response object which you can use to attach headers.
-
- If your view looked like this and you want to add a new header::
-
- def index():
- return render_template('index.html', foo=42)
-
- You can now do something like this::
-
- def index():
- response = make_response(render_template('index.html', foo=42))
- response.headers['X-Parachutes'] = 'parachutes are cool'
- return response
-
- This function accepts the very same arguments you can return from a
- view function. This for example creates a response with a 404 error
- code::
-
- response = make_response(render_template('not_found.html'), 404)
-
- The other use case of this function is to force the return value of a
- view function into a response which is helpful with view
- decorators::
-
- response = make_response(view_function())
- response.headers['X-Parachutes'] = 'parachutes are cool'
-
- Internally this function does the following things:
-
- - if no arguments are passed, it creates a new response object
- - if one argument is passed, :meth:`flask.Flask.make_response`
- is invoked with it.
- - if more than one argument is passed, the arguments are passed
- to the :meth:`flask.Flask.make_response` function as tuple.
-
- .. versionadded:: 0.6
- """
- if not args:
- return current_app.response_class()
- if len(args) == 1:
- args = args[0]
- return current_app.make_response(args)
-
-
-def url_for(endpoint, **values):
- """Generates a URL to the given endpoint with the method provided.
-
- Variable arguments that are unknown to the target endpoint are appended
- to the generated URL as query arguments. If the value of a query argument
- is ``None``, the whole pair is skipped. In case blueprints are active
- you can shortcut references to the same blueprint by prefixing the
- local endpoint with a dot (``.``).
-
- This will reference the index function local to the current blueprint::
-
- url_for('.index')
-
- For more information, head over to the :ref:`Quickstart <url-building>`.
-
- To integrate applications, :class:`Flask` has a hook to intercept URL build
- errors through :attr:`Flask.url_build_error_handlers`. The `url_for`
- function results in a :exc:`~werkzeug.routing.BuildError` when the current
- app does not have a URL for the given endpoint and values. When that
- happens, :data:`~flask.current_app` calls its
- :attr:`~Flask.url_build_error_handlers`; each handler can return a string
- to use as the result of `url_for` (instead of `url_for`'s default of
- raising the :exc:`~werkzeug.routing.BuildError` exception) or re-raise
- the exception.
- An example::
-
- def external_url_handler(error, endpoint, values):
- "Looks up an external URL when `url_for` cannot build a URL."
- # This is an example of hooking the build_error_handler.
- # Here, lookup_url is some utility function you've built
- # which looks up the endpoint in some external URL registry.
- url = lookup_url(endpoint, **values)
- if url is None:
- # External lookup did not have a URL.
- # Re-raise the BuildError, in context of original traceback.
- exc_type, exc_value, tb = sys.exc_info()
- if exc_value is error:
- raise exc_type, exc_value, tb
- else:
- raise error
- # url_for will use this result, instead of raising BuildError.
- return url
-
- app.url_build_error_handlers.append(external_url_handler)
-
- Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
- `endpoint` and `values` are the arguments passed into `url_for`. Note
- that this is for building URLs outside the current application, and not for
- handling 404 NotFound errors.
-
- .. versionadded:: 0.10
- The `_scheme` parameter was added.
-
- .. versionadded:: 0.9
- The `_anchor` and `_method` parameters were added.
-
- .. versionadded:: 0.9
- Calls :meth:`Flask.handle_build_error` on
- :exc:`~werkzeug.routing.BuildError`.
-
- :param endpoint: the endpoint of the URL (name of the function)
- :param values: the variable arguments of the URL rule
- :param _external: if set to ``True``, an absolute URL is generated. Server
- address can be changed via ``SERVER_NAME`` configuration variable which
- defaults to `localhost`.
- :param _scheme: a string specifying the desired URL scheme. The `_external`
- parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default
- behavior uses the same scheme as the current request, or
- ``PREFERRED_URL_SCHEME`` from the :ref:`app configuration <config>` if no
- request context is available. As of Werkzeug 0.10, this also can be set
- to an empty string to build protocol-relative URLs.
- :param _anchor: if provided this is added as anchor to the URL.
- :param _method: if provided this explicitly specifies an HTTP method.
- """
- appctx = _app_ctx_stack.top
- reqctx = _request_ctx_stack.top
- if appctx is None:
- raise RuntimeError('Attempted to generate a URL without the '
- 'application context being pushed. This has to be '
- 'executed when an application context is available.')
-
- # If request specific information is available we have some extra
- # features that support "relative" URLs.
- if reqctx is not None:
- url_adapter = reqctx.url_adapter
- blueprint_name = request.blueprint
- if not reqctx.request._is_old_module:
- if endpoint[:1] == '.':
- if blueprint_name is not None:
- endpoint = blueprint_name + endpoint
- else:
- endpoint = endpoint[1:]
- else:
- # TODO: get rid of this deprecated functionality in 1.0
- if '.' not in endpoint:
- if blueprint_name is not None:
- endpoint = blueprint_name + '.' + endpoint
- elif endpoint.startswith('.'):
- endpoint = endpoint[1:]
- external = values.pop('_external', False)
-
- # Otherwise go with the url adapter from the appctx and make
- # the URLs external by default.
- else:
- url_adapter = appctx.url_adapter
- if url_adapter is None:
- raise RuntimeError('Application was not able to create a URL '
- 'adapter for request independent URL generation. '
- 'You might be able to fix this by setting '
- 'the SERVER_NAME config variable.')
- external = values.pop('_external', True)
-
- anchor = values.pop('_anchor', None)
- method = values.pop('_method', None)
- scheme = values.pop('_scheme', None)
- appctx.app.inject_url_defaults(endpoint, values)
-
- # This is not the best way to deal with this but currently the
- # underlying Werkzeug router does not support overriding the scheme on
- # a per build call basis.
- old_scheme = None
- if scheme is not None:
- if not external:
- raise ValueError('When specifying _scheme, _external must be True')
- old_scheme = url_adapter.url_scheme
- url_adapter.url_scheme = scheme
-
- try:
- try:
- rv = url_adapter.build(endpoint, values, method=method,
- force_external=external)
- finally:
- if old_scheme is not None:
- url_adapter.url_scheme = old_scheme
- except BuildError as error:
- # We need to inject the values again so that the app callback can
- # deal with that sort of stuff.
- values['_external'] = external
- values['_anchor'] = anchor
- values['_method'] = method
- return appctx.app.handle_url_build_error(error, endpoint, values)
-
- if anchor is not None:
- rv += '#' + url_quote(anchor)
- return rv
-
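
A short sketch of the common cases described above; the routes here are illustrative, not part of Flask:

    from flask import Flask, url_for

    app = Flask(__name__)

    @app.route('/')
    def index():
        return 'hi'

    @app.route('/user/<username>')
    def profile(username):
        return username

    with app.test_request_context():
        assert url_for('index') == '/'
        assert url_for('profile', username='jo') == '/user/jo'
        assert url_for('index', q=None) == '/'        # None pairs are skipped
        assert url_for('index', _anchor='top') == '/#top'
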
-
-def get_template_attribute(template_name, attribute):
- """Loads a macro (or variable) a template exports. This can be used to
- invoke a macro from within Python code. If you for example have a
- template named :file:`_cider.html` with the following contents:
-
- .. sourcecode:: html+jinja
-
- {% macro hello(name) %}Hello {{ name }}!{% endmacro %}
-
- You can access this from Python code like this::
-
- hello = get_template_attribute('_cider.html', 'hello')
- return hello('World')
-
- .. versionadded:: 0.2
-
- :param template_name: the name of the template
- :param attribute: the name of the variable or macro to access
- """
- return getattr(current_app.jinja_env.get_template(template_name).module,
- attribute)
-
-
-def flash(message, category='message'):
- """Flashes a message to the next request. In order to remove the
- flashed message from the session and to display it to the user,
- the template has to call :func:`get_flashed_messages`.
-
- .. versionchanged:: 0.3
- `category` parameter added.
-
- :param message: the message to be flashed.
- :param category: the category for the message. The following values
- are recommended: ``'message'`` for any kind of message,
- ``'error'`` for errors, ``'info'`` for information
- messages and ``'warning'`` for warnings. However any
- kind of string can be used as category.
- """
- # Original implementation:
- #
- # session.setdefault('_flashes', []).append((category, message))
- #
- # This assumed that changes made to mutable structures in the session
- # are always in sync with the session object, which is not true for session
- # implementations that use external storage for keeping their keys/values.
- flashes = session.get('_flashes', [])
- flashes.append((category, message))
- session['_flashes'] = flashes
- message_flashed.send(current_app._get_current_object(),
- message=message, category=category)
-
-
-def get_flashed_messages(with_categories=False, category_filter=[]):
- """Pulls all flashed messages from the session and returns them.
- Further calls in the same request to the function will return
- the same messages. By default just the messages are returned,
- but when `with_categories` is set to ``True``, the return value will
- be a list of tuples in the form ``(category, message)`` instead.
-
- Filter the flashed messages to one or more categories by providing those
- categories in `category_filter`. This allows rendering categories in
- separate html blocks. The `with_categories` and `category_filter`
- arguments are distinct:
-
- * `with_categories` controls whether categories are returned with message
- text (``True`` gives a tuple, where ``False`` gives just the message text).
- * `category_filter` filters the messages down to only those matching the
- provided categories.
-
- See :ref:`message-flashing-pattern` for examples.
-
- .. versionchanged:: 0.3
- `with_categories` parameter added.
-
- .. versionchanged:: 0.9
- `category_filter` parameter added.
-
- :param with_categories: set to ``True`` to also receive categories.
- :param category_filter: whitelist of categories to limit return values
- """
- flashes = _request_ctx_stack.top.flashes
- if flashes is None:
- _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
- if '_flashes' in session else []
- if category_filter:
- flashes = list(filter(lambda f: f[0] in category_filter, flashes))
- if not with_categories:
- return [x[1] for x in flashes]
- return flashes
-
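
A sketch of the flash round trip within one request; a secret key is required because flashes live in the session, and the key value here is a placeholder:

    from flask import Flask, flash, get_flashed_messages

    app = Flask(__name__)
    app.secret_key = 'dev'   # placeholder; sessions need a secret key

    with app.test_request_context():
        flash('Saved.', 'info')
        flash('Disk is full.', 'error')
        assert get_flashed_messages(category_filter=['error']) == ['Disk is full.']
        # Later calls in the same request return the cached messages:
        assert get_flashed_messages(with_categories=True) == [
            ('info', 'Saved.'), ('error', 'Disk is full.')]
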
-
-def send_file(filename_or_fp, mimetype=None, as_attachment=False,
- attachment_filename=None, add_etags=True,
- cache_timeout=None, conditional=False, last_modified=None):
- """Sends the contents of a file to the client. This will use the
- most efficient method available and configured. By default it will
- try to use the WSGI server's file_wrapper support. Alternatively
- you can set the application's :attr:`~Flask.use_x_sendfile` attribute
- to ``True`` to directly emit an ``X-Sendfile`` header. This however
- requires support of the underlying webserver for ``X-Sendfile``.
-
- By default it will try to guess the mimetype for you, but you can
- also explicitly provide one. For extra security you probably want
- to send certain files as attachment (HTML for instance). The mimetype
- guessing requires a `filename` or an `attachment_filename` to be
- provided.
-
- ETags will also be attached automatically if a `filename` is provided. You
- can turn this off by setting `add_etags=False`.
-
- If `conditional=True` and `filename` is provided, this method will try to
- upgrade the response stream to support range requests. This will allow
- the request to be answered with partial content response.
-
- Please never pass filenames to this function from user sources;
- you should use :func:`send_from_directory` instead.
-
- .. versionadded:: 0.2
-
- .. versionadded:: 0.5
- The `add_etags`, `cache_timeout` and `conditional` parameters were
- added. The default behavior is now to attach etags.
-
- .. versionchanged:: 0.7
- mimetype guessing and etag support for file objects was
- deprecated because it was unreliable. Pass a filename if you are
- able to, otherwise attach an etag yourself. This functionality
- will be removed in Flask 1.0
-
- .. versionchanged:: 0.9
- cache_timeout pulls its default from application config, when None.
-
- .. versionchanged:: 0.12
- The filename is no longer automatically inferred from file objects. If
- you want to use automatic mimetype and etag support, pass a filepath via
- `filename_or_fp` or `attachment_filename`.
-
- .. versionchanged:: 0.12
- The `attachment_filename` is preferred over `filename` for MIME-type
- detection.
-
- :param filename_or_fp: the filename of the file to send in `latin-1`.
- This is relative to the :attr:`~Flask.root_path`
- if a relative path is specified.
- Alternatively a file object might be provided in
- which case ``X-Sendfile`` might not work and fall
- back to the traditional method. Make sure that the
- file pointer is positioned at the start of data to
- send before calling :func:`send_file`.
- :param mimetype: the mimetype of the file if provided. If a file path is
- given, auto detection happens as fallback, otherwise an
- error will be raised.
- :param as_attachment: set to ``True`` if you want to send this file with
- a ``Content-Disposition: attachment`` header.
- :param attachment_filename: the filename for the attachment if it
- differs from the file's filename.
- :param add_etags: set to ``False`` to disable attaching of etags.
- :param conditional: set to ``True`` to enable conditional responses.
-
- :param cache_timeout: the timeout in seconds for the headers. When ``None``
- (default), this value is set by
- :meth:`~Flask.get_send_file_max_age` of
- :data:`~flask.current_app`.
- :param last_modified: set the ``Last-Modified`` header to this value,
- a :class:`~datetime.datetime` or timestamp.
- If a file was passed, this overrides its mtime.
- """
- mtime = None
- fsize = None
- if isinstance(filename_or_fp, string_types):
- filename = filename_or_fp
- if not os.path.isabs(filename):
- filename = os.path.join(current_app.root_path, filename)
- file = None
- if attachment_filename is None:
- attachment_filename = os.path.basename(filename)
- else:
- file = filename_or_fp
- filename = None
-
- if mimetype is None:
- if attachment_filename is not None:
- mimetype = mimetypes.guess_type(attachment_filename)[0] \
- or 'application/octet-stream'
-
- if mimetype is None:
- raise ValueError(
- 'Unable to infer MIME-type because no filename is available. '
- 'Please set either `attachment_filename`, pass a filepath to '
- '`filename_or_fp` or set your own MIME-type via `mimetype`.'
- )
-
- headers = Headers()
- if as_attachment:
- if attachment_filename is None:
- raise TypeError('filename unavailable, required for '
- 'sending as attachment')
- headers.add('Content-Disposition', 'attachment',
- filename=attachment_filename)
-
- if current_app.use_x_sendfile and filename:
- if file is not None:
- file.close()
- headers['X-Sendfile'] = filename
- fsize = os.path.getsize(filename)
- headers['Content-Length'] = fsize
- data = None
- else:
- if file is None:
- file = open(filename, 'rb')
- mtime = os.path.getmtime(filename)
- fsize = os.path.getsize(filename)
- headers['Content-Length'] = fsize
- data = wrap_file(request.environ, file)
-
- rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
- direct_passthrough=True)
-
- if last_modified is not None:
- rv.last_modified = last_modified
- elif mtime is not None:
- rv.last_modified = mtime
-
- rv.cache_control.public = True
- if cache_timeout is None:
- cache_timeout = current_app.get_send_file_max_age(filename)
- if cache_timeout is not None:
- rv.cache_control.max_age = cache_timeout
- rv.expires = int(time() + cache_timeout)
-
- if add_etags and filename is not None:
- from warnings import warn
-
- try:
- rv.set_etag('%s-%s-%s' % (
- os.path.getmtime(filename),
- os.path.getsize(filename),
- adler32(
- filename.encode('utf-8') if isinstance(filename, text_type)
- else filename
- ) & 0xffffffff
- ))
- except OSError:
- warn('Access to %s failed; it may not exist, so etags are '
- 'omitted from the headers.' % filename, stacklevel=2)
-
- if conditional:
- if callable(getattr(Range, 'to_content_range_header', None)):
- # Werkzeug supports Range Requests
- # Remove this test when support for Werkzeug <0.12 is dropped
- try:
- rv = rv.make_conditional(request, accept_ranges=True,
- complete_length=fsize)
- except RequestedRangeNotSatisfiable:
- file.close()
- raise
- else:
- rv = rv.make_conditional(request)
- # make sure we don't send x-sendfile for servers that
- # ignore the 304 status code for x-sendfile.
- if rv.status_code == 304:
- rv.headers.pop('x-sendfile', None)
- return rv
-
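
A typical call, sketched under the assumption that static/report.pdf exists relative to the application's root_path:

    from flask import Flask, send_file

    app = Flask(__name__)

    @app.route('/report.pdf')
    def report():
        # The path is resolved against app.root_path; conditional=True
        # enables range requests and 304 responses.
        return send_file('static/report.pdf', mimetype='application/pdf',
                         as_attachment=True,
                         attachment_filename='report.pdf',
                         conditional=True)
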
-
-def safe_join(directory, *pathnames):
- """Safely join `directory` and zero or more untrusted `pathnames`
- components.
-
- Example usage::
-
- @app.route('/wiki/<path:filename>')
- def wiki_page(filename):
- filename = safe_join(app.config['WIKI_FOLDER'], filename)
- with open(filename, 'rb') as fd:
- content = fd.read() # Read and process the file content...
-
- :param directory: the trusted base directory.
- :param pathnames: the untrusted pathnames relative to that directory.
- :raises: :class:`~werkzeug.exceptions.NotFound` if one or more passed
- paths fall out of its boundaries.
- """
-
- parts = [directory]
-
- for filename in pathnames:
- if filename != '':
- filename = posixpath.normpath(filename)
-
- if (
- any(sep in filename for sep in _os_alt_seps)
- or os.path.isabs(filename)
- or filename == '..'
- or filename.startswith('../')
- ):
- raise NotFound()
-
- parts.append(filename)
-
- return posixpath.join(*parts)
-
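
Two calls that sketch the contract: a clean relative path joins normally, while a traversal attempt raises NotFound. The directory paths are illustrative.

    from flask import safe_join
    from werkzeug.exceptions import NotFound

    safe_join('/var/www', 'docs', 'index.html')   # '/var/www/docs/index.html'

    try:
        safe_join('/var/www', '../etc/passwd')
    except NotFound:
        print('rejected path traversal')
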
-
-def send_from_directory(directory, filename, **options):
- """Send a file from a given directory with :func:`send_file`. This
- is a secure way to quickly expose static files from an upload folder
- or something similar.
-
- Example usage::
-
- @app.route('/uploads/<path:filename>')
- def download_file(filename):
- return send_from_directory(app.config['UPLOAD_FOLDER'],
- filename, as_attachment=True)
-
- .. admonition:: Sending files and Performance
-
- It is strongly recommended to activate either ``X-Sendfile`` support in
- your webserver or (if no authentication happens) to tell the webserver
- to serve files for the given path on its own without calling into the
- web application for improved performance.
-
- .. versionadded:: 0.5
-
- :param directory: the directory where all the files are stored.
- :param filename: the filename relative to that directory to
- download.
- :param options: optional keyword arguments that are directly
- forwarded to :func:`send_file`.
- """
- filename = safe_join(directory, filename)
- if not os.path.isabs(filename):
- filename = os.path.join(current_app.root_path, filename)
- try:
- if not os.path.isfile(filename):
- raise NotFound()
- except (TypeError, ValueError):
- raise BadRequest()
- options.setdefault('conditional', True)
- return send_file(filename, **options)
-
-
-def get_root_path(import_name):
- """Returns the path to a package or cwd if that cannot be found. This
- returns the path of a package or the folder that contains a module.
-
- Not to be confused with the package path returned by :func:`find_package`.
- """
- # Module already imported and has a file attribute. Use that first.
- mod = sys.modules.get(import_name)
- if mod is not None and hasattr(mod, '__file__'):
- return os.path.dirname(os.path.abspath(mod.__file__))
-
- # Next attempt: check the loader.
- loader = pkgutil.get_loader(import_name)
-
- # Loader does not exist or we're referring to an unloaded main module
- # or a main module without path (interactive sessions), go with the
- # current working directory.
- if loader is None or import_name == '__main__':
- return os.getcwd()
-
- # For .egg, zipimporter does not have get_filename until Python 2.7.
- # Some other loaders might exhibit the same behavior.
- if hasattr(loader, 'get_filename'):
- filepath = loader.get_filename(import_name)
- else:
- # Fall back to imports.
- __import__(import_name)
- mod = sys.modules[import_name]
- filepath = getattr(mod, '__file__', None)
-
- # If we don't have a filepath it might be because we are a
- # namespace package. In this case we pick the root path from the
- # first module that is contained in our package.
- if filepath is None:
- raise RuntimeError('No root path can be found for the provided '
- 'module "%s". This can happen because the '
- 'module came from an import hook that does '
- 'not provide file name information or because '
- 'it\'s a namespace package. In this case '
- 'the root path needs to be explicitly '
- 'provided.' % import_name)
-
- # filepath is import_name.py for a module, or __init__.py for a package.
- return os.path.dirname(os.path.abspath(filepath))
-
-
-def _matching_loader_thinks_module_is_package(loader, mod_name):
- """Given the loader that loaded a module and the module this function
- attempts to figure out if the given module is actually a package.
- """
- # If the loader can tell us if something is a package, we can
- # directly ask the loader.
- if hasattr(loader, 'is_package'):
- return loader.is_package(mod_name)
- # importlib's namespace loaders do not have this functionality but
- # all the modules it loads are packages, so we can take advantage of
- # this information.
- elif (loader.__class__.__module__ == '_frozen_importlib' and
- loader.__class__.__name__ == 'NamespaceLoader'):
- return True
- # Otherwise we need to fail with an error that explains what went
- # wrong.
- raise AttributeError(
- ('%s.is_package() method is missing but is required by Flask for '
- 'PEP 302 import hooks. If you do not use import hooks and '
- 'you encounter this error please file a bug against Flask.') %
- loader.__class__.__name__)
-
-
-def find_package(import_name):
- """Finds a package and returns the prefix (or None if the package is
- not installed) as well as the folder that contains the package or
- module as a tuple. The package path returned is the module that would
- have to be added to the pythonpath in order to make it possible to
- import the module. The prefix is the path below which a UNIX like
- folder structure exists (lib, share etc.).
- """
- root_mod_name = import_name.split('.')[0]
- loader = pkgutil.get_loader(root_mod_name)
- if loader is None or import_name == '__main__':
- # import name is not found, or interactive/main module
- package_path = os.getcwd()
- else:
- # For .egg, zipimporter does not have get_filename until Python 2.7.
- if hasattr(loader, 'get_filename'):
- filename = loader.get_filename(root_mod_name)
- elif hasattr(loader, 'archive'):
- # zipimporter's loader.archive points to the .egg or .zip file;
- # the archive filename is dropped in the call to dirname below.
- filename = loader.archive
- else:
- # At least one loader is missing both get_filename and archive:
- # Google App Engine's HardenedModulesHook
- #
- # Fall back to imports.
- __import__(import_name)
- filename = sys.modules[import_name].__file__
- package_path = os.path.abspath(os.path.dirname(filename))
-
- # In case the root module is a package we need to chop off the
- # rightmost part. This needs to go through a helper function
- # because of python 3.3 namespace packages.
- if _matching_loader_thinks_module_is_package(
- loader, root_mod_name):
- package_path = os.path.dirname(package_path)
-
- site_parent, site_folder = os.path.split(package_path)
- py_prefix = os.path.abspath(sys.prefix)
- if package_path.startswith(py_prefix):
- return py_prefix, package_path
- elif site_folder.lower() == 'site-packages':
- parent, folder = os.path.split(site_parent)
- # Windows like installations
- if folder.lower() == 'lib':
- base_dir = parent
- # UNIX like installations
- elif os.path.basename(parent).lower() == 'lib':
- base_dir = os.path.dirname(parent)
- else:
- base_dir = site_parent
- return base_dir, package_path
- return None, package_path
-
-
-class locked_cached_property(object):
- """A decorator that converts a function into a lazy property. The
- function wrapped is called the first time to retrieve the result
- and then that calculated result is used the next time you access
- the value. Works like the one in Werkzeug but has a lock for
- thread safety.
- """
-
- def __init__(self, func, name=None, doc=None):
- self.__name__ = name or func.__name__
- self.__module__ = func.__module__
- self.__doc__ = doc or func.__doc__
- self.func = func
- self.lock = RLock()
-
- def __get__(self, obj, type=None):
- if obj is None:
- return self
- with self.lock:
- value = obj.__dict__.get(self.__name__, _missing)
- if value is _missing:
- value = self.func(obj)
- obj.__dict__[self.__name__] = value
- return value
-
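
A sketch of the caching behavior; the Service class is illustrative:

    from flask.helpers import locked_cached_property

    class Service(object):
        @locked_cached_property
        def expensive(self):
            print('computed once')
            return 42

    s = Service()
    s.expensive   # prints 'computed once', returns 42
    s.expensive   # served from s.__dict__, no recomputation
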
-
-class _PackageBoundObject(object):
-
- def __init__(self, import_name, template_folder=None, root_path=None):
- #: The name of the package or module. Do not change this once
- #: it was set by the constructor.
- self.import_name = import_name
-
- #: location of the templates. ``None`` if templates should not be
- #: exposed.
- self.template_folder = template_folder
-
- if root_path is None:
- root_path = get_root_path(self.import_name)
-
- #: Where is the app root located?
- self.root_path = root_path
-
- self._static_folder = None
- self._static_url_path = None
-
- def _get_static_folder(self):
- if self._static_folder is not None:
- return os.path.join(self.root_path, self._static_folder)
- def _set_static_folder(self, value):
- self._static_folder = value
- static_folder = property(_get_static_folder, _set_static_folder, doc='''
- The absolute path to the configured static folder.
- ''')
- del _get_static_folder, _set_static_folder
-
- def _get_static_url_path(self):
- if self._static_url_path is not None:
- return self._static_url_path
- if self.static_folder is not None:
- return '/' + os.path.basename(self.static_folder)
- def _set_static_url_path(self, value):
- self._static_url_path = value
- static_url_path = property(_get_static_url_path, _set_static_url_path)
- del _get_static_url_path, _set_static_url_path
-
- @property
- def has_static_folder(self):
- """This is ``True`` if the package bound object's container has a
- folder for static files.
-
- .. versionadded:: 0.5
- """
- return self.static_folder is not None
-
- @locked_cached_property
- def jinja_loader(self):
- """The Jinja loader for this package bound object.
-
- .. versionadded:: 0.5
- """
- if self.template_folder is not None:
- return FileSystemLoader(os.path.join(self.root_path,
- self.template_folder))
-
- def get_send_file_max_age(self, filename):
- """Provides default cache_timeout for the :func:`send_file` functions.
-
- By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
- the configuration of :data:`~flask.current_app`.
-
- Static file functions such as :func:`send_from_directory` use this
- function, and :func:`send_file` calls this function on
- :data:`~flask.current_app` when the given cache_timeout is ``None``. If a
- cache_timeout is given in :func:`send_file`, that timeout is used;
- otherwise, this method is called.
-
- This allows subclasses to change the behavior when sending files based
- on the filename. For example, to set the cache timeout for .js files
- to 60 seconds::
-
- class MyFlask(flask.Flask):
- def get_send_file_max_age(self, name):
- if name.lower().endswith('.js'):
- return 60
- return flask.Flask.get_send_file_max_age(self, name)
-
- .. versionadded:: 0.9
- """
- return total_seconds(current_app.send_file_max_age_default)
-
- def send_static_file(self, filename):
- """Function used internally to send static files from the static
- folder to the browser.
-
- .. versionadded:: 0.5
- """
- if not self.has_static_folder:
- raise RuntimeError('No static folder for this object')
- # Ensure get_send_file_max_age is called in all cases.
- # Here, we ensure get_send_file_max_age is called for Blueprints.
- cache_timeout = self.get_send_file_max_age(filename)
- return send_from_directory(self.static_folder, filename,
- cache_timeout=cache_timeout)
-
- def open_resource(self, resource, mode='rb'):
- """Opens a resource from the application's resource folder. To see
- how this works, consider the following folder structure::
-
- /myapplication.py
- /schema.sql
- /static
- /style.css
- /templates
- /layout.html
- /index.html
-
- If you want to open the :file:`schema.sql` file you would do the
- following::
-
- with app.open_resource('schema.sql') as f:
- contents = f.read()
- do_something_with(contents)
-
- :param resource: the name of the resource. To access resources within
- subfolders use forward slashes as separator.
- :param mode: resource file opening mode, default is 'rb'.
- """
- if mode not in ('r', 'rb'):
- raise ValueError('Resources can only be opened for reading')
- return open(os.path.join(self.root_path, resource), mode)
-
-
-def total_seconds(td):
- """Returns the total seconds from a timedelta object.
-
- :param timedelta td: the timedelta to be converted in seconds
-
- :returns: number of seconds
- :rtype: int
- """
- return td.days * 60 * 60 * 24 + td.seconds
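
For instance, twelve hours is 12 * 3600 seconds; note that the microseconds field is deliberately dropped:

    from datetime import timedelta
    from flask.helpers import total_seconds

    assert total_seconds(timedelta(hours=12)) == 43200
    assert total_seconds(timedelta(days=1, seconds=1)) == 86401
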
diff --git a/venv/Lib/site-packages/flask/json.py b/venv/Lib/site-packages/flask/json.py
deleted file mode 100644
index 16e0c29..0000000
--- a/venv/Lib/site-packages/flask/json.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- flask.json
- ~~~~~~~~~~
-
- Implementation helpers for the JSON support in Flask.
-
- :copyright: (c) 2015 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-import io
-import uuid
-from datetime import date
-from .globals import current_app, request
-from ._compat import text_type, PY2
-
-from werkzeug.http import http_date
-from jinja2 import Markup
-
-# Use the same json implementation as itsdangerous on which we
-# depend anyways.
-from itsdangerous import json as _json
-
-
-# Figure out if simplejson escapes slashes. This behavior was changed
-# from one version to another without reason.
-_slash_escape = '\\/' not in _json.dumps('/')
-
-
-__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
- 'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
- 'jsonify']
-
-
-def _wrap_reader_for_text(fp, encoding):
- if isinstance(fp.read(0), bytes):
- fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
- return fp
-
-
-def _wrap_writer_for_text(fp, encoding):
- try:
- fp.write('')
- except TypeError:
- fp = io.TextIOWrapper(fp, encoding)
- return fp
-
-
-class JSONEncoder(_json.JSONEncoder):
- """The default Flask JSON encoder. This one extends the default simplejson
- encoder by also supporting ``datetime`` objects, ``UUID`` as well as
- ``Markup`` objects which are serialized as RFC 822 datetime strings (same
- as the HTTP date format). In order to support more data types override the
- :meth:`default` method.
- """
-
- def default(self, o):
- """Implement this method in a subclass such that it returns a
- serializable object for ``o``, or calls the base implementation (to
- raise a :exc:`TypeError`).
-
- For example, to support arbitrary iterators, you could implement
- default like this::
-
- def default(self, o):
- try:
- iterable = iter(o)
- except TypeError:
- pass
- else:
- return list(iterable)
- return JSONEncoder.default(self, o)
- """
- if isinstance(o, date):
- return http_date(o.timetuple())
- if isinstance(o, uuid.UUID):
- return str(o)
- if hasattr(o, '__html__'):
- return text_type(o.__html__())
- return _json.JSONEncoder.default(self, o)
-
-
-class JSONDecoder(_json.JSONDecoder):
- """The default JSON decoder. This one does not change the behavior from
- the default simplejson decoder. Consult the :mod:`json` documentation
- for more information. This decoder is not only used for the load
- functions of this module but also :attr:`~flask.Request`.
- """
-
-
-def _dump_arg_defaults(kwargs):
- """Inject default arguments for dump functions."""
- if current_app:
- kwargs.setdefault('cls', current_app.json_encoder)
- if not current_app.config['JSON_AS_ASCII']:
- kwargs.setdefault('ensure_ascii', False)
- kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
- else:
- kwargs.setdefault('sort_keys', True)
- kwargs.setdefault('cls', JSONEncoder)
-
-
-def _load_arg_defaults(kwargs):
- """Inject default arguments for load functions."""
- if current_app:
- kwargs.setdefault('cls', current_app.json_decoder)
- else:
- kwargs.setdefault('cls', JSONDecoder)
-
-
-def dumps(obj, **kwargs):
- """Serialize ``obj`` to a JSON formatted ``str`` by using the application's
- configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
- application on the stack.
-
- By default this function returns ``unicode`` strings or ASCII-only
- bytestrings, which coerce into unicode strings automatically. That
- behavior is controlled by the ``JSON_AS_ASCII`` configuration variable
- and can be overridden by the simplejson ``ensure_ascii`` parameter.
- """
- _dump_arg_defaults(kwargs)
- encoding = kwargs.pop('encoding', None)
- rv = _json.dumps(obj, **kwargs)
- if encoding is not None and isinstance(rv, text_type):
- rv = rv.encode(encoding)
- return rv
-
-
-def dump(obj, fp, **kwargs):
- """Like :func:`dumps` but writes into a file object."""
- _dump_arg_defaults(kwargs)
- encoding = kwargs.pop('encoding', None)
- if encoding is not None:
- fp = _wrap_writer_for_text(fp, encoding)
- _json.dump(obj, fp, **kwargs)
-
-
-def loads(s, **kwargs):
- """Unserialize a JSON object from a string ``s`` by using the application's
- configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
- application on the stack.
- """
- _load_arg_defaults(kwargs)
- if isinstance(s, bytes):
- s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
- return _json.loads(s, **kwargs)
-
-
-def load(fp, **kwargs):
- """Like :func:`loads` but reads from a file object.
- """
- _load_arg_defaults(kwargs)
- if not PY2:
- fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
- return _json.load(fp, **kwargs)
-
-
-def htmlsafe_dumps(obj, **kwargs):
- """Works exactly like :func:`dumps` but is safe for use in ``
-
-'''
-
-__all__ = ["RecaptchaWidget"]
-
-
-class RecaptchaWidget(object):
-
- def recaptcha_html(self, public_key):
- html = current_app.config.get('RECAPTCHA_HTML')
- if html:
- return Markup(html)
- params = current_app.config.get('RECAPTCHA_PARAMETERS')
- script = RECAPTCHA_SCRIPT
- if params:
- script += u'?' + url_encode(params)
-
- attrs = current_app.config.get('RECAPTCHA_DATA_ATTRS', {})
- attrs['sitekey'] = public_key
- snippet = u' '.join([u'data-%s="%s"' % (k, attrs[k]) for k in attrs])
- return Markup(RECAPTCHA_TEMPLATE % (script, snippet))
-
- def __call__(self, field, error=None, **kwargs):
- """Returns the recaptcha input HTML."""
-
- try:
- public_key = current_app.config['RECAPTCHA_PUBLIC_KEY']
- except KeyError:
- raise RuntimeError("RECAPTCHA_PUBLIC_KEY config not set")
-
- return self.recaptcha_html(public_key)
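# Annotation, not part of the diff: this widget is normally reached through
# Flask-WTF's RecaptchaField rather than instantiated directly. A hedged
# sketch; all key values below are placeholders.
from flask import Flask
from flask_wtf import FlaskForm
from flask_wtf.recaptcha import RecaptchaField

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'              # placeholder
app.config['RECAPTCHA_PUBLIC_KEY'] = 'site-key'     # placeholder
app.config['RECAPTCHA_PRIVATE_KEY'] = 'secret-key'  # placeholder


class CommentForm(FlaskForm):
    recaptcha = RecaptchaField()  # rendered via RecaptchaWidget.__call__


with app.test_request_context():
    print(CommentForm().recaptcha())  # emits the script tag and data-sitekey div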
diff --git a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/PKG-INFO b/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/PKG-INFO
deleted file mode 100644
index c46c662..0000000
--- a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/PKG-INFO
+++ /dev/null
@@ -1,13 +0,0 @@
-Metadata-Version: 1.1
-Name: itsdangerous
-Version: 0.24
-Summary: Various helpers to pass trusted data to untrusted environments and back.
-Home-page: http://github.com/mitsuhiko/itsdangerous
-Author: Armin Ronacher
-Author-email: armin.ronacher@active-4.com
-License: UNKNOWN
-Description: UNKNOWN
-Platform: UNKNOWN
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
diff --git a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/SOURCES.txt b/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/SOURCES.txt
deleted file mode 100644
index 8251619..0000000
--- a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-CHANGES
-LICENSE
-MANIFEST.in
-Makefile
-README
-itsdangerous.py
-setup.cfg
-setup.py
-tests.py
-tox.ini
-docs/Makefile
-docs/conf.py
-docs/index.rst
-docs/make.bat
-docs/_static/itsdangerous.png
-docs/_themes/.gitignore
-docs/_themes/LICENSE
-docs/_themes/README
-docs/_themes/flask_theme_support.py
-docs/_themes/flask_small/layout.html
-docs/_themes/flask_small/theme.conf
-docs/_themes/flask_small/static/flasky.css_t
-itsdangerous.egg-info/PKG-INFO
-itsdangerous.egg-info/SOURCES.txt
-itsdangerous.egg-info/dependency_links.txt
-itsdangerous.egg-info/not-zip-safe
-itsdangerous.egg-info/top_level.txt
\ No newline at end of file
diff --git a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/dependency_links.txt b/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/installed-files.txt b/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/installed-files.txt
deleted file mode 100644
index 8c277b8..0000000
--- a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/installed-files.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-..\itsdangerous.py
-..\__pycache__\itsdangerous.cpython-36.pyc
-dependency_links.txt
-not-zip-safe
-PKG-INFO
-SOURCES.txt
-top_level.txt
diff --git a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/not-zip-safe b/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/not-zip-safe
deleted file mode 100644
index 8b13789..0000000
--- a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/not-zip-safe
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/top_level.txt b/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/top_level.txt
deleted file mode 100644
index e163955..0000000
--- a/venv/Lib/site-packages/itsdangerous-0.24-py3.6.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-itsdangerous
diff --git a/venv/Lib/site-packages/itsdangerous.py b/venv/Lib/site-packages/itsdangerous.py
deleted file mode 100644
index 228d101..0000000
--- a/venv/Lib/site-packages/itsdangerous.py
+++ /dev/null
@@ -1,872 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- itsdangerous
- ~~~~~~~~~~~~
-
- A module that implements various functions to deal with untrusted
- sources. Mainly useful for web applications.
-
- :copyright: (c) 2014 by Armin Ronacher and the Django Software Foundation.
- :license: BSD, see LICENSE for more details.
-"""
-
-import sys
-import hmac
-import zlib
-import time
-import base64
-import hashlib
-import operator
-from datetime import datetime
-
-
-PY2 = sys.version_info[0] == 2
-if PY2:
- from itertools import izip
- text_type = unicode
- int_to_byte = chr
- number_types = (int, long, float)
-else:
- from functools import reduce
- izip = zip
- text_type = str
- int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
- number_types = (int, float)
-
-
-try:
- import simplejson as json
-except ImportError:
- import json
-
-
-class _CompactJSON(object):
- """Wrapper around simplejson that strips whitespace.
- """
-
- def loads(self, payload):
- return json.loads(payload)
-
- def dumps(self, obj):
- return json.dumps(obj, separators=(',', ':'))
-
-
-compact_json = _CompactJSON()
-
-
-# 2011/01/01 in UTC
-EPOCH = 1293840000
-
-
-def want_bytes(s, encoding='utf-8', errors='strict'):
- if isinstance(s, text_type):
- s = s.encode(encoding, errors)
- return s
-
-
-def is_text_serializer(serializer):
- """Checks wheather a serializer generates text or binary."""
- return isinstance(serializer.dumps({}), text_type)
-
-
-# Starting with 3.3 the standard library has a c-implementation for
-# constant time string compares.
-_builtin_constant_time_compare = getattr(hmac, 'compare_digest', None)
-
-
-def constant_time_compare(val1, val2):
- """Returns True if the two strings are equal, False otherwise.
-
-    The time taken is independent of the number of characters that match. Do
-    not use this function for anything other than comparison with
-    known-length targets.
-
-    This should be implemented in C in order to get it completely right.
- """
- if _builtin_constant_time_compare is not None:
- return _builtin_constant_time_compare(val1, val2)
- len_eq = len(val1) == len(val2)
- if len_eq:
- result = 0
- left = val1
- else:
- result = 1
- left = val2
- for x, y in izip(bytearray(left), bytearray(val2)):
- result |= x ^ y
- return result == 0
-
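# Annotation, not file content: unlike ``==`` on bytes, this comparison does
# not exit at the first mismatching byte, so response timing leaks nothing
# about how much of a forged signature was correct.
from itsdangerous import constant_time_compare

expected = b'deadbeef'
assert constant_time_compare(b'deadbeef', expected)
assert not constant_time_compare(b'deadbeeg', expected)  # last byte differs
assert not constant_time_compare(b'dead', expected)      # length differs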
-
-class BadData(Exception):
- """Raised if bad data of any sort was encountered. This is the
- base for all exceptions that itsdangerous is currently using.
-
- .. versionadded:: 0.15
- """
- message = None
-
- def __init__(self, message):
- Exception.__init__(self, message)
- self.message = message
-
- def __str__(self):
- return text_type(self.message)
-
- if PY2:
- __unicode__ = __str__
- def __str__(self):
- return self.__unicode__().encode('utf-8')
-
-
-class BadPayload(BadData):
- """This error is raised in situations when payload is loaded without
- checking the signature first and an exception happend as a result of
- that. The original exception that caused that will be stored on the
- exception as :attr:`original_error`.
-
- This can also happen with a :class:`JSONWebSignatureSerializer` that
- is subclassed and uses a different serializer for the payload than
- the expected one.
-
- .. versionadded:: 0.15
- """
-
- def __init__(self, message, original_error=None):
- BadData.__init__(self, message)
- #: If available, the error that indicates why the payload
- #: was not valid. This might be `None`.
- self.original_error = original_error
-
-
-class BadSignature(BadData):
- """This error is raised if a signature does not match. As of
- itsdangerous 0.14 there are helpful attributes on the exception
-    instances. You can also catch the baseclass :exc:`BadData` directly.
- """
-
- def __init__(self, message, payload=None):
- BadData.__init__(self, message)
- #: The payload that failed the signature test. In some
- #: situations you might still want to inspect this, even if
- #: you know it was tampered with.
- #:
- #: .. versionadded:: 0.14
- self.payload = payload
-
-
-class BadTimeSignature(BadSignature):
- """Raised for time based signatures that fail. This is a subclass
- of :class:`BadSignature` so you can catch those down as well.
- """
-
- def __init__(self, message, payload=None, date_signed=None):
- BadSignature.__init__(self, message, payload)
-
-        #: If the signature expired, this exposes the date when the
-        #: signature was created. This can be helpful in order to
-        #: tell the user how long ago a link went stale.
- #:
- #: .. versionadded:: 0.14
- self.date_signed = date_signed
-
-
-class BadHeader(BadSignature):
- """Raised if a signed header is invalid in some form. This only
- happens for serializers that have a header that goes with the
- signature.
-
- .. versionadded:: 0.24
- """
-
- def __init__(self, message, payload=None, header=None,
- original_error=None):
- BadSignature.__init__(self, message, payload)
-
- #: If the header is actually available but just malformed it
- #: might be stored here.
- self.header = header
-
- #: If available, the error that indicates why the payload
- #: was not valid. This might be `None`.
- self.original_error = original_error
-
-
-class SignatureExpired(BadTimeSignature):
- """Signature timestamp is older than required max_age. This is a
- subclass of :exc:`BadTimeSignature` so you can use the baseclass for
- catching the error.
- """
-
-
-def base64_encode(string):
- """base64 encodes a single bytestring (and is tolerant to getting
- called with a unicode string).
- The resulting bytestring is safe for putting into URLs.
- """
- string = want_bytes(string)
- return base64.urlsafe_b64encode(string).strip(b'=')
-
-
-def base64_decode(string):
- """base64 decodes a single bytestring (and is tolerant to getting
- called with a unicode string).
- The result is also a bytestring.
- """
- string = want_bytes(string, encoding='ascii', errors='ignore')
- return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4))
-
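# Annotation, not file content: a worked round trip. The '=' padding is
# stripped on encode and restored on decode with b'=' * (-len(s) % 4),
# which keeps tokens copy-paste safe in URLs.
from itsdangerous import base64_encode, base64_decode

encoded = base64_encode(b'value')             # b'dmFsdWU' -- no trailing '='
assert base64_decode(encoded) == b'value'
assert base64_decode(u'dmFsdWU') == b'value'  # tolerant of text input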
-
-def int_to_bytes(num):
- assert num >= 0
- rv = []
- while num:
- rv.append(int_to_byte(num & 0xff))
- num >>= 8
- return b''.join(reversed(rv))
-
-
-def bytes_to_int(bytestr):
- return reduce(lambda a, b: a << 8 | b, bytearray(bytestr), 0)
-
-
-class SigningAlgorithm(object):
- """Subclasses of `SigningAlgorithm` have to implement `get_signature` to
- provide signature generation functionality.
- """
-
- def get_signature(self, key, value):
- """Returns the signature for the given key and value"""
- raise NotImplementedError()
-
- def verify_signature(self, key, value, sig):
- """Verifies the given signature matches the expected signature"""
- return constant_time_compare(sig, self.get_signature(key, value))
-
-
-class NoneAlgorithm(SigningAlgorithm):
- """This class provides a algorithm that does not perform any signing and
- returns an empty signature.
- """
-
- def get_signature(self, key, value):
- return b''
-
-
-class HMACAlgorithm(SigningAlgorithm):
- """This class provides signature generation using HMACs."""
-
- #: The digest method to use with the MAC algorithm. This defaults to sha1
- #: but can be changed for any other function in the hashlib module.
- default_digest_method = staticmethod(hashlib.sha1)
-
- def __init__(self, digest_method=None):
- if digest_method is None:
- digest_method = self.default_digest_method
- self.digest_method = digest_method
-
- def get_signature(self, key, value):
- mac = hmac.new(key, msg=value, digestmod=self.digest_method)
- return mac.digest()
-
-
-class Signer(object):
- """This class can sign bytes and unsign it and validate the signature
- provided.
-
- Salt can be used to namespace the hash, so that a signed string is only
- valid for a given namespace. Leaving this at the default value or re-using
- a salt value across different parts of your application where the same
- signed value in one part can mean something different in another part
- is a security risk.
-
- See :ref:`the-salt` for an example of what the salt is doing and how you
- can utilize it.
-
- .. versionadded:: 0.14
- `key_derivation` and `digest_method` were added as arguments to the
- class constructor.
-
- .. versionadded:: 0.18
- `algorithm` was added as an argument to the class constructor.
- """
-
- #: The digest method to use for the signer. This defaults to sha1 but can
- #: be changed for any other function in the hashlib module.
- #:
- #: .. versionchanged:: 0.14
- default_digest_method = staticmethod(hashlib.sha1)
-
- #: Controls how the key is derived. The default is Django style
- #: concatenation. Possible values are ``concat``, ``django-concat``
- #: and ``hmac``. This is used for deriving a key from the secret key
- #: with an added salt.
- #:
- #: .. versionadded:: 0.14
- default_key_derivation = 'django-concat'
-
- def __init__(self, secret_key, salt=None, sep='.', key_derivation=None,
- digest_method=None, algorithm=None):
- self.secret_key = want_bytes(secret_key)
- self.sep = sep
- self.salt = 'itsdangerous.Signer' if salt is None else salt
- if key_derivation is None:
- key_derivation = self.default_key_derivation
- self.key_derivation = key_derivation
- if digest_method is None:
- digest_method = self.default_digest_method
- self.digest_method = digest_method
- if algorithm is None:
- algorithm = HMACAlgorithm(self.digest_method)
- self.algorithm = algorithm
-
- def derive_key(self):
- """This method is called to derive the key. If you're unhappy with
- the default key derivation choices you can override them here.
- Keep in mind that the key derivation in itsdangerous is not intended
- to be used as a security method to make a complex key out of a short
- password. Instead you should use large random secret keys.
- """
- salt = want_bytes(self.salt)
- if self.key_derivation == 'concat':
- return self.digest_method(salt + self.secret_key).digest()
- elif self.key_derivation == 'django-concat':
- return self.digest_method(salt + b'signer' +
- self.secret_key).digest()
- elif self.key_derivation == 'hmac':
- mac = hmac.new(self.secret_key, digestmod=self.digest_method)
- mac.update(salt)
- return mac.digest()
- elif self.key_derivation == 'none':
- return self.secret_key
- else:
- raise TypeError('Unknown key derivation method')
-
- def get_signature(self, value):
- """Returns the signature for the given value"""
- value = want_bytes(value)
- key = self.derive_key()
- sig = self.algorithm.get_signature(key, value)
- return base64_encode(sig)
-
- def sign(self, value):
- """Signs the given string."""
- return value + want_bytes(self.sep) + self.get_signature(value)
-
- def verify_signature(self, value, sig):
- """Verifies the signature for the given value."""
- key = self.derive_key()
- try:
- sig = base64_decode(sig)
- except Exception:
- return False
- return self.algorithm.verify_signature(key, value, sig)
-
- def unsign(self, signed_value):
- """Unsigns the given string."""
- signed_value = want_bytes(signed_value)
- sep = want_bytes(self.sep)
- if sep not in signed_value:
- raise BadSignature('No %r found in value' % self.sep)
- value, sig = signed_value.rsplit(sep, 1)
- if self.verify_signature(value, sig):
- return value
- raise BadSignature('Signature %r does not match' % sig,
- payload=value)
-
- def validate(self, signed_value):
- """Just validates the given signed value. Returns `True` if the
- signature exists and is valid, `False` otherwise."""
- try:
- self.unsign(signed_value)
- return True
- except BadSignature:
- return False
-
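# Annotation, not file content: a sign/unsign round trip showing the salt
# namespacing the docstring describes. 'activate' and 'reset' are example
# salts.
from itsdangerous import Signer, BadSignature

signer = Signer('secret-key', salt='activate')
token = signer.sign(b'user-42')          # b'user-42.<base64 signature>'
assert signer.unsign(token) == b'user-42'

# Same secret, different salt: a different key is derived, so the token
# from the 'activate' namespace is rejected in the 'reset' namespace.
try:
    Signer('secret-key', salt='reset').unsign(token)
except BadSignature:
    pass  # expected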
-
-class TimestampSigner(Signer):
- """Works like the regular :class:`Signer` but also records the time
- of the signing and can be used to expire signatures. The unsign
-    method can raise a :exc:`SignatureExpired` exception if the unsigning
- failed because the signature is expired. This exception is a subclass
- of :exc:`BadSignature`.
- """
-
- def get_timestamp(self):
- """Returns the current timestamp. This implementation returns the
- seconds since 1/1/2011. The function must return an integer.
- """
- return int(time.time() - EPOCH)
-
- def timestamp_to_datetime(self, ts):
- """Used to convert the timestamp from `get_timestamp` into a
- datetime object.
- """
- return datetime.utcfromtimestamp(ts + EPOCH)
-
- def sign(self, value):
- """Signs the given string and also attaches a time information."""
- value = want_bytes(value)
- timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
- sep = want_bytes(self.sep)
- value = value + sep + timestamp
- return value + sep + self.get_signature(value)
-
- def unsign(self, value, max_age=None, return_timestamp=False):
- """Works like the regular :meth:`~Signer.unsign` but can also
- validate the time. See the base docstring of the class for
- the general behavior. If `return_timestamp` is set to `True`
- the timestamp of the signature will be returned as naive
- :class:`datetime.datetime` object in UTC.
- """
- try:
- result = Signer.unsign(self, value)
- sig_error = None
- except BadSignature as e:
- sig_error = e
- result = e.payload or b''
- sep = want_bytes(self.sep)
-
- # If there is no timestamp in the result there is something
- # seriously wrong. In case there was a signature error, we raise
- # that one directly, otherwise we have a weird situation in which
- # we shouldn't have come except someone uses a time-based serializer
- # on non-timestamp data, so catch that.
-        if sep not in result:
- if sig_error:
- raise sig_error
- raise BadTimeSignature('timestamp missing', payload=result)
-
- value, timestamp = result.rsplit(sep, 1)
- try:
- timestamp = bytes_to_int(base64_decode(timestamp))
- except Exception:
- timestamp = None
-
- # Signature is *not* okay. Raise a proper error now that we have
- # split the value and the timestamp.
- if sig_error is not None:
- raise BadTimeSignature(text_type(sig_error), payload=value,
- date_signed=timestamp)
-
- # Signature was okay but the timestamp is actually not there or
- # malformed. Should not happen, but well. We handle it nonetheless
- if timestamp is None:
- raise BadTimeSignature('Malformed timestamp', payload=value)
-
- # Check timestamp is not older than max_age
- if max_age is not None:
- age = self.get_timestamp() - timestamp
- if age > max_age:
- raise SignatureExpired(
- 'Signature age %s > %s seconds' % (age, max_age),
- payload=value,
- date_signed=self.timestamp_to_datetime(timestamp))
-
- if return_timestamp:
- return value, self.timestamp_to_datetime(timestamp)
- return value
-
- def validate(self, signed_value, max_age=None):
- """Just validates the given signed value. Returns `True` if the
- signature exists and is valid, `False` otherwise."""
- try:
- self.unsign(signed_value, max_age=max_age)
- return True
- except BadSignature:
- return False
-
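# Annotation, not file content: unsign(max_age=...) compares the embedded
# timestamp against the current time and raises SignatureExpired once the
# token is older than max_age seconds.
import time
from itsdangerous import TimestampSigner, SignatureExpired

ts = TimestampSigner('secret-key')
token = ts.sign(b'hello')
assert ts.unsign(token, max_age=60) == b'hello'

time.sleep(2)
try:
    ts.unsign(token, max_age=1)
except SignatureExpired as e:
    print('signed at', e.date_signed)  # naive UTC datetime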
-
-class Serializer(object):
- """This class provides a serialization interface on top of the
- signer. It provides a similar API to json/pickle and other modules but is
-    structured slightly differently internally. If you want to change the
- underlying implementation for parsing and loading you have to override the
- :meth:`load_payload` and :meth:`dump_payload` functions.
-
- This implementation uses simplejson if available for dumping and loading
- and will fall back to the standard library's json module if it's not
- available.
-
- Starting with 0.14 you do not need to subclass this class in order to
-    switch out or customize the :class:`Signer`. You can instead pass a
-    different class to the constructor, as well as a dictionary of keyword
-    arguments that should be forwarded to it::
-
- s = Serializer(signer_kwargs={'key_derivation': 'hmac'})
-
- .. versionchanged:: 0.14:
- The `signer` and `signer_kwargs` parameters were added to the
- constructor.
- """
-
- #: If a serializer module or class is not passed to the constructor
- #: this one is picked up. This currently defaults to :mod:`json`.
- default_serializer = json
-
- #: The default :class:`Signer` class that is being used by this
- #: serializer.
- #:
- #: .. versionadded:: 0.14
- default_signer = Signer
-
- def __init__(self, secret_key, salt=b'itsdangerous', serializer=None,
- signer=None, signer_kwargs=None):
- self.secret_key = want_bytes(secret_key)
- self.salt = want_bytes(salt)
- if serializer is None:
- serializer = self.default_serializer
- self.serializer = serializer
- self.is_text_serializer = is_text_serializer(serializer)
- if signer is None:
- signer = self.default_signer
- self.signer = signer
- self.signer_kwargs = signer_kwargs or {}
-
- def load_payload(self, payload, serializer=None):
- """Loads the encoded object. This function raises :class:`BadPayload`
- if the payload is not valid. The `serializer` parameter can be used to
- override the serializer stored on the class. The encoded payload is
- always byte based.
- """
- if serializer is None:
- serializer = self.serializer
- is_text = self.is_text_serializer
- else:
- is_text = is_text_serializer(serializer)
- try:
- if is_text:
- payload = payload.decode('utf-8')
- return serializer.loads(payload)
- except Exception as e:
- raise BadPayload('Could not load the payload because an '
- 'exception occurred on unserializing the data',
- original_error=e)
-
- def dump_payload(self, obj):
- """Dumps the encoded object. The return value is always a
- bytestring. If the internal serializer is text based the value
- will automatically be encoded to utf-8.
- """
- return want_bytes(self.serializer.dumps(obj))
-
- def make_signer(self, salt=None):
- """A method that creates a new instance of the signer to be used.
- The default implementation uses the :class:`Signer` baseclass.
- """
- if salt is None:
- salt = self.salt
- return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
-
- def dumps(self, obj, salt=None):
- """Returns a signed string serialized with the internal serializer.
- The return value can be either a byte or unicode string depending
- on the format of the internal serializer.
- """
- payload = want_bytes(self.dump_payload(obj))
- rv = self.make_signer(salt).sign(payload)
- if self.is_text_serializer:
- rv = rv.decode('utf-8')
- return rv
-
- def dump(self, obj, f, salt=None):
- """Like :meth:`dumps` but dumps into a file. The file handle has
- to be compatible with what the internal serializer expects.
- """
- f.write(self.dumps(obj, salt))
-
- def loads(self, s, salt=None):
- """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
- signature validation fails.
- """
- s = want_bytes(s)
- return self.load_payload(self.make_signer(salt).unsign(s))
-
- def load(self, f, salt=None):
- """Like :meth:`loads` but loads from a file."""
- return self.loads(f.read(), salt)
-
- def loads_unsafe(self, s, salt=None):
- """Like :meth:`loads` but without verifying the signature. This is
- potentially very dangerous to use depending on how your serializer
- works. The return value is ``(signature_okay, payload)`` instead of
- just the payload. The first item will be a boolean that indicates
- if the signature is okay (``True``) or if it failed. This function
- never fails.
-
- Use it for debugging only and if you know that your serializer module
- is not exploitable (eg: do not use it with a pickle serializer).
-
- .. versionadded:: 0.15
- """
- return self._loads_unsafe_impl(s, salt)
-
- def _loads_unsafe_impl(self, s, salt, load_kwargs=None,
- load_payload_kwargs=None):
- """Lowlevel helper function to implement :meth:`loads_unsafe` in
- serializer subclasses.
- """
- try:
- return True, self.loads(s, salt=salt, **(load_kwargs or {}))
- except BadSignature as e:
- if e.payload is None:
- return False, None
- try:
- return False, self.load_payload(e.payload,
- **(load_payload_kwargs or {}))
- except BadPayload:
- return False, None
-
- def load_unsafe(self, f, *args, **kwargs):
- """Like :meth:`loads_unsafe` but loads from a file.
-
- .. versionadded:: 0.15
- """
- return self.loads_unsafe(f.read(), *args, **kwargs)
-
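# Annotation, not file content: the serializer pairs a JSON payload with a
# Signer; loads_unsafe() is the debugging variant that reports validity
# instead of raising.
from itsdangerous import Serializer

s = Serializer('secret-key')
blob = s.dumps({'id': 5})         # '{"id": 5}.<signature>'
assert s.loads(blob) == {'id': 5}

ok, payload = s.loads_unsafe(blob + 'x')  # tampered signature
assert not ok                             # ...but the payload is still parseable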
-
-class TimedSerializer(Serializer):
- """Uses the :class:`TimestampSigner` instead of the default
- :meth:`Signer`.
- """
-
- default_signer = TimestampSigner
-
- def loads(self, s, max_age=None, return_timestamp=False, salt=None):
- """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
- signature validation fails. If a `max_age` is provided it will
- ensure the signature is not older than that time in seconds. In
- case the signature is outdated, :exc:`SignatureExpired` is raised
- which is a subclass of :exc:`BadSignature`. All arguments are
- forwarded to the signer's :meth:`~TimestampSigner.unsign` method.
- """
- base64d, timestamp = self.make_signer(salt) \
- .unsign(s, max_age, return_timestamp=True)
- payload = self.load_payload(base64d)
- if return_timestamp:
- return payload, timestamp
- return payload
-
- def loads_unsafe(self, s, max_age=None, salt=None):
- load_kwargs = {'max_age': max_age}
- load_payload_kwargs = {}
- return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs)
-
-
-class JSONWebSignatureSerializer(Serializer):
- """This serializer implements JSON Web Signature (JWS) support. Only
- supports the JWS Compact Serialization.
- """
-
- jws_algorithms = {
- 'HS256': HMACAlgorithm(hashlib.sha256),
- 'HS384': HMACAlgorithm(hashlib.sha384),
- 'HS512': HMACAlgorithm(hashlib.sha512),
- 'none': NoneAlgorithm(),
- }
-
- #: The default algorithm to use for signature generation
- default_algorithm = 'HS256'
-
- default_serializer = compact_json
-
- def __init__(self, secret_key, salt=None, serializer=None,
- signer=None, signer_kwargs=None, algorithm_name=None):
- Serializer.__init__(self, secret_key, salt, serializer,
- signer, signer_kwargs)
- if algorithm_name is None:
- algorithm_name = self.default_algorithm
- self.algorithm_name = algorithm_name
- self.algorithm = self.make_algorithm(algorithm_name)
-
- def load_payload(self, payload, return_header=False):
- payload = want_bytes(payload)
- if b'.' not in payload:
- raise BadPayload('No "." found in value')
- base64d_header, base64d_payload = payload.split(b'.', 1)
- try:
- json_header = base64_decode(base64d_header)
- except Exception as e:
- raise BadHeader('Could not base64 decode the header because of '
- 'an exception', original_error=e)
- try:
- json_payload = base64_decode(base64d_payload)
- except Exception as e:
- raise BadPayload('Could not base64 decode the payload because of '
- 'an exception', original_error=e)
- try:
- header = Serializer.load_payload(self, json_header,
- serializer=json)
- except BadData as e:
- raise BadHeader('Could not unserialize header because it was '
- 'malformed', original_error=e)
- if not isinstance(header, dict):
- raise BadHeader('Header payload is not a JSON object',
- header=header)
- payload = Serializer.load_payload(self, json_payload)
- if return_header:
- return payload, header
- return payload
-
- def dump_payload(self, header, obj):
- base64d_header = base64_encode(self.serializer.dumps(header))
- base64d_payload = base64_encode(self.serializer.dumps(obj))
- return base64d_header + b'.' + base64d_payload
-
- def make_algorithm(self, algorithm_name):
- try:
- return self.jws_algorithms[algorithm_name]
- except KeyError:
- raise NotImplementedError('Algorithm not supported')
-
- def make_signer(self, salt=None, algorithm=None):
- if salt is None:
- salt = self.salt
- key_derivation = 'none' if salt is None else None
- if algorithm is None:
- algorithm = self.algorithm
- return self.signer(self.secret_key, salt=salt, sep='.',
- key_derivation=key_derivation, algorithm=algorithm)
-
- def make_header(self, header_fields):
- header = header_fields.copy() if header_fields else {}
- header['alg'] = self.algorithm_name
- return header
-
- def dumps(self, obj, salt=None, header_fields=None):
- """Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It
- also allows for specifying additional fields to be included in the JWS
- Header.
- """
- header = self.make_header(header_fields)
- signer = self.make_signer(salt, self.algorithm)
- return signer.sign(self.dump_payload(header, obj))
-
- def loads(self, s, salt=None, return_header=False):
- """Reverse of :meth:`dumps`. If requested via `return_header` it will
- return a tuple of payload and header.
- """
- payload, header = self.load_payload(
- self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
- return_header=True)
- if header.get('alg') != self.algorithm_name:
- raise BadHeader('Algorithm mismatch', header=header,
- payload=payload)
- if return_header:
- return payload, header
- return payload
-
- def loads_unsafe(self, s, salt=None, return_header=False):
- kwargs = {'return_header': return_header}
- return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
-
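# Annotation, not file content: the compact JWS form is three dot-separated
# URL-safe base64 segments (header.payload.signature); the header carries
# the algorithm name and is checked again on load.
from itsdangerous import JSONWebSignatureSerializer

jws = JSONWebSignatureSerializer('secret-key', algorithm_name='HS256')
token = jws.dumps({'user': 'alice'})
assert token.count(b'.') == 2

payload, header = jws.loads(token, return_header=True)
assert payload == {'user': 'alice'} and header['alg'] == 'HS256'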
-
-class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
- """Works like the regular :class:`JSONWebSignatureSerializer` but also
- records the time of the signing and can be used to expire signatures.
-
-    JWS currently does not specify this behavior but it mentions a possible
-    extension like this in the spec. The expiry date is encoded into the
-    header similarly as specified in `draft-ietf-oauth-json-web-token`.
-    """
-
-        if not (isinstance(header['exp'], number_types)
-                and header['exp'] > 0):
-            raise BadSignature('expiry date is not an IntDate',
-                               payload=payload)
-
- if header['exp'] < self.now():
- raise SignatureExpired('Signature expired', payload=payload,
- date_signed=self.get_issue_date(header))
-
- if return_header:
- return payload, header
- return payload
-
- def get_issue_date(self, header):
- rv = header.get('iat')
- if isinstance(rv, number_types):
- return datetime.utcfromtimestamp(int(rv))
-
- def now(self):
- return int(time.time())
-
-
-class URLSafeSerializerMixin(object):
- """Mixed in with a regular serializer it will attempt to zlib compress
- the string to make it shorter if necessary. It will also base64 encode
- the string so that it can safely be placed in a URL.
- """
-
- def load_payload(self, payload):
- decompress = False
- if payload.startswith(b'.'):
- payload = payload[1:]
- decompress = True
- try:
- json = base64_decode(payload)
- except Exception as e:
- raise BadPayload('Could not base64 decode the payload because of '
- 'an exception', original_error=e)
- if decompress:
- try:
- json = zlib.decompress(json)
- except Exception as e:
- raise BadPayload('Could not zlib decompress the payload before '
- 'decoding the payload', original_error=e)
- return super(URLSafeSerializerMixin, self).load_payload(json)
-
- def dump_payload(self, obj):
- json = super(URLSafeSerializerMixin, self).dump_payload(obj)
- is_compressed = False
- compressed = zlib.compress(json)
- if len(compressed) < (len(json) - 1):
- json = compressed
- is_compressed = True
- base64d = base64_encode(json)
- if is_compressed:
- base64d = b'.' + base64d
- return base64d
-
-
-class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
- """Works like :class:`Serializer` but dumps and loads into a URL
- safe string consisting of the upper and lowercase character of the
- alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
- """
- default_serializer = compact_json
-
-
-class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
- """Works like :class:`TimedSerializer` but dumps and loads into a URL
- safe string consisting of the upper and lowercase character of the
- alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
- """
- default_serializer = compact_json
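# Annotation, not file content: the classic use of this class is signing
# short-lived links, e.g. e-mail confirmation tokens; Flask's own session
# cookie machinery builds on it as well.
from itsdangerous import (URLSafeTimedSerializer, SignatureExpired,
                          BadSignature)

ts = URLSafeTimedSerializer('secret-key', salt='email-confirm')
token = ts.dumps('user@example.com')       # compact, URL-safe text

try:
    email = ts.loads(token, max_age=3600)  # valid for one hour
except SignatureExpired:
    email = None  # expired: ask the user to request a new link
except BadSignature:
    email = None  # tampered or truncated token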
diff --git a/venv/Lib/site-packages/jinja2/__init__.py b/venv/Lib/site-packages/jinja2/__init__.py
deleted file mode 100644
index 42aa763..0000000
--- a/venv/Lib/site-packages/jinja2/__init__.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2
- ~~~~~~
-
- Jinja2 is a template engine written in pure Python. It provides a
- Django inspired non-XML syntax but supports inline expressions and
- an optional sandboxed environment.
-
- Nutshell
- --------
-
-    Here is a small example of a Jinja2 template::
-
-        {% extends 'base.html' %}
-        {% block title %}Memberlist{% endblock %}
-        {% block content %}
-          <ul>
-          {% for user in users %}
-            <li><a href="{{ user.url }}">{{ user.username }}</a></li>
-          {% endfor %}
-          </ul>
-        {% endblock %}
-
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-__docformat__ = 'restructuredtext en'
-__version__ = '2.10'
-
-# high level interface
-from jinja2.environment import Environment, Template
-
-# loaders
-from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
- DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
- ModuleLoader
-
-# bytecode caches
-from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
- MemcachedBytecodeCache
-
-# undefined types
-from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
- make_logging_undefined
-
-# exceptions
-from jinja2.exceptions import TemplateError, UndefinedError, \
- TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
- TemplateAssertionError, TemplateRuntimeError
-
-# decorators and public utilities
-from jinja2.filters import environmentfilter, contextfilter, \
- evalcontextfilter
-from jinja2.utils import Markup, escape, clear_caches, \
- environmentfunction, evalcontextfunction, contextfunction, \
- is_undefined, select_autoescape
-
-__all__ = [
- 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
- 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
- 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
- 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
- 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
- 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
- 'TemplateRuntimeError',
- 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
- 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
- 'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
- 'select_autoescape',
-]
-
-
-def _patch_async():
- from jinja2.utils import have_async_gen
- if have_async_gen:
- from jinja2.asyncsupport import patch_all
- patch_all()
-
-
-_patch_async()
-del _patch_async
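# Annotation, not file content: the high-level interface re-exported above,
# in a minimal round trip. DictLoader stands in for a real template
# directory.
from jinja2 import Environment, DictLoader

env = Environment(
    loader=DictLoader({'hello.html': u'Hello {{ name }}!'}),
    autoescape=True,
)
print(env.get_template('hello.html').render(name='<world>'))
# -> Hello &lt;world&gt;!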
diff --git a/venv/Lib/site-packages/jinja2/_compat.py b/venv/Lib/site-packages/jinja2/_compat.py
deleted file mode 100644
index 61d8530..0000000
--- a/venv/Lib/site-packages/jinja2/_compat.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2._compat
- ~~~~~~~~~~~~~~
-
- Some py2/py3 compatibility support based on a stripped down
- version of six so we don't have to depend on a specific version
- of it.
-
- :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-import sys
-
-PY2 = sys.version_info[0] == 2
-PYPY = hasattr(sys, 'pypy_translation_info')
-_identity = lambda x: x
-
-
-if not PY2:
- unichr = chr
- range_type = range
- text_type = str
- string_types = (str,)
- integer_types = (int,)
-
- iterkeys = lambda d: iter(d.keys())
- itervalues = lambda d: iter(d.values())
- iteritems = lambda d: iter(d.items())
-
- import pickle
- from io import BytesIO, StringIO
- NativeStringIO = StringIO
-
- def reraise(tp, value, tb=None):
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
- ifilter = filter
- imap = map
- izip = zip
- intern = sys.intern
-
- implements_iterator = _identity
- implements_to_string = _identity
- encode_filename = _identity
-
-else:
- unichr = unichr
- text_type = unicode
- range_type = xrange
- string_types = (str, unicode)
- integer_types = (int, long)
-
- iterkeys = lambda d: d.iterkeys()
- itervalues = lambda d: d.itervalues()
- iteritems = lambda d: d.iteritems()
-
- import cPickle as pickle
- from cStringIO import StringIO as BytesIO, StringIO
- NativeStringIO = BytesIO
-
- exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
-
- from itertools import imap, izip, ifilter
- intern = intern
-
- def implements_iterator(cls):
- cls.next = cls.__next__
- del cls.__next__
- return cls
-
- def implements_to_string(cls):
- cls.__unicode__ = cls.__str__
- cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
- return cls
-
- def encode_filename(filename):
- if isinstance(filename, unicode):
- return filename.encode('utf-8')
- return filename
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- # This requires a bit of explanation: the basic idea is to make a
- # dummy metaclass for one level of class instantiation that replaces
- # itself with the actual metaclass.
- class metaclass(type):
- def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
-
-
-try:
- from urllib.parse import quote_from_bytes as url_quote
-except ImportError:
- from urllib import quote as url_quote
diff --git a/venv/Lib/site-packages/jinja2/_identifier.py b/venv/Lib/site-packages/jinja2/_identifier.py
deleted file mode 100644
index 2eac35d..0000000
--- a/venv/Lib/site-packages/jinja2/_identifier.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# generated by scripts/generate_identifier_pattern.py
-pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯'
diff --git a/venv/Lib/site-packages/jinja2/asyncfilters.py b/venv/Lib/site-packages/jinja2/asyncfilters.py
deleted file mode 100644
index 5c1f46d..0000000
--- a/venv/Lib/site-packages/jinja2/asyncfilters.py
+++ /dev/null
@@ -1,146 +0,0 @@
-from functools import wraps
-
-from jinja2.asyncsupport import auto_aiter
-from jinja2 import filters
-
-
-async def auto_to_seq(value):
- seq = []
- if hasattr(value, '__aiter__'):
- async for item in value:
- seq.append(item)
- else:
- for item in value:
- seq.append(item)
- return seq
-
-
-async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
- seq, func = filters.prepare_select_or_reject(
- args, kwargs, modfunc, lookup_attr)
- if seq:
- async for item in auto_aiter(seq):
- if func(item):
- yield item
-
-
-def dualfilter(normal_filter, async_filter):
- wrap_evalctx = False
- if getattr(normal_filter, 'environmentfilter', False):
- is_async = lambda args: args[0].is_async
- wrap_evalctx = False
- else:
- if not getattr(normal_filter, 'evalcontextfilter', False) and \
- not getattr(normal_filter, 'contextfilter', False):
- wrap_evalctx = True
- is_async = lambda args: args[0].environment.is_async
-
- @wraps(normal_filter)
- def wrapper(*args, **kwargs):
- b = is_async(args)
- if wrap_evalctx:
- args = args[1:]
- if b:
- return async_filter(*args, **kwargs)
- return normal_filter(*args, **kwargs)
-
- if wrap_evalctx:
- wrapper.evalcontextfilter = True
-
- wrapper.asyncfiltervariant = True
-
- return wrapper
-
-
-def asyncfiltervariant(original):
- def decorator(f):
- return dualfilter(original, f)
- return decorator
-
-
-@asyncfiltervariant(filters.do_first)
-async def do_first(environment, seq):
- try:
- return await auto_aiter(seq).__anext__()
- except StopAsyncIteration:
- return environment.undefined('No first item, sequence was empty.')
-
-
-@asyncfiltervariant(filters.do_groupby)
-async def do_groupby(environment, value, attribute):
- expr = filters.make_attrgetter(environment, attribute)
- return [filters._GroupTuple(key, await auto_to_seq(values))
- for key, values in filters.groupby(sorted(
- await auto_to_seq(value), key=expr), expr)]
-
-
-@asyncfiltervariant(filters.do_join)
-async def do_join(eval_ctx, value, d=u'', attribute=None):
- return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
-
-
-@asyncfiltervariant(filters.do_list)
-async def do_list(value):
- return await auto_to_seq(value)
-
-
-@asyncfiltervariant(filters.do_reject)
-async def do_reject(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: not x, False)
-
-
-@asyncfiltervariant(filters.do_rejectattr)
-async def do_rejectattr(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: not x, True)
-
-
-@asyncfiltervariant(filters.do_select)
-async def do_select(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: x, False)
-
-
-@asyncfiltervariant(filters.do_selectattr)
-async def do_selectattr(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: x, True)
-
-
-@asyncfiltervariant(filters.do_map)
-async def do_map(*args, **kwargs):
- seq, func = filters.prepare_map(args, kwargs)
- if seq:
- async for item in auto_aiter(seq):
- yield func(item)
-
-
-@asyncfiltervariant(filters.do_sum)
-async def do_sum(environment, iterable, attribute=None, start=0):
- rv = start
- if attribute is not None:
- func = filters.make_attrgetter(environment, attribute)
- else:
- func = lambda x: x
- async for item in auto_aiter(iterable):
- rv += func(item)
- return rv
-
-
-@asyncfiltervariant(filters.do_slice)
-async def do_slice(value, slices, fill_with=None):
- return filters.do_slice(await auto_to_seq(value), slices, fill_with)
-
-
-ASYNC_FILTERS = {
- 'first': do_first,
- 'groupby': do_groupby,
- 'join': do_join,
- 'list': do_list,
- # we intentionally do not support do_last because that would be
- # ridiculous
- 'reject': do_reject,
- 'rejectattr': do_rejectattr,
- 'map': do_map,
- 'select': do_select,
- 'selectattr': do_selectattr,
- 'sum': do_sum,
- 'slice': do_slice,
-}
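# Annotation, not file content: a sketch of what these variants enable once
# asyncsupport's patching has swapped them in. Requires Python 3.6+ for
# async generators; Jinja 2.10 behavior is assumed.
import asyncio
from jinja2 import Environment

env = Environment(enable_async=True)
tmpl = env.from_string(u'{{ items | sum }}')


async def produce():
    for i in (1, 2, 3):
        yield i


async def main():
    # The async 'sum' filter drains the async iterator transparently.
    print(await tmpl.render_async(items=produce()))  # -> 6


asyncio.get_event_loop().run_until_complete(main())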
diff --git a/venv/Lib/site-packages/jinja2/asyncsupport.py b/venv/Lib/site-packages/jinja2/asyncsupport.py
deleted file mode 100644
index b1e7b5c..0000000
--- a/venv/Lib/site-packages/jinja2/asyncsupport.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.asyncsupport
- ~~~~~~~~~~~~~~~~~~~
-
- Has all the code for async support which is implemented as a patch
- for supported Python versions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import sys
-import asyncio
-import inspect
-from functools import update_wrapper
-
-from jinja2.utils import concat, internalcode, Markup
-from jinja2.environment import TemplateModule
-from jinja2.runtime import LoopContextBase, _last_iteration
-
-
-async def concat_async(async_gen):
- rv = []
- async def collect():
- async for event in async_gen:
- rv.append(event)
- await collect()
- return concat(rv)
-
-
-async def generate_async(self, *args, **kwargs):
- vars = dict(*args, **kwargs)
- try:
- async for event in self.root_render_func(self.new_context(vars)):
- yield event
- except Exception:
- exc_info = sys.exc_info()
- else:
- return
- yield self.environment.handle_exception(exc_info, True)
-
-
-def wrap_generate_func(original_generate):
- def _convert_generator(self, loop, args, kwargs):
- async_gen = self.generate_async(*args, **kwargs)
- try:
- while 1:
- yield loop.run_until_complete(async_gen.__anext__())
- except StopAsyncIteration:
- pass
- def generate(self, *args, **kwargs):
- if not self.environment.is_async:
- return original_generate(self, *args, **kwargs)
- return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
- return update_wrapper(generate, original_generate)
-
-
-async def render_async(self, *args, **kwargs):
- if not self.environment.is_async:
- raise RuntimeError('The environment was not created with async mode '
- 'enabled.')
-
- vars = dict(*args, **kwargs)
- ctx = self.new_context(vars)
-
- try:
- return await concat_async(self.root_render_func(ctx))
- except Exception:
- exc_info = sys.exc_info()
- return self.environment.handle_exception(exc_info, True)
-
-
-def wrap_render_func(original_render):
- def render(self, *args, **kwargs):
- if not self.environment.is_async:
- return original_render(self, *args, **kwargs)
- loop = asyncio.get_event_loop()
- return loop.run_until_complete(self.render_async(*args, **kwargs))
- return update_wrapper(render, original_render)
-
-
-def wrap_block_reference_call(original_call):
- @internalcode
- async def async_call(self):
- rv = await concat_async(self._stack[self._depth](self._context))
- if self._context.eval_ctx.autoescape:
- rv = Markup(rv)
- return rv
-
- @internalcode
- def __call__(self):
- if not self._context.environment.is_async:
- return original_call(self)
- return async_call(self)
-
- return update_wrapper(__call__, original_call)
-
-
-def wrap_macro_invoke(original_invoke):
- @internalcode
- async def async_invoke(self, arguments, autoescape):
- rv = await self._func(*arguments)
- if autoescape:
- rv = Markup(rv)
- return rv
-
- @internalcode
- def _invoke(self, arguments, autoescape):
- if not self._environment.is_async:
- return original_invoke(self, arguments, autoescape)
- return async_invoke(self, arguments, autoescape)
- return update_wrapper(_invoke, original_invoke)
-
-
-@internalcode
-async def get_default_module_async(self):
- if self._module is not None:
- return self._module
- self._module = rv = await self.make_module_async()
- return rv
-
-
-def wrap_default_module(original_default_module):
- @internalcode
- def _get_default_module(self):
- if self.environment.is_async:
- raise RuntimeError('Template module attribute is unavailable '
- 'in async mode')
- return original_default_module(self)
- return _get_default_module
-
-
-async def make_module_async(self, vars=None, shared=False, locals=None):
- context = self.new_context(vars, shared, locals)
- body_stream = []
- async for item in self.root_render_func(context):
- body_stream.append(item)
- return TemplateModule(self, context, body_stream)
-
-
-def patch_template():
- from jinja2 import Template
- Template.generate = wrap_generate_func(Template.generate)
- Template.generate_async = update_wrapper(
- generate_async, Template.generate_async)
- Template.render_async = update_wrapper(
- render_async, Template.render_async)
- Template.render = wrap_render_func(Template.render)
- Template._get_default_module = wrap_default_module(
- Template._get_default_module)
- Template._get_default_module_async = get_default_module_async
- Template.make_module_async = update_wrapper(
- make_module_async, Template.make_module_async)
-
-
-def patch_runtime():
- from jinja2.runtime import BlockReference, Macro
- BlockReference.__call__ = wrap_block_reference_call(
- BlockReference.__call__)
- Macro._invoke = wrap_macro_invoke(Macro._invoke)
-
-
-def patch_filters():
- from jinja2.filters import FILTERS
- from jinja2.asyncfilters import ASYNC_FILTERS
- FILTERS.update(ASYNC_FILTERS)
-
-
-def patch_all():
- patch_template()
- patch_runtime()
- patch_filters()
-
-
-async def auto_await(value):
- if inspect.isawaitable(value):
- return await value
- return value
-
-
-async def auto_aiter(iterable):
- if hasattr(iterable, '__aiter__'):
- async for item in iterable:
- yield item
- return
- for item in iterable:
- yield item
-
-
-class AsyncLoopContext(LoopContextBase):
-
- def __init__(self, async_iterator, undefined, after, length, recurse=None,
- depth0=0):
- LoopContextBase.__init__(self, undefined, recurse, depth0)
- self._async_iterator = async_iterator
- self._after = after
- self._length = length
-
- @property
- def length(self):
- if self._length is None:
- raise TypeError('Loop length for some iterators cannot be '
- 'lazily calculated in async mode')
- return self._length
-
- def __aiter__(self):
- return AsyncLoopContextIterator(self)
-
-
-class AsyncLoopContextIterator(object):
- __slots__ = ('context',)
-
- def __init__(self, context):
- self.context = context
-
- def __aiter__(self):
- return self
-
- async def __anext__(self):
- ctx = self.context
- ctx.index0 += 1
- if ctx._after is _last_iteration:
- raise StopAsyncIteration()
- ctx._before = ctx._current
- ctx._current = ctx._after
- try:
- ctx._after = await ctx._async_iterator.__anext__()
- except StopAsyncIteration:
- ctx._after = _last_iteration
- return ctx._current, ctx
-
-
-async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
- # Length is more complicated and less efficient in async mode. The
-    # reason for this is that we cannot know upfront if length will be
-    # used, and because length is a property we cannot lazily compute it
-    # later. This means that we need to buffer it up and measure :(
- #
- # We however only do this for actual iterators, not for async
- # iterators as blocking here does not seem like the best idea in the
- # world.
- try:
- length = len(iterable)
- except (TypeError, AttributeError):
- if not hasattr(iterable, '__aiter__'):
- iterable = tuple(iterable)
- length = len(iterable)
- else:
- length = None
- async_iterator = auto_aiter(iterable)
- try:
- after = await async_iterator.__anext__()
- except StopAsyncIteration:
- after = _last_iteration
- return AsyncLoopContext(async_iterator, undefined, after, length, recurse,
- depth0)
diff --git a/venv/Lib/site-packages/jinja2/bccache.py b/venv/Lib/site-packages/jinja2/bccache.py
deleted file mode 100644
index 080e527..0000000
--- a/venv/Lib/site-packages/jinja2/bccache.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.bccache
- ~~~~~~~~~~~~~~
-
-    This module implements the bytecode cache system Jinja optionally
-    uses. This is useful if you have very complex template situations and
-    the compilation of all those templates slows down your application too
-    much.
-
- Situations where this is useful are often forking web applications that
- are initialized on the first request.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-from os import path, listdir
-import os
-import sys
-import stat
-import errno
-import marshal
-import tempfile
-import fnmatch
-from hashlib import sha1
-from jinja2.utils import open_if_exists
-from jinja2._compat import BytesIO, pickle, PY2, text_type
-
-
-# marshal works better on 3.x, one hack less required
-if not PY2:
- marshal_dump = marshal.dump
- marshal_load = marshal.load
-else:
-
- def marshal_dump(code, f):
- if isinstance(f, file):
- marshal.dump(code, f)
- else:
- f.write(marshal.dumps(code))
-
- def marshal_load(f):
- if isinstance(f, file):
- return marshal.load(f)
- return marshal.loads(f.read())
-
-
-bc_version = 3
-
-# magic version used to only change with new jinja versions. With 2.6
-# we change this to also take Python version changes into account. The
-# reason for this is that Python tends to segfault if fed earlier bytecode
-# versions because someone thought it would be a good idea to reuse opcodes
-# or make Python incompatible with earlier versions.
-bc_magic = 'j2'.encode('ascii') + \
- pickle.dumps(bc_version, 2) + \
- pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
-
-
-class Bucket(object):
- """Buckets are used to store the bytecode for one template. It's created
- and initialized by the bytecode cache and passed to the loading functions.
-
- The buckets get an internal checksum from the cache assigned and use this
- to automatically reject outdated cache material. Individual bytecode
- cache subclasses don't have to care about cache invalidation.
- """
-
- def __init__(self, environment, key, checksum):
- self.environment = environment
- self.key = key
- self.checksum = checksum
- self.reset()
-
- def reset(self):
- """Resets the bucket (unloads the bytecode)."""
- self.code = None
-
- def load_bytecode(self, f):
- """Loads bytecode from a file or file like object."""
- # make sure the magic header is correct
- magic = f.read(len(bc_magic))
- if magic != bc_magic:
- self.reset()
- return
- # the source code of the file changed, we need to reload
- checksum = pickle.load(f)
- if self.checksum != checksum:
- self.reset()
- return
- # if marshal_load fails then we need to reload
- try:
- self.code = marshal_load(f)
- except (EOFError, ValueError, TypeError):
- self.reset()
- return
-
- def write_bytecode(self, f):
- """Dump the bytecode into the file or file like object passed."""
- if self.code is None:
- raise TypeError('can\'t write empty bucket')
- f.write(bc_magic)
- pickle.dump(self.checksum, f, 2)
- marshal_dump(self.code, f)
-
- def bytecode_from_string(self, string):
- """Load bytecode from a string."""
- self.load_bytecode(BytesIO(string))
-
- def bytecode_to_string(self):
- """Return the bytecode as string."""
- out = BytesIO()
- self.write_bytecode(out)
- return out.getvalue()
-
-
-class BytecodeCache(object):
- """To implement your own bytecode cache you have to subclass this class
- and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
- these methods are passed a :class:`~jinja2.bccache.Bucket`.
-
- A very basic bytecode cache that saves the bytecode on the file system::
-
- from os import path
-
- class MyCache(BytecodeCache):
-
- def __init__(self, directory):
- self.directory = directory
-
- def load_bytecode(self, bucket):
- filename = path.join(self.directory, bucket.key)
- if path.exists(filename):
- with open(filename, 'rb') as f:
- bucket.load_bytecode(f)
-
- def dump_bytecode(self, bucket):
- filename = path.join(self.directory, bucket.key)
- with open(filename, 'wb') as f:
- bucket.write_bytecode(f)
-
- A more advanced version of a filesystem based bytecode cache is part of
- Jinja2.
- """
-
- def load_bytecode(self, bucket):
- """Subclasses have to override this method to load bytecode into a
- bucket. If they are not able to find code in the cache for the
- bucket, it must not do anything.
- """
- raise NotImplementedError()
-
- def dump_bytecode(self, bucket):
- """Subclasses have to override this method to write the bytecode
-        from a bucket back to the cache. If it is unable to do so it must not
-        fail silently but should raise an exception.
- """
- raise NotImplementedError()
-
- def clear(self):
- """Clears the cache. This method is not used by Jinja2 but should be
- implemented to allow applications to clear the bytecode cache used
- by a particular environment.
- """
-
- def get_cache_key(self, name, filename=None):
- """Returns the unique hash key for this template name."""
- hash = sha1(name.encode('utf-8'))
- if filename is not None:
- filename = '|' + filename
- if isinstance(filename, text_type):
- filename = filename.encode('utf-8')
- hash.update(filename)
- return hash.hexdigest()
-
- def get_source_checksum(self, source):
- """Returns a checksum for the source."""
- return sha1(source.encode('utf-8')).hexdigest()
-
- def get_bucket(self, environment, name, filename, source):
- """Return a cache bucket for the given template. All arguments are
- mandatory but filename may be `None`.
- """
- key = self.get_cache_key(name, filename)
- checksum = self.get_source_checksum(source)
- bucket = Bucket(environment, key, checksum)
- self.load_bytecode(bucket)
- return bucket
-
- def set_bucket(self, bucket):
- """Put the bucket into the cache."""
- self.dump_bytecode(bucket)
-
-
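# Annotation, not file content: wiring a cache into an environment, using
# the filesystem implementation defined just below. Compiled templates are
# then shared by every process that can see the cache directory.
from jinja2 import Environment, DictLoader, FileSystemBytecodeCache

env = Environment(
    loader=DictLoader({'page.html': u'{{ 40 + 2 }}'}),
    bytecode_cache=FileSystemBytecodeCache(),  # default: per-user temp dir
)
print(env.get_template('page.html').render())  # 42 -- compiled once, cached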
-class FileSystemBytecodeCache(BytecodeCache):
- """A bytecode cache that stores bytecode on the filesystem. It accepts
- two arguments: The directory where the cache items are stored and a
- pattern string that is used to build the filename.
-
- If no directory is specified a default cache directory is selected. On
- Windows the user's temp directory is used, on UNIX systems a directory
- is created for the user in the system temp directory.
-
- The pattern can be used to have multiple separate caches operate on the
- same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
- is replaced with the cache key.
-
- >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
-
- This bytecode cache supports clearing of the cache using the clear method.
- """
-
- def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
- if directory is None:
- directory = self._get_default_cache_dir()
- self.directory = directory
- self.pattern = pattern
-
- def _get_default_cache_dir(self):
- def _unsafe_dir():
- raise RuntimeError('Cannot determine safe temp directory. You '
- 'need to explicitly provide one.')
-
- tmpdir = tempfile.gettempdir()
-
-        # On Windows the temporary directory is already user-specific
-        # unless explicitly forced otherwise.  We can just use that.
- if os.name == 'nt':
- return tmpdir
- if not hasattr(os, 'getuid'):
- _unsafe_dir()
-
- dirname = '_jinja2-cache-%d' % os.getuid()
- actual_dir = os.path.join(tmpdir, dirname)
-
- try:
- os.mkdir(actual_dir, stat.S_IRWXU)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
- try:
- os.chmod(actual_dir, stat.S_IRWXU)
- actual_dir_stat = os.lstat(actual_dir)
- if actual_dir_stat.st_uid != os.getuid() \
- or not stat.S_ISDIR(actual_dir_stat.st_mode) \
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
- _unsafe_dir()
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
-
- actual_dir_stat = os.lstat(actual_dir)
- if actual_dir_stat.st_uid != os.getuid() \
- or not stat.S_ISDIR(actual_dir_stat.st_mode) \
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
- _unsafe_dir()
-
- return actual_dir
-
- def _get_cache_filename(self, bucket):
- return path.join(self.directory, self.pattern % bucket.key)
-
- def load_bytecode(self, bucket):
- f = open_if_exists(self._get_cache_filename(bucket), 'rb')
- if f is not None:
- try:
- bucket.load_bytecode(f)
- finally:
- f.close()
-
- def dump_bytecode(self, bucket):
- f = open(self._get_cache_filename(bucket), 'wb')
- try:
- bucket.write_bytecode(f)
- finally:
- f.close()
-
- def clear(self):
-        # os.remove is imported lazily here because Google App Engine
-        # doesn't support write access on the file system, so the
-        # function does not exist there.
- from os import remove
- files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
- for filename in files:
- try:
- remove(path.join(self.directory, filename))
- except OSError:
- pass
-
-
-class MemcachedBytecodeCache(BytecodeCache):
- """This class implements a bytecode cache that uses a memcache cache for
- storing the information. It does not enforce a specific memcache library
- (tummy's memcache or cmemcache) but will accept any class that provides
- the minimal interface required.
-
- Libraries compatible with this class:
-
-    - werkzeug.contrib.cache
-    - python-memcached
-    - cmemcache
-
- (Unfortunately the django cache interface is not compatible because it
- does not support storing binary data, only unicode. You can however pass
- the underlying cache client to the bytecode cache which is available
- as `django.core.cache.cache._client`.)
-
- The minimal interface for the client passed to the constructor is this:
-
- .. class:: MinimalClientInterface
-
- .. method:: set(key, value[, timeout])
-
- Stores the bytecode in the cache. `value` is a string and
-            `timeout` is the timeout of the key. If timeout is not provided
-            a default timeout or no timeout should be assumed; if it's
-            provided, it's an integer giving the number of seconds the cache
- item should exist.
-
- .. method:: get(key)
-
- Returns the value for the cache key. If the item does not
- exist in the cache the return value must be `None`.
-
-    The other arguments to the constructor are the prefix that is added
-    before every cache key, and the timeout for the bytecode in the cache
-    system.  We recommend a high (or no) timeout.
-
- This bytecode cache does not support clearing of used items in the cache.
- The clear method is a no-operation function.
-
- .. versionadded:: 2.7
- Added support for ignoring memcache errors through the
- `ignore_memcache_errors` parameter.
- """
-
- def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
- ignore_memcache_errors=True):
- self.client = client
- self.prefix = prefix
- self.timeout = timeout
- self.ignore_memcache_errors = ignore_memcache_errors
-
- def load_bytecode(self, bucket):
- try:
- code = self.client.get(self.prefix + bucket.key)
- except Exception:
- if not self.ignore_memcache_errors:
- raise
- code = None
- if code is not None:
- bucket.bytecode_from_string(code)
-
- def dump_bytecode(self, bucket):
- args = (self.prefix + bucket.key, bucket.bytecode_to_string())
- if self.timeout is not None:
- args += (self.timeout,)
- try:
- self.client.set(*args)
- except Exception:
- if not self.ignore_memcache_errors:
- raise
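-
-
-# Editor's illustrative sketch (not part of the original file): the
-# minimal client interface documented above can be satisfied by any
-# object exposing ``get``/``set`` -- here a toy dict-backed client,
-# not a real memcached binding.
-class _DictClient(object):
-    def __init__(self):
-        self._store = {}
-
-    def get(self, key):
-        # Must return None for missing keys instead of raising.
-        return self._store.get(key)
-
-    def set(self, key, value, timeout=None):
-        # A real client would honor `timeout`; this toy one ignores it.
-        self._store[key] = value
-
-
-def _example_memcached_cache():
-    from jinja2 import Environment
-    return Environment(bytecode_cache=MemcachedBytecodeCache(_DictClient()))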
diff --git a/venv/Lib/site-packages/jinja2/compiler.py b/venv/Lib/site-packages/jinja2/compiler.py
deleted file mode 100644
index d534a82..0000000
--- a/venv/Lib/site-packages/jinja2/compiler.py
+++ /dev/null
@@ -1,1721 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.compiler
- ~~~~~~~~~~~~~~~
-
- Compiles nodes into python code.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from itertools import chain
-from copy import deepcopy
-from keyword import iskeyword as is_python_keyword
-from functools import update_wrapper
-from jinja2 import nodes
-from jinja2.nodes import EvalContext
-from jinja2.visitor import NodeVisitor
-from jinja2.optimizer import Optimizer
-from jinja2.exceptions import TemplateAssertionError
-from jinja2.utils import Markup, concat, escape
-from jinja2._compat import range_type, text_type, string_types, \
- iteritems, NativeStringIO, imap, izip
-from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
- VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
-
-
-operators = {
- 'eq': '==',
- 'ne': '!=',
- 'gt': '>',
- 'gteq': '>=',
- 'lt': '<',
- 'lteq': '<=',
- 'in': 'in',
- 'notin': 'not in'
-}
-
-# which method do we want to use to iterate over dict items in generated
-# code? on 2.x let's go with iteritems, on 3.x with items
-if hasattr(dict, 'iteritems'):
- dict_item_iter = 'iteritems'
-else:
- dict_item_iter = 'items'
-
-code_features = ['division']
-
-# does this python version support generator stops? (PEP 0479)
-try:
- exec('from __future__ import generator_stop')
- code_features.append('generator_stop')
-except SyntaxError:
- pass
-
-# does this python version support yield from?
-try:
- exec('def f(): yield from x()')
-except SyntaxError:
- supports_yield_from = False
-else:
- supports_yield_from = True
-
-
-def optimizeconst(f):
- def new_func(self, node, frame, **kwargs):
- # Only optimize if the frame is not volatile
- if self.optimized and not frame.eval_ctx.volatile:
- new_node = self.optimizer.visit(node, frame.eval_ctx)
- if new_node != node:
- return self.visit(new_node, frame)
- return f(self, node, frame, **kwargs)
- return update_wrapper(new_func, f)
-
-
-def generate(node, environment, name, filename, stream=None,
- defer_init=False, optimized=True):
- """Generate the python source for a node tree."""
- if not isinstance(node, nodes.Template):
- raise TypeError('Can\'t compile non template nodes')
- generator = environment.code_generator_class(environment, name, filename,
- stream, defer_init,
- optimized)
- generator.visit(node)
- if stream is None:
- return generator.stream.getvalue()
-
-
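-# Editor's illustrative sketch (assumed template source, not part of the
-# original file): generate() turns a parsed template AST into Python
-# module source; Environment.compile(..., raw=True) uses this same path.
-def _example_generate_source():
-    from jinja2 import Environment
-    env = Environment()
-    ast = env.parse(u'Hello {{ name }}!')
-    # With stream=None the generated Python source is returned as a string.
-    return generate(ast, env, 'hello', 'hello')
-
-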
-def has_safe_repr(value):
- """Does the node have a safe representation?"""
- if value is None or value is NotImplemented or value is Ellipsis:
- return True
- if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
- return True
- if type(value) in (tuple, list, set, frozenset):
- for item in value:
- if not has_safe_repr(item):
- return False
- return True
- elif type(value) is dict:
- for key, value in iteritems(value):
- if not has_safe_repr(key):
- return False
- if not has_safe_repr(value):
- return False
- return True
- return False
-
-
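-# Editor's illustrative sketch: has_safe_repr gates constant folding --
-# only values whose repr() round-trips are inlined into generated code.
-def _example_safe_repr():
-    assert has_safe_repr({'a': (1, 2.5, u'x')})  # built-in literals: safe
-    assert not has_safe_repr(object())  # arbitrary object: left to runtime
-
-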
-def find_undeclared(nodes, names):
- """Check if the names passed are accessed undeclared. The return value
-    is the set of those names that were found to be accessed undeclared.
- """
- visitor = UndeclaredNameVisitor(names)
- try:
- for node in nodes:
- visitor.visit(node)
- except VisitorExit:
- pass
- return visitor.undeclared
-
-
-class MacroRef(object):
-
- def __init__(self, node):
- self.node = node
- self.accesses_caller = False
- self.accesses_kwargs = False
- self.accesses_varargs = False
-
-
-class Frame(object):
- """Holds compile time information for us."""
-
- def __init__(self, eval_ctx, parent=None, level=None):
- self.eval_ctx = eval_ctx
- self.symbols = Symbols(parent and parent.symbols or None,
- level=level)
-
- # a toplevel frame is the root + soft frames such as if conditions.
- self.toplevel = False
-
- # the root frame is basically just the outermost frame, so no if
- # conditions. This information is used to optimize inheritance
- # situations.
- self.rootlevel = False
-
- # in some dynamic inheritance situations the compiler needs to add
- # write tests around output statements.
- self.require_output_check = parent and parent.require_output_check
-
- # inside some tags we are using a buffer rather than yield statements.
- # this for example affects {% filter %} or {% macro %}. If a frame
- # is buffered this variable points to the name of the list used as
- # buffer.
- self.buffer = None
-
- # the name of the block we're in, otherwise None.
- self.block = parent and parent.block or None
-
- # the parent of this frame
- self.parent = parent
-
- if parent is not None:
- self.buffer = parent.buffer
-
- def copy(self):
- """Create a copy of the current one."""
- rv = object.__new__(self.__class__)
- rv.__dict__.update(self.__dict__)
- rv.symbols = self.symbols.copy()
- return rv
-
- def inner(self, isolated=False):
- """Return an inner frame."""
- if isolated:
- return Frame(self.eval_ctx, level=self.symbols.level + 1)
- return Frame(self.eval_ctx, self)
-
- def soft(self):
-        """Return a soft frame.  A soft frame may not be modified as a
-        standalone thing as it shares its resources with the frame it
-        was created from, but it's not a rootlevel frame any longer.
-
- This is only used to implement if-statements.
- """
- rv = self.copy()
- rv.rootlevel = False
- return rv
-
- __copy__ = copy
-
-
-class VisitorExit(RuntimeError):
- """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
-
-
-class DependencyFinderVisitor(NodeVisitor):
- """A visitor that collects filter and test calls."""
-
- def __init__(self):
- self.filters = set()
- self.tests = set()
-
- def visit_Filter(self, node):
- self.generic_visit(node)
- self.filters.add(node.name)
-
- def visit_Test(self, node):
- self.generic_visit(node)
- self.tests.add(node.name)
-
- def visit_Block(self, node):
- """Stop visiting at blocks."""
-
-
-class UndeclaredNameVisitor(NodeVisitor):
- """A visitor that checks if a name is accessed without being
- declared. This is different from the frame visitor as it will
- not stop at closure frames.
- """
-
- def __init__(self, names):
- self.names = set(names)
- self.undeclared = set()
-
- def visit_Name(self, node):
- if node.ctx == 'load' and node.name in self.names:
- self.undeclared.add(node.name)
- if self.undeclared == self.names:
- raise VisitorExit()
- else:
- self.names.discard(node.name)
-
- def visit_Block(self, node):
-        """Stop visiting at blocks."""
-
-
-class CompilerExit(Exception):
- """Raised if the compiler encountered a situation where it just
- doesn't make sense to further process the code. Any block that
- raises such an exception is not further processed.
- """
-
-
-class CodeGenerator(NodeVisitor):
-
- def __init__(self, environment, name, filename, stream=None,
- defer_init=False, optimized=True):
- if stream is None:
- stream = NativeStringIO()
- self.environment = environment
- self.name = name
- self.filename = filename
- self.stream = stream
- self.created_block_context = False
- self.defer_init = defer_init
- self.optimized = optimized
- if optimized:
- self.optimizer = Optimizer(environment)
-
- # aliases for imports
- self.import_aliases = {}
-
- # a registry for all blocks. Because blocks are moved out
- # into the global python scope they are registered here
- self.blocks = {}
-
- # the number of extends statements so far
- self.extends_so_far = 0
-
- # some templates have a rootlevel extends. In this case we
- # can safely assume that we're a child template and do some
- # more optimizations.
- self.has_known_extends = False
-
- # the current line number
- self.code_lineno = 1
-
- # registry of all filters and tests (global, not block local)
- self.tests = {}
- self.filters = {}
-
- # the debug information
- self.debug_info = []
- self._write_debug_info = None
-
- # the number of new lines before the next write()
- self._new_lines = 0
-
- # the line number of the last written statement
- self._last_line = 0
-
- # true if nothing was written so far.
- self._first_write = True
-
- # used by the `temporary_identifier` method to get new
- # unique, temporary identifier
- self._last_identifier = 0
-
- # the current indentation
- self._indentation = 0
-
- # Tracks toplevel assignments
- self._assign_stack = []
-
- # Tracks parameter definition blocks
- self._param_def_block = []
-
- # Tracks the current context.
- self._context_reference_stack = ['context']
-
- # -- Various compilation helpers
-
- def fail(self, msg, lineno):
- """Fail with a :exc:`TemplateAssertionError`."""
- raise TemplateAssertionError(msg, lineno, self.name, self.filename)
-
- def temporary_identifier(self):
- """Get a new unique identifier."""
- self._last_identifier += 1
- return 't_%d' % self._last_identifier
-
- def buffer(self, frame):
- """Enable buffering for the frame from that point onwards."""
- frame.buffer = self.temporary_identifier()
- self.writeline('%s = []' % frame.buffer)
-
- def return_buffer_contents(self, frame, force_unescaped=False):
- """Return the buffer contents of the frame."""
- if not force_unescaped:
- if frame.eval_ctx.volatile:
- self.writeline('if context.eval_ctx.autoescape:')
- self.indent()
- self.writeline('return Markup(concat(%s))' % frame.buffer)
- self.outdent()
- self.writeline('else:')
- self.indent()
- self.writeline('return concat(%s)' % frame.buffer)
- self.outdent()
- return
- elif frame.eval_ctx.autoescape:
- self.writeline('return Markup(concat(%s))' % frame.buffer)
- return
- self.writeline('return concat(%s)' % frame.buffer)
-
- def indent(self):
- """Indent by one."""
- self._indentation += 1
-
- def outdent(self, step=1):
- """Outdent by step."""
- self._indentation -= step
-
- def start_write(self, frame, node=None):
- """Yield or write into the frame buffer."""
- if frame.buffer is None:
- self.writeline('yield ', node)
- else:
- self.writeline('%s.append(' % frame.buffer, node)
-
- def end_write(self, frame):
- """End the writing process started by `start_write`."""
- if frame.buffer is not None:
- self.write(')')
-
- def simple_write(self, s, frame, node=None):
- """Simple shortcut for start_write + write + end_write."""
- self.start_write(frame, node)
- self.write(s)
- self.end_write(frame)
-
- def blockvisit(self, nodes, frame):
-        """Visit a list of nodes as a block in a frame.  A leading ``pass``
-        is written automatically so the generated block is never empty.
- """
- try:
- self.writeline('pass')
- for node in nodes:
- self.visit(node, frame)
- except CompilerExit:
- pass
-
- def write(self, x):
- """Write a string into the output stream."""
- if self._new_lines:
- if not self._first_write:
- self.stream.write('\n' * self._new_lines)
- self.code_lineno += self._new_lines
- if self._write_debug_info is not None:
- self.debug_info.append((self._write_debug_info,
- self.code_lineno))
- self._write_debug_info = None
- self._first_write = False
- self.stream.write(' ' * self._indentation)
- self._new_lines = 0
- self.stream.write(x)
-
- def writeline(self, x, node=None, extra=0):
- """Combination of newline and write."""
- self.newline(node, extra)
- self.write(x)
-
- def newline(self, node=None, extra=0):
- """Add one or more newlines before the next write."""
- self._new_lines = max(self._new_lines, 1 + extra)
- if node is not None and node.lineno != self._last_line:
- self._write_debug_info = node.lineno
- self._last_line = node.lineno
-
- def signature(self, node, frame, extra_kwargs=None):
- """Writes a function call to the stream for the current node.
- A leading comma is added automatically. The extra keyword
-        arguments may not include python keywords, otherwise a syntax
-        error could occur.  The extra keyword arguments should be given
-        as a python dict.
- """
- # if any of the given keyword arguments is a python keyword
- # we have to make sure that no invalid call is created.
- kwarg_workaround = False
- for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
- if is_python_keyword(kwarg):
- kwarg_workaround = True
- break
-
- for arg in node.args:
- self.write(', ')
- self.visit(arg, frame)
-
- if not kwarg_workaround:
- for kwarg in node.kwargs:
- self.write(', ')
- self.visit(kwarg, frame)
- if extra_kwargs is not None:
- for key, value in iteritems(extra_kwargs):
- self.write(', %s=%s' % (key, value))
- if node.dyn_args:
- self.write(', *')
- self.visit(node.dyn_args, frame)
-
- if kwarg_workaround:
- if node.dyn_kwargs is not None:
- self.write(', **dict({')
- else:
- self.write(', **{')
- for kwarg in node.kwargs:
- self.write('%r: ' % kwarg.key)
- self.visit(kwarg.value, frame)
- self.write(', ')
- if extra_kwargs is not None:
- for key, value in iteritems(extra_kwargs):
- self.write('%r: %s, ' % (key, value))
- if node.dyn_kwargs is not None:
- self.write('}, **')
- self.visit(node.dyn_kwargs, frame)
- self.write(')')
- else:
- self.write('}')
-
- elif node.dyn_kwargs is not None:
- self.write(', **')
- self.visit(node.dyn_kwargs, frame)
-
- def pull_dependencies(self, nodes):
- """Pull all the dependencies."""
- visitor = DependencyFinderVisitor()
- for node in nodes:
- visitor.visit(node)
- for dependency in 'filters', 'tests':
- mapping = getattr(self, dependency)
- for name in getattr(visitor, dependency):
- if name not in mapping:
- mapping[name] = self.temporary_identifier()
- self.writeline('%s = environment.%s[%r]' %
- (mapping[name], dependency, name))
-
- def enter_frame(self, frame):
- undefs = []
- for target, (action, param) in iteritems(frame.symbols.loads):
- if action == VAR_LOAD_PARAMETER:
- pass
- elif action == VAR_LOAD_RESOLVE:
- self.writeline('%s = %s(%r)' %
- (target, self.get_resolve_func(), param))
- elif action == VAR_LOAD_ALIAS:
- self.writeline('%s = %s' % (target, param))
- elif action == VAR_LOAD_UNDEFINED:
- undefs.append(target)
- else:
- raise NotImplementedError('unknown load instruction')
- if undefs:
- self.writeline('%s = missing' % ' = '.join(undefs))
-
- def leave_frame(self, frame, with_python_scope=False):
- if not with_python_scope:
- undefs = []
- for target, _ in iteritems(frame.symbols.loads):
- undefs.append(target)
- if undefs:
- self.writeline('%s = missing' % ' = '.join(undefs))
-
- def func(self, name):
- if self.environment.is_async:
- return 'async def %s' % name
- return 'def %s' % name
-
- def macro_body(self, node, frame):
- """Dump the function def of a macro or call block."""
- frame = frame.inner()
- frame.symbols.analyze_node(node)
- macro_ref = MacroRef(node)
-
- explicit_caller = None
- skip_special_params = set()
- args = []
- for idx, arg in enumerate(node.args):
- if arg.name == 'caller':
- explicit_caller = idx
- if arg.name in ('kwargs', 'varargs'):
- skip_special_params.add(arg.name)
- args.append(frame.symbols.ref(arg.name))
-
- undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
-
- if 'caller' in undeclared:
- # In older Jinja2 versions there was a bug that allowed caller
- # to retain the special behavior even if it was mentioned in
-            # the argument list.  Thankfully this only really worked if it
-            # was the last argument.  So we explicitly check for this now
-            # and raise an error if "caller" appears anywhere else in the
-            # argument list.
- if explicit_caller is not None:
- try:
- node.defaults[explicit_caller - len(node.args)]
- except IndexError:
- self.fail('When defining macros or call blocks the '
- 'special "caller" argument must be omitted '
- 'or be given a default.', node.lineno)
- else:
- args.append(frame.symbols.declare_parameter('caller'))
- macro_ref.accesses_caller = True
-        if 'kwargs' in undeclared and 'kwargs' not in skip_special_params:
- args.append(frame.symbols.declare_parameter('kwargs'))
- macro_ref.accesses_kwargs = True
-        if 'varargs' in undeclared and 'varargs' not in skip_special_params:
- args.append(frame.symbols.declare_parameter('varargs'))
- macro_ref.accesses_varargs = True
-
- # macros are delayed, they never require output checks
- frame.require_output_check = False
- frame.symbols.analyze_node(node)
- self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
- self.indent()
-
- self.buffer(frame)
- self.enter_frame(frame)
-
- self.push_parameter_definitions(frame)
- for idx, arg in enumerate(node.args):
- ref = frame.symbols.ref(arg.name)
- self.writeline('if %s is missing:' % ref)
- self.indent()
- try:
- default = node.defaults[idx - len(node.args)]
- except IndexError:
- self.writeline('%s = undefined(%r, name=%r)' % (
- ref,
- 'parameter %r was not provided' % arg.name,
- arg.name))
- else:
- self.writeline('%s = ' % ref)
- self.visit(default, frame)
- self.mark_parameter_stored(ref)
- self.outdent()
- self.pop_parameter_definitions()
-
- self.blockvisit(node.body, frame)
- self.return_buffer_contents(frame, force_unescaped=True)
- self.leave_frame(frame, with_python_scope=True)
- self.outdent()
-
- return frame, macro_ref
-
- def macro_def(self, macro_ref, frame):
- """Dump the macro definition for the def created by macro_body."""
- arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
- name = getattr(macro_ref.node, 'name', None)
- if len(macro_ref.node.args) == 1:
- arg_tuple += ','
- self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
- 'context.eval_ctx.autoescape)' %
- (name, arg_tuple, macro_ref.accesses_kwargs,
- macro_ref.accesses_varargs, macro_ref.accesses_caller))
-
- def position(self, node):
- """Return a human readable position for the node."""
- rv = 'line %d' % node.lineno
- if self.name is not None:
- rv += ' in ' + repr(self.name)
- return rv
-
- def dump_local_context(self, frame):
- return '{%s}' % ', '.join(
- '%r: %s' % (name, target) for name, target
- in iteritems(frame.symbols.dump_stores()))
-
- def write_commons(self):
- """Writes a common preamble that is used by root and block functions.
- Primarily this sets up common local helpers and enforces a generator
- through a dead branch.
- """
- self.writeline('resolve = context.resolve_or_missing')
- self.writeline('undefined = environment.undefined')
- self.writeline('if 0: yield None')
-
- def push_parameter_definitions(self, frame):
- """Pushes all parameter targets from the given frame into a local
- stack that permits tracking of yet to be assigned parameters. In
- particular this enables the optimization from `visit_Name` to skip
- undefined expressions for parameters in macros as macros can reference
- otherwise unbound parameters.
- """
- self._param_def_block.append(frame.symbols.dump_param_targets())
-
- def pop_parameter_definitions(self):
- """Pops the current parameter definitions set."""
- self._param_def_block.pop()
-
- def mark_parameter_stored(self, target):
- """Marks a parameter in the current parameter definitions as stored.
- This will skip the enforced undefined checks.
- """
- if self._param_def_block:
- self._param_def_block[-1].discard(target)
-
- def push_context_reference(self, target):
- self._context_reference_stack.append(target)
-
- def pop_context_reference(self):
- self._context_reference_stack.pop()
-
- def get_context_ref(self):
- return self._context_reference_stack[-1]
-
- def get_resolve_func(self):
- target = self._context_reference_stack[-1]
- if target == 'context':
- return 'resolve'
- return '%s.resolve' % target
-
- def derive_context(self, frame):
- return '%s.derived(%s)' % (
- self.get_context_ref(),
- self.dump_local_context(frame),
- )
-
- def parameter_is_undeclared(self, target):
- """Checks if a given target is an undeclared parameter."""
- if not self._param_def_block:
- return False
- return target in self._param_def_block[-1]
-
- def push_assign_tracking(self):
- """Pushes a new layer for assignment tracking."""
- self._assign_stack.append(set())
-
- def pop_assign_tracking(self, frame):
- """Pops the topmost level for assignment tracking and updates the
- context variables if necessary.
- """
- vars = self._assign_stack.pop()
- if not frame.toplevel or not vars:
- return
- public_names = [x for x in vars if x[:1] != '_']
- if len(vars) == 1:
- name = next(iter(vars))
- ref = frame.symbols.ref(name)
- self.writeline('context.vars[%r] = %s' % (name, ref))
- else:
- self.writeline('context.vars.update({')
- for idx, name in enumerate(vars):
- if idx:
- self.write(', ')
- ref = frame.symbols.ref(name)
- self.write('%r: %s' % (name, ref))
- self.write('})')
- if public_names:
- if len(public_names) == 1:
- self.writeline('context.exported_vars.add(%r)' %
- public_names[0])
- else:
- self.writeline('context.exported_vars.update((%s))' %
- ', '.join(imap(repr, public_names)))
-
- # -- Statement Visitors
-
- def visit_Template(self, node, frame=None):
- assert frame is None, 'no root frame allowed'
- eval_ctx = EvalContext(self.environment, self.name)
-
- from jinja2.runtime import __all__ as exported
- self.writeline('from __future__ import %s' % ', '.join(code_features))
- self.writeline('from jinja2.runtime import ' + ', '.join(exported))
-
- if self.environment.is_async:
- self.writeline('from jinja2.asyncsupport import auto_await, '
- 'auto_aiter, make_async_loop_context')
-
- # if we want a deferred initialization we cannot move the
- # environment into a local name
- envenv = not self.defer_init and ', environment=environment' or ''
-
- # do we have an extends tag at all? If not, we can save some
- # overhead by just not processing any inheritance code.
- have_extends = node.find(nodes.Extends) is not None
-
- # find all blocks
- for block in node.find_all(nodes.Block):
- if block.name in self.blocks:
- self.fail('block %r defined twice' % block.name, block.lineno)
- self.blocks[block.name] = block
-
- # find all imports and import them
- for import_ in node.find_all(nodes.ImportedName):
- if import_.importname not in self.import_aliases:
- imp = import_.importname
- self.import_aliases[imp] = alias = self.temporary_identifier()
- if '.' in imp:
- module, obj = imp.rsplit('.', 1)
- self.writeline('from %s import %s as %s' %
- (module, obj, alias))
- else:
- self.writeline('import %s as %s' % (imp, alias))
-
- # add the load name
- self.writeline('name = %r' % self.name)
-
- # generate the root render function.
- self.writeline('%s(context, missing=missing%s):' %
- (self.func('root'), envenv), extra=1)
- self.indent()
- self.write_commons()
-
- # process the root
- frame = Frame(eval_ctx)
- if 'self' in find_undeclared(node.body, ('self',)):
- ref = frame.symbols.declare_parameter('self')
- self.writeline('%s = TemplateReference(context)' % ref)
- frame.symbols.analyze_node(node)
- frame.toplevel = frame.rootlevel = True
- frame.require_output_check = have_extends and not self.has_known_extends
- if have_extends:
- self.writeline('parent_template = None')
- self.enter_frame(frame)
- self.pull_dependencies(node.body)
- self.blockvisit(node.body, frame)
- self.leave_frame(frame, with_python_scope=True)
- self.outdent()
-
- # make sure that the parent root is called.
- if have_extends:
- if not self.has_known_extends:
- self.indent()
- self.writeline('if parent_template is not None:')
- self.indent()
- if supports_yield_from and not self.environment.is_async:
- self.writeline('yield from parent_template.'
- 'root_render_func(context)')
- else:
- self.writeline('%sfor event in parent_template.'
- 'root_render_func(context):' %
- (self.environment.is_async and 'async ' or ''))
- self.indent()
- self.writeline('yield event')
- self.outdent()
- self.outdent(1 + (not self.has_known_extends))
-
- # at this point we now have the blocks collected and can visit them too.
- for name, block in iteritems(self.blocks):
- self.writeline('%s(context, missing=missing%s):' %
- (self.func('block_' + name), envenv),
- block, 1)
- self.indent()
- self.write_commons()
- # It's important that we do not make this frame a child of the
- # toplevel template. This would cause a variety of
- # interesting issues with identifier tracking.
- block_frame = Frame(eval_ctx)
- undeclared = find_undeclared(block.body, ('self', 'super'))
- if 'self' in undeclared:
- ref = block_frame.symbols.declare_parameter('self')
- self.writeline('%s = TemplateReference(context)' % ref)
- if 'super' in undeclared:
- ref = block_frame.symbols.declare_parameter('super')
- self.writeline('%s = context.super(%r, '
- 'block_%s)' % (ref, name, name))
- block_frame.symbols.analyze_node(block)
- block_frame.block = name
- self.enter_frame(block_frame)
- self.pull_dependencies(block.body)
- self.blockvisit(block.body, block_frame)
- self.leave_frame(block_frame, with_python_scope=True)
- self.outdent()
-
- self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
- for x in self.blocks),
- extra=1)
-
- # add a function that returns the debug info
- self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
- in self.debug_info))
-
- def visit_Block(self, node, frame):
- """Call a block and register it for the template."""
- level = 0
- if frame.toplevel:
- # if we know that we are a child template, there is no need to
- # check if we are one
- if self.has_known_extends:
- return
- if self.extends_so_far > 0:
- self.writeline('if parent_template is None:')
- self.indent()
- level += 1
-
- if node.scoped:
- context = self.derive_context(frame)
- else:
- context = self.get_context_ref()
-
- if supports_yield_from and not self.environment.is_async and \
- frame.buffer is None:
- self.writeline('yield from context.blocks[%r][0](%s)' % (
- node.name, context), node)
- else:
- loop = self.environment.is_async and 'async for' or 'for'
- self.writeline('%s event in context.blocks[%r][0](%s):' % (
- loop, node.name, context), node)
- self.indent()
- self.simple_write('event', frame)
- self.outdent()
-
- self.outdent(level)
-
- def visit_Extends(self, node, frame):
- """Calls the extender."""
- if not frame.toplevel:
- self.fail('cannot use extend from a non top-level scope',
- node.lineno)
-
-        # if no extends statement has been seen so far, we don't have to
-        # add a check whether something already extended the template
-        # before this one.
- if self.extends_so_far > 0:
-
- # if we have a known extends we just add a template runtime
- # error into the generated code. We could catch that at compile
-            # time too, but it's better not to confuse users by throwing
-            # the same error at different times just "because we can".
- if not self.has_known_extends:
- self.writeline('if parent_template is not None:')
- self.indent()
- self.writeline('raise TemplateRuntimeError(%r)' %
- 'extended multiple times')
-
- # if we have a known extends already we don't need that code here
- # as we know that the template execution will end here.
- if self.has_known_extends:
- raise CompilerExit()
- else:
- self.outdent()
-
- self.writeline('parent_template = environment.get_template(', node)
- self.visit(node.template, frame)
- self.write(', %r)' % self.name)
- self.writeline('for name, parent_block in parent_template.'
- 'blocks.%s():' % dict_item_iter)
- self.indent()
- self.writeline('context.blocks.setdefault(name, []).'
- 'append(parent_block)')
- self.outdent()
-
- # if this extends statement was in the root level we can take
- # advantage of that information and simplify the generated code
- # in the top level from this point onwards
- if frame.rootlevel:
- self.has_known_extends = True
-
- # and now we have one more
- self.extends_so_far += 1
-
- def visit_Include(self, node, frame):
- """Handles includes."""
- if node.ignore_missing:
- self.writeline('try:')
- self.indent()
-
- func_name = 'get_or_select_template'
- if isinstance(node.template, nodes.Const):
- if isinstance(node.template.value, string_types):
- func_name = 'get_template'
- elif isinstance(node.template.value, (tuple, list)):
- func_name = 'select_template'
- elif isinstance(node.template, (nodes.Tuple, nodes.List)):
- func_name = 'select_template'
-
- self.writeline('template = environment.%s(' % func_name, node)
- self.visit(node.template, frame)
- self.write(', %r)' % self.name)
- if node.ignore_missing:
- self.outdent()
- self.writeline('except TemplateNotFound:')
- self.indent()
- self.writeline('pass')
- self.outdent()
- self.writeline('else:')
- self.indent()
-
- skip_event_yield = False
- if node.with_context:
- loop = self.environment.is_async and 'async for' or 'for'
- self.writeline('%s event in template.root_render_func('
- 'template.new_context(context.get_all(), True, '
- '%s)):' % (loop, self.dump_local_context(frame)))
- elif self.environment.is_async:
- self.writeline('for event in (await '
- 'template._get_default_module_async())'
- '._body_stream:')
- else:
- if supports_yield_from:
- self.writeline('yield from template._get_default_module()'
- '._body_stream')
- skip_event_yield = True
- else:
- self.writeline('for event in template._get_default_module()'
- '._body_stream:')
-
- if not skip_event_yield:
- self.indent()
- self.simple_write('event', frame)
- self.outdent()
-
- if node.ignore_missing:
- self.outdent()
-
- def visit_Import(self, node, frame):
- """Visit regular imports."""
- self.writeline('%s = ' % frame.symbols.ref(node.target), node)
- if frame.toplevel:
- self.write('context.vars[%r] = ' % node.target)
- if self.environment.is_async:
- self.write('await ')
- self.write('environment.get_template(')
- self.visit(node.template, frame)
- self.write(', %r).' % self.name)
- if node.with_context:
- self.write('make_module%s(context.get_all(), True, %s)'
- % (self.environment.is_async and '_async' or '',
- self.dump_local_context(frame)))
- elif self.environment.is_async:
- self.write('_get_default_module_async()')
- else:
- self.write('_get_default_module()')
- if frame.toplevel and not node.target.startswith('_'):
- self.writeline('context.exported_vars.discard(%r)' % node.target)
-
- def visit_FromImport(self, node, frame):
- """Visit named imports."""
- self.newline(node)
- self.write('included_template = %senvironment.get_template('
- % (self.environment.is_async and 'await ' or ''))
- self.visit(node.template, frame)
- self.write(', %r).' % self.name)
- if node.with_context:
- self.write('make_module%s(context.get_all(), True, %s)'
- % (self.environment.is_async and '_async' or '',
- self.dump_local_context(frame)))
- elif self.environment.is_async:
- self.write('_get_default_module_async()')
- else:
- self.write('_get_default_module()')
-
- var_names = []
- discarded_names = []
- for name in node.names:
- if isinstance(name, tuple):
- name, alias = name
- else:
- alias = name
- self.writeline('%s = getattr(included_template, '
- '%r, missing)' % (frame.symbols.ref(alias), name))
- self.writeline('if %s is missing:' % frame.symbols.ref(alias))
- self.indent()
- self.writeline('%s = undefined(%r %% '
- 'included_template.__name__, '
- 'name=%r)' %
- (frame.symbols.ref(alias),
- 'the template %%r (imported on %s) does '
- 'not export the requested name %s' % (
- self.position(node),
- repr(name)
- ), name))
- self.outdent()
- if frame.toplevel:
- var_names.append(alias)
- if not alias.startswith('_'):
- discarded_names.append(alias)
-
- if var_names:
- if len(var_names) == 1:
- name = var_names[0]
- self.writeline('context.vars[%r] = %s' %
- (name, frame.symbols.ref(name)))
- else:
- self.writeline('context.vars.update({%s})' % ', '.join(
- '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
- ))
- if discarded_names:
- if len(discarded_names) == 1:
- self.writeline('context.exported_vars.discard(%r)' %
- discarded_names[0])
- else:
- self.writeline('context.exported_vars.difference_'
- 'update((%s))' % ', '.join(imap(repr, discarded_names)))
-
- def visit_For(self, node, frame):
- loop_frame = frame.inner()
- test_frame = frame.inner()
- else_frame = frame.inner()
-
- # try to figure out if we have an extended loop. An extended loop
-        # is necessary if the loop is in recursive mode or if the special loop
- # variable is accessed in the body.
- extended_loop = node.recursive or 'loop' in \
- find_undeclared(node.iter_child_nodes(
- only=('body',)), ('loop',))
-
- loop_ref = None
- if extended_loop:
- loop_ref = loop_frame.symbols.declare_parameter('loop')
-
- loop_frame.symbols.analyze_node(node, for_branch='body')
- if node.else_:
- else_frame.symbols.analyze_node(node, for_branch='else')
-
- if node.test:
- loop_filter_func = self.temporary_identifier()
- test_frame.symbols.analyze_node(node, for_branch='test')
- self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
- self.indent()
- self.enter_frame(test_frame)
- self.writeline(self.environment.is_async and 'async for ' or 'for ')
- self.visit(node.target, loop_frame)
- self.write(' in ')
- self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
- self.write(':')
- self.indent()
- self.writeline('if ', node.test)
- self.visit(node.test, test_frame)
- self.write(':')
- self.indent()
- self.writeline('yield ')
- self.visit(node.target, loop_frame)
- self.outdent(3)
- self.leave_frame(test_frame, with_python_scope=True)
-
-        # if we don't have a recursive loop we have to find the shadowed
-        # variables at this point.  Because loops can be nested but the
-        # loop variable is a special one, we have to enforce aliasing for it.
- if node.recursive:
- self.writeline('%s(reciter, loop_render_func, depth=0):' %
- self.func('loop'), node)
- self.indent()
- self.buffer(loop_frame)
-
- # Use the same buffer for the else frame
- else_frame.buffer = loop_frame.buffer
-
- # make sure the loop variable is a special one and raise a template
-        # assertion error if the template tries to assign to loop
- if extended_loop:
- self.writeline('%s = missing' % loop_ref)
-
- for name in node.find_all(nodes.Name):
- if name.ctx == 'store' and name.name == 'loop':
- self.fail('Can\'t assign to special loop variable '
- 'in for-loop target', name.lineno)
-
- if node.else_:
- iteration_indicator = self.temporary_identifier()
- self.writeline('%s = 1' % iteration_indicator)
-
- self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
- self.visit(node.target, loop_frame)
- if extended_loop:
- if self.environment.is_async:
- self.write(', %s in await make_async_loop_context(' % loop_ref)
- else:
- self.write(', %s in LoopContext(' % loop_ref)
- else:
- self.write(' in ')
-
- if node.test:
- self.write('%s(' % loop_filter_func)
- if node.recursive:
- self.write('reciter')
- else:
- if self.environment.is_async and not extended_loop:
- self.write('auto_aiter(')
- self.visit(node.iter, frame)
- if self.environment.is_async and not extended_loop:
- self.write(')')
- if node.test:
- self.write(')')
-
- if node.recursive:
- self.write(', undefined, loop_render_func, depth):')
- else:
- self.write(extended_loop and ', undefined):' or ':')
-
- self.indent()
- self.enter_frame(loop_frame)
-
- self.blockvisit(node.body, loop_frame)
- if node.else_:
- self.writeline('%s = 0' % iteration_indicator)
- self.outdent()
- self.leave_frame(loop_frame, with_python_scope=node.recursive
- and not node.else_)
-
- if node.else_:
- self.writeline('if %s:' % iteration_indicator)
- self.indent()
- self.enter_frame(else_frame)
- self.blockvisit(node.else_, else_frame)
- self.leave_frame(else_frame)
- self.outdent()
-
- # if the node was recursive we have to return the buffer contents
- # and start the iteration code
- if node.recursive:
- self.return_buffer_contents(loop_frame)
- self.outdent()
- self.start_write(frame, node)
- if self.environment.is_async:
- self.write('await ')
- self.write('loop(')
- if self.environment.is_async:
- self.write('auto_aiter(')
- self.visit(node.iter, frame)
- if self.environment.is_async:
- self.write(')')
- self.write(', loop)')
- self.end_write(frame)
-
- def visit_If(self, node, frame):
- if_frame = frame.soft()
- self.writeline('if ', node)
- self.visit(node.test, if_frame)
- self.write(':')
- self.indent()
- self.blockvisit(node.body, if_frame)
- self.outdent()
- for elif_ in node.elif_:
- self.writeline('elif ', elif_)
- self.visit(elif_.test, if_frame)
- self.write(':')
- self.indent()
- self.blockvisit(elif_.body, if_frame)
- self.outdent()
- if node.else_:
- self.writeline('else:')
- self.indent()
- self.blockvisit(node.else_, if_frame)
- self.outdent()
-
- def visit_Macro(self, node, frame):
- macro_frame, macro_ref = self.macro_body(node, frame)
- self.newline()
- if frame.toplevel:
- if not node.name.startswith('_'):
- self.write('context.exported_vars.add(%r)' % node.name)
- ref = frame.symbols.ref(node.name)
- self.writeline('context.vars[%r] = ' % node.name)
- self.write('%s = ' % frame.symbols.ref(node.name))
- self.macro_def(macro_ref, macro_frame)
-
- def visit_CallBlock(self, node, frame):
- call_frame, macro_ref = self.macro_body(node, frame)
- self.writeline('caller = ')
- self.macro_def(macro_ref, call_frame)
- self.start_write(frame, node)
- self.visit_Call(node.call, frame, forward_caller=True)
- self.end_write(frame)
-
- def visit_FilterBlock(self, node, frame):
- filter_frame = frame.inner()
- filter_frame.symbols.analyze_node(node)
- self.enter_frame(filter_frame)
- self.buffer(filter_frame)
- self.blockvisit(node.body, filter_frame)
- self.start_write(frame, node)
- self.visit_Filter(node.filter, filter_frame)
- self.end_write(frame)
- self.leave_frame(filter_frame)
-
- def visit_With(self, node, frame):
- with_frame = frame.inner()
- with_frame.symbols.analyze_node(node)
- self.enter_frame(with_frame)
- for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
- self.newline()
- self.visit(target, with_frame)
- self.write(' = ')
- self.visit(expr, frame)
- self.blockvisit(node.body, with_frame)
- self.leave_frame(with_frame)
-
- def visit_ExprStmt(self, node, frame):
- self.newline(node)
- self.visit(node.node, frame)
-
- def visit_Output(self, node, frame):
- # if we have a known extends statement, we don't output anything
- # if we are in a require_output_check section
- if self.has_known_extends and frame.require_output_check:
- return
-
- allow_constant_finalize = True
- if self.environment.finalize:
- func = self.environment.finalize
- if getattr(func, 'contextfunction', False) or \
- getattr(func, 'evalcontextfunction', False):
- allow_constant_finalize = False
- elif getattr(func, 'environmentfunction', False):
- finalize = lambda x: text_type(
- self.environment.finalize(self.environment, x))
- else:
- finalize = lambda x: text_type(self.environment.finalize(x))
- else:
- finalize = text_type
-
- # if we are inside a frame that requires output checking, we do so
- outdent_later = False
- if frame.require_output_check:
- self.writeline('if parent_template is None:')
- self.indent()
- outdent_later = True
-
- # try to evaluate as many chunks as possible into a static
- # string at compile time.
- body = []
- for child in node.nodes:
- try:
- if not allow_constant_finalize:
- raise nodes.Impossible()
- const = child.as_const(frame.eval_ctx)
- except nodes.Impossible:
- body.append(child)
- continue
-            # the frame can't be volatile here, because otherwise the
- # as_const() function would raise an Impossible exception
- # at that point.
- try:
- if frame.eval_ctx.autoescape:
- if hasattr(const, '__html__'):
- const = const.__html__()
- else:
- const = escape(const)
- const = finalize(const)
- except Exception:
- # if something goes wrong here we evaluate the node
- # at runtime for easier debugging
- body.append(child)
- continue
- if body and isinstance(body[-1], list):
- body[-1].append(const)
- else:
- body.append([const])
-
-        # if we have fewer than 3 nodes or a buffer we yield or extend/append
- if len(body) < 3 or frame.buffer is not None:
- if frame.buffer is not None:
- # for one item we append, for more we extend
- if len(body) == 1:
- self.writeline('%s.append(' % frame.buffer)
- else:
- self.writeline('%s.extend((' % frame.buffer)
- self.indent()
- for item in body:
- if isinstance(item, list):
- val = repr(concat(item))
- if frame.buffer is None:
- self.writeline('yield ' + val)
- else:
- self.writeline(val + ',')
- else:
- if frame.buffer is None:
- self.writeline('yield ', item)
- else:
- self.newline(item)
- close = 1
- if frame.eval_ctx.volatile:
- self.write('(escape if context.eval_ctx.autoescape'
- ' else to_string)(')
- elif frame.eval_ctx.autoescape:
- self.write('escape(')
- else:
- self.write('to_string(')
- if self.environment.finalize is not None:
- self.write('environment.finalize(')
- if getattr(self.environment.finalize,
- "contextfunction", False):
- self.write('context, ')
- close += 1
- self.visit(item, frame)
- self.write(')' * close)
- if frame.buffer is not None:
- self.write(',')
- if frame.buffer is not None:
- # close the open parentheses
- self.outdent()
- self.writeline(len(body) == 1 and ')' or '))')
-
- # otherwise we create a format string as this is faster in that case
- else:
- format = []
- arguments = []
- for item in body:
- if isinstance(item, list):
- format.append(concat(item).replace('%', '%%'))
- else:
- format.append('%s')
- arguments.append(item)
- self.writeline('yield ')
- self.write(repr(concat(format)) + ' % (')
- self.indent()
- for argument in arguments:
- self.newline(argument)
- close = 0
- if frame.eval_ctx.volatile:
- self.write('(escape if context.eval_ctx.autoescape else'
- ' to_string)(')
- close += 1
- elif frame.eval_ctx.autoescape:
- self.write('escape(')
- close += 1
- if self.environment.finalize is not None:
- self.write('environment.finalize(')
- if getattr(self.environment.finalize,
- 'contextfunction', False):
- self.write('context, ')
- elif getattr(self.environment.finalize,
- 'evalcontextfunction', False):
- self.write('context.eval_ctx, ')
- elif getattr(self.environment.finalize,
- 'environmentfunction', False):
- self.write('environment, ')
- close += 1
- self.visit(argument, frame)
- self.write(')' * close + ', ')
- self.outdent()
- self.writeline(')')
-
- if outdent_later:
- self.outdent()
-
- def visit_Assign(self, node, frame):
- self.push_assign_tracking()
- self.newline(node)
- self.visit(node.target, frame)
- self.write(' = ')
- self.visit(node.node, frame)
- self.pop_assign_tracking(frame)
-
- def visit_AssignBlock(self, node, frame):
- self.push_assign_tracking()
- block_frame = frame.inner()
- # This is a special case. Since a set block always captures we
- # will disable output checks. This way one can use set blocks
- # toplevel even in extended templates.
- block_frame.require_output_check = False
- block_frame.symbols.analyze_node(node)
- self.enter_frame(block_frame)
- self.buffer(block_frame)
- self.blockvisit(node.body, block_frame)
- self.newline(node)
- self.visit(node.target, frame)
- self.write(' = (Markup if context.eval_ctx.autoescape '
- 'else identity)(')
- if node.filter is not None:
- self.visit_Filter(node.filter, block_frame)
- else:
- self.write('concat(%s)' % block_frame.buffer)
- self.write(')')
- self.pop_assign_tracking(frame)
- self.leave_frame(block_frame)
-
- # -- Expression Visitors
-
- def visit_Name(self, node, frame):
- if node.ctx == 'store' and frame.toplevel:
- if self._assign_stack:
- self._assign_stack[-1].add(node.name)
- ref = frame.symbols.ref(node.name)
-
- # If we are looking up a variable we might have to deal with the
- # case where it's undefined. We can skip that case if the load
-        # instruction indicates a parameter, which is always defined.
- if node.ctx == 'load':
- load = frame.symbols.find_load(ref)
- if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \
- not self.parameter_is_undeclared(ref)):
- self.write('(undefined(name=%r) if %s is missing else %s)' %
- (node.name, ref, ref))
- return
-
- self.write(ref)
-
- def visit_NSRef(self, node, frame):
- # NSRefs can only be used to store values; since they use the normal
- # `foo.bar` notation they will be parsed as a normal attribute access
- # when used anywhere but in a `set` context
- ref = frame.symbols.ref(node.name)
- self.writeline('if not isinstance(%s, Namespace):' % ref)
- self.indent()
- self.writeline('raise TemplateRuntimeError(%r)' %
- 'cannot assign attribute on non-namespace object')
- self.outdent()
- self.writeline('%s[%r]' % (ref, node.attr))
-
- def visit_Const(self, node, frame):
- val = node.as_const(frame.eval_ctx)
- if isinstance(val, float):
- self.write(str(val))
- else:
- self.write(repr(val))
-
- def visit_TemplateData(self, node, frame):
- try:
- self.write(repr(node.as_const(frame.eval_ctx)))
- except nodes.Impossible:
- self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
- % node.data)
-
- def visit_Tuple(self, node, frame):
- self.write('(')
- idx = -1
- for idx, item in enumerate(node.items):
- if idx:
- self.write(', ')
- self.visit(item, frame)
- self.write(idx == 0 and ',)' or ')')
-
- def visit_List(self, node, frame):
- self.write('[')
- for idx, item in enumerate(node.items):
- if idx:
- self.write(', ')
- self.visit(item, frame)
- self.write(']')
-
- def visit_Dict(self, node, frame):
- self.write('{')
- for idx, item in enumerate(node.items):
- if idx:
- self.write(', ')
- self.visit(item.key, frame)
- self.write(': ')
- self.visit(item.value, frame)
- self.write('}')
-
- def binop(operator, interceptable=True):
- @optimizeconst
- def visitor(self, node, frame):
- if self.environment.sandboxed and \
- operator in self.environment.intercepted_binops:
- self.write('environment.call_binop(context, %r, ' % operator)
- self.visit(node.left, frame)
- self.write(', ')
- self.visit(node.right, frame)
- else:
- self.write('(')
- self.visit(node.left, frame)
- self.write(' %s ' % operator)
- self.visit(node.right, frame)
- self.write(')')
- return visitor
-
- def uaop(operator, interceptable=True):
- @optimizeconst
- def visitor(self, node, frame):
- if self.environment.sandboxed and \
- operator in self.environment.intercepted_unops:
- self.write('environment.call_unop(context, %r, ' % operator)
- self.visit(node.node, frame)
- else:
- self.write('(' + operator)
- self.visit(node.node, frame)
- self.write(')')
- return visitor
-
- visit_Add = binop('+')
- visit_Sub = binop('-')
- visit_Mul = binop('*')
- visit_Div = binop('/')
- visit_FloorDiv = binop('//')
- visit_Pow = binop('**')
- visit_Mod = binop('%')
- visit_And = binop('and', interceptable=False)
- visit_Or = binop('or', interceptable=False)
- visit_Pos = uaop('+')
- visit_Neg = uaop('-')
- visit_Not = uaop('not ', interceptable=False)
- del binop, uaop
-
- @optimizeconst
- def visit_Concat(self, node, frame):
- if frame.eval_ctx.volatile:
- func_name = '(context.eval_ctx.volatile and' \
- ' markup_join or unicode_join)'
- elif frame.eval_ctx.autoescape:
- func_name = 'markup_join'
- else:
- func_name = 'unicode_join'
- self.write('%s((' % func_name)
- for arg in node.nodes:
- self.visit(arg, frame)
- self.write(', ')
- self.write('))')
-
- @optimizeconst
- def visit_Compare(self, node, frame):
- self.visit(node.expr, frame)
- for op in node.ops:
- self.visit(op, frame)
-
- def visit_Operand(self, node, frame):
- self.write(' %s ' % operators[node.op])
- self.visit(node.expr, frame)
-
- @optimizeconst
- def visit_Getattr(self, node, frame):
- self.write('environment.getattr(')
- self.visit(node.node, frame)
- self.write(', %r)' % node.attr)
-
- @optimizeconst
- def visit_Getitem(self, node, frame):
- # slices bypass the environment getitem method.
- if isinstance(node.arg, nodes.Slice):
- self.visit(node.node, frame)
- self.write('[')
- self.visit(node.arg, frame)
- self.write(']')
- else:
- self.write('environment.getitem(')
- self.visit(node.node, frame)
- self.write(', ')
- self.visit(node.arg, frame)
- self.write(')')
-
- def visit_Slice(self, node, frame):
- if node.start is not None:
- self.visit(node.start, frame)
- self.write(':')
- if node.stop is not None:
- self.visit(node.stop, frame)
- if node.step is not None:
- self.write(':')
- self.visit(node.step, frame)
-
- @optimizeconst
- def visit_Filter(self, node, frame):
- if self.environment.is_async:
- self.write('await auto_await(')
- self.write(self.filters[node.name] + '(')
- func = self.environment.filters.get(node.name)
- if func is None:
- self.fail('no filter named %r' % node.name, node.lineno)
- if getattr(func, 'contextfilter', False):
- self.write('context, ')
- elif getattr(func, 'evalcontextfilter', False):
- self.write('context.eval_ctx, ')
- elif getattr(func, 'environmentfilter', False):
- self.write('environment, ')
-
- # if the filter node is None we are inside a filter block
- # and want to write to the current buffer
- if node.node is not None:
- self.visit(node.node, frame)
- elif frame.eval_ctx.volatile:
- self.write('(context.eval_ctx.autoescape and'
- ' Markup(concat(%s)) or concat(%s))' %
- (frame.buffer, frame.buffer))
- elif frame.eval_ctx.autoescape:
- self.write('Markup(concat(%s))' % frame.buffer)
- else:
- self.write('concat(%s)' % frame.buffer)
- self.signature(node, frame)
- self.write(')')
- if self.environment.is_async:
- self.write(')')
-
- @optimizeconst
- def visit_Test(self, node, frame):
- self.write(self.tests[node.name] + '(')
- if node.name not in self.environment.tests:
- self.fail('no test named %r' % node.name, node.lineno)
- self.visit(node.node, frame)
- self.signature(node, frame)
- self.write(')')
-
- @optimizeconst
- def visit_CondExpr(self, node, frame):
- def write_expr2():
- if node.expr2 is not None:
- return self.visit(node.expr2, frame)
- self.write('undefined(%r)' % ('the inline if-'
- 'expression on %s evaluated to false and '
- 'no else section was defined.' % self.position(node)))
-
- self.write('(')
- self.visit(node.expr1, frame)
- self.write(' if ')
- self.visit(node.test, frame)
- self.write(' else ')
- write_expr2()
- self.write(')')
-
- @optimizeconst
- def visit_Call(self, node, frame, forward_caller=False):
- if self.environment.is_async:
- self.write('await auto_await(')
- if self.environment.sandboxed:
- self.write('environment.call(context, ')
- else:
- self.write('context.call(')
- self.visit(node.node, frame)
- extra_kwargs = forward_caller and {'caller': 'caller'} or None
- self.signature(node, frame, extra_kwargs)
- self.write(')')
- if self.environment.is_async:
- self.write(')')
-
- def visit_Keyword(self, node, frame):
- self.write(node.key + '=')
- self.visit(node.value, frame)
-
- # -- Unused nodes for extensions
-
- def visit_MarkSafe(self, node, frame):
- self.write('Markup(')
- self.visit(node.expr, frame)
- self.write(')')
-
- def visit_MarkSafeIfAutoescape(self, node, frame):
- self.write('(context.eval_ctx.autoescape and Markup or identity)(')
- self.visit(node.expr, frame)
- self.write(')')
-
- def visit_EnvironmentAttribute(self, node, frame):
- self.write('environment.' + node.name)
-
- def visit_ExtensionAttribute(self, node, frame):
- self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
-
- def visit_ImportedName(self, node, frame):
- self.write(self.import_aliases[node.importname])
-
- def visit_InternalName(self, node, frame):
- self.write(node.name)
-
- def visit_ContextReference(self, node, frame):
- self.write('context')
-
- def visit_Continue(self, node, frame):
- self.writeline('continue', node)
-
- def visit_Break(self, node, frame):
- self.writeline('break', node)
-
- def visit_Scope(self, node, frame):
- scope_frame = frame.inner()
- scope_frame.symbols.analyze_node(node)
- self.enter_frame(scope_frame)
- self.blockvisit(node.body, scope_frame)
- self.leave_frame(scope_frame)
-
- def visit_OverlayScope(self, node, frame):
- ctx = self.temporary_identifier()
- self.writeline('%s = %s' % (ctx, self.derive_context(frame)))
- self.writeline('%s.vars = ' % ctx)
- self.visit(node.context, frame)
- self.push_context_reference(ctx)
-
- scope_frame = frame.inner(isolated=True)
- scope_frame.symbols.analyze_node(node)
- self.enter_frame(scope_frame)
- self.blockvisit(node.body, scope_frame)
- self.leave_frame(scope_frame)
- self.pop_context_reference()
-
- def visit_EvalContextModifier(self, node, frame):
- for keyword in node.options:
- self.writeline('context.eval_ctx.%s = ' % keyword.key)
- self.visit(keyword.value, frame)
- try:
- val = keyword.value.as_const(frame.eval_ctx)
- except nodes.Impossible:
- frame.eval_ctx.volatile = True
- else:
- setattr(frame.eval_ctx, keyword.key, val)
-
- def visit_ScopedEvalContextModifier(self, node, frame):
- old_ctx_name = self.temporary_identifier()
- saved_ctx = frame.eval_ctx.save()
- self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
- self.visit_EvalContextModifier(node, frame)
- for child in node.body:
- self.visit(child, frame)
- frame.eval_ctx.revert(saved_ctx)
- self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
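Aside: the `visit_CondExpr` and `visit_Call` methods deleted above only emit Python source; the easiest way to see their output is the `raw=True` flag of `Environment.compile()` (documented in the `environment.py` hunk later in this diff). A minimal sketch, assuming nothing beyond an importable `jinja2`:

```python
# Print the Python module the code generator builds for a template that
# exercises visit_CondExpr (inline if) and visit_Call (a method call).
from jinja2 import Environment

env = Environment()
source = "{{ user.greet() if user }}"

# raw=True makes compile() return the generated Python source as a string
# instead of a code object.
print(env.compile(source, raw=True))
```

The printed module contains the `(a if b else c)` expression written by `visit_CondExpr` (with the missing else branch rendered as the `undefined(...)` call built by `write_expr2` above) and the `context.call(...)` wrapper emitted by `visit_Call` for non-sandboxed environments.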
diff --git a/venv/Lib/site-packages/jinja2/constants.py b/venv/Lib/site-packages/jinja2/constants.py
deleted file mode 100644
index 11efd1e..0000000
--- a/venv/Lib/site-packages/jinja2/constants.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja.constants
- ~~~~~~~~~~~~~~~
-
- Various constants.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-
-#: list of lorem ipsum words used by the lipsum() helper function
-LOREM_IPSUM_WORDS = u'''\
-a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
-auctor augue bibendum blandit class commodo condimentum congue consectetuer
-consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
-diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
-elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
-faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
-hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
-justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
-luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
-mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
-nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
-penatibus per pharetra phasellus placerat platea porta porttitor posuere
-potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
-ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
-sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
-tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
-ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
-viverra volutpat vulputate'''
diff --git a/venv/Lib/site-packages/jinja2/debug.py b/venv/Lib/site-packages/jinja2/debug.py
deleted file mode 100644
index b61139f..0000000
--- a/venv/Lib/site-packages/jinja2/debug.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.debug
- ~~~~~~~~~~~~
-
- Implements the debug interface for Jinja. This module does some pretty
- ugly stuff with the Python traceback system in order to achieve tracebacks
- with correct line numbers, locals and contents.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import sys
-import traceback
-from types import TracebackType, CodeType
-from jinja2.utils import missing, internal_code
-from jinja2.exceptions import TemplateSyntaxError
-from jinja2._compat import iteritems, reraise, PY2
-
-# on pypy we can take advantage of transparent proxies
-try:
- from __pypy__ import tproxy
-except ImportError:
- tproxy = None
-
-
-# what does the raise helper look like?
-try:
- exec("raise TypeError, 'foo'")
-except SyntaxError:
- raise_helper = 'raise __jinja_exception__[1]'
-except TypeError:
- raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
-
-
-class TracebackFrameProxy(object):
- """Proxies a traceback frame."""
-
- def __init__(self, tb):
- self.tb = tb
- self._tb_next = None
-
- @property
- def tb_next(self):
- return self._tb_next
-
- def set_next(self, next):
- if tb_set_next is not None:
- try:
- tb_set_next(self.tb, next and next.tb or None)
- except Exception:
- # this function can fail due to all the hackery it does
- # on various python implementations. We just catch errors
- # down and ignore them if necessary.
- pass
- self._tb_next = next
-
- @property
- def is_jinja_frame(self):
- return '__jinja_template__' in self.tb.tb_frame.f_globals
-
- def __getattr__(self, name):
- return getattr(self.tb, name)
-
-
-def make_frame_proxy(frame):
- proxy = TracebackFrameProxy(frame)
- if tproxy is None:
- return proxy
- def operation_handler(operation, *args, **kwargs):
- if operation in ('__getattribute__', '__getattr__'):
- return getattr(proxy, args[0])
- elif operation == '__setattr__':
- proxy.__setattr__(*args, **kwargs)
- else:
- return getattr(proxy, operation)(*args, **kwargs)
- return tproxy(TracebackType, operation_handler)
-
-
-class ProcessedTraceback(object):
- """Holds a Jinja preprocessed traceback for printing or reraising."""
-
- def __init__(self, exc_type, exc_value, frames):
- assert frames, 'no frames for this traceback?'
- self.exc_type = exc_type
- self.exc_value = exc_value
- self.frames = frames
-
- # re-concatenate the frames (which are proxies)
- prev_tb = None
- for tb in self.frames:
- if prev_tb is not None:
- prev_tb.set_next(tb)
- prev_tb = tb
- prev_tb.set_next(None)
-
- def render_as_text(self, limit=None):
- """Return a string with the traceback."""
- lines = traceback.format_exception(self.exc_type, self.exc_value,
- self.frames[0], limit=limit)
- return ''.join(lines).rstrip()
-
- def render_as_html(self, full=False):
- """Return a unicode string with the traceback as rendered HTML."""
- from jinja2.debugrenderer import render_traceback
- return u'%s\n\n<!--\n%s\n-->' % (
- render_traceback(self, full=full),
- self.render_as_text().decode('utf-8', 'replace')
- )
-
- @property
- def is_template_syntax_error(self):
- """`True` if this is a template syntax error."""
- return isinstance(self.exc_value, TemplateSyntaxError)
-
- @property
- def exc_info(self):
- """Exception info tuple with a proxy around the frame objects."""
- return self.exc_type, self.exc_value, self.frames[0]
-
- @property
- def standard_exc_info(self):
- """Standard python exc_info for re-raising"""
- tb = self.frames[0]
- # the frame will be an actual traceback (or transparent proxy) if
- # we are on pypy or a python implementation with support for tproxy
- if type(tb) is not TracebackType:
- tb = tb.tb
- return self.exc_type, self.exc_value, tb
-
-
-def make_traceback(exc_info, source_hint=None):
- """Creates a processed traceback object from the exc_info."""
- exc_type, exc_value, tb = exc_info
- if isinstance(exc_value, TemplateSyntaxError):
- exc_info = translate_syntax_error(exc_value, source_hint)
- initial_skip = 0
- else:
- initial_skip = 1
- return translate_exception(exc_info, initial_skip)
-
-
-def translate_syntax_error(error, source=None):
- """Rewrites a syntax error to please traceback systems."""
- error.source = source
- error.translated = True
- exc_info = (error.__class__, error, None)
- filename = error.filename
- if filename is None:
- filename = '<unknown>'
- return fake_exc_info(exc_info, filename, error.lineno)
-
-
-def translate_exception(exc_info, initial_skip=0):
- """If passed an exc_info it will automatically rewrite the exceptions
- all the way down to the correct line numbers and frames.
- """
- tb = exc_info[2]
- frames = []
-
- # skip some internal frames if wanted
- for x in range(initial_skip):
- if tb is not None:
- tb = tb.tb_next
- initial_tb = tb
-
- while tb is not None:
- # skip frames decorated with @internalcode. These are internal
- # calls we can't avoid and that are useless in template debugging
- # output.
- if tb.tb_frame.f_code in internal_code:
- tb = tb.tb_next
- continue
-
- # save a reference to the next frame if we override the current
- # one with a faked one.
- next = tb.tb_next
-
- # fake template exceptions
- template = tb.tb_frame.f_globals.get('__jinja_template__')
- if template is not None:
- lineno = template.get_corresponding_lineno(tb.tb_lineno)
- tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
- lineno)[2]
-
- frames.append(make_frame_proxy(tb))
- tb = next
-
- # if we don't have any exceptions in the frames left, we have to
- # reraise it unchanged.
- # XXX: can we backup here? when could this happen?
- if not frames:
- reraise(exc_info[0], exc_info[1], exc_info[2])
-
- return ProcessedTraceback(exc_info[0], exc_info[1], frames)
-
-
-def get_jinja_locals(real_locals):
- ctx = real_locals.get('context')
- if ctx:
- locals = ctx.get_all().copy()
- else:
- locals = {}
-
- local_overrides = {}
-
- for name, value in iteritems(real_locals):
- if not name.startswith('l_') or value is missing:
- continue
- try:
- _, depth, name = name.split('_', 2)
- depth = int(depth)
- except ValueError:
- continue
- cur_depth = local_overrides.get(name, (-1,))[0]
- if cur_depth < depth:
- local_overrides[name] = (depth, value)
-
- for name, (_, value) in iteritems(local_overrides):
- if value is missing:
- locals.pop(name, None)
- else:
- locals[name] = value
-
- return locals
-
-
-def fake_exc_info(exc_info, filename, lineno):
- """Helper for `translate_exception`."""
- exc_type, exc_value, tb = exc_info
-
- # figure the real context out
- if tb is not None:
- locals = get_jinja_locals(tb.tb_frame.f_locals)
-
- # if there is a local called __jinja_exception__, we get
- # rid of it to not break the debug functionality.
- locals.pop('__jinja_exception__', None)
- else:
- locals = {}
-
- # assemble the fake globals we need
- globals = {
- '__name__': filename,
- '__file__': filename,
- '__jinja_exception__': exc_info[:2],
-
- # we don't want to keep the reference to the template around
- # to not cause circular dependencies, but we mark it as Jinja
- # frame for the ProcessedTraceback
- '__jinja_template__': None
- }
-
- # and fake the exception
- code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
-
- # if it's possible, change the name of the code. This won't work
- # on some python environments such as google appengine
- try:
- if tb is None:
- location = 'template'
- else:
- function = tb.tb_frame.f_code.co_name
- if function == 'root':
- location = 'top-level template code'
- elif function.startswith('block_'):
- location = 'block "%s"' % function[6:]
- else:
- location = 'template'
-
- if PY2:
- code = CodeType(0, code.co_nlocals, code.co_stacksize,
- code.co_flags, code.co_code, code.co_consts,
- code.co_names, code.co_varnames, filename,
- location, code.co_firstlineno,
- code.co_lnotab, (), ())
- else:
- code = CodeType(0, code.co_kwonlyargcount,
- code.co_nlocals, code.co_stacksize,
- code.co_flags, code.co_code, code.co_consts,
- code.co_names, code.co_varnames, filename,
- location, code.co_firstlineno,
- code.co_lnotab, (), ())
- except Exception as e:
- pass
-
- # execute the code and catch the new traceback
- try:
- exec(code, globals, locals)
- except:
- exc_info = sys.exc_info()
- new_tb = exc_info[2].tb_next
-
- # return without this frame
- return exc_info[:2] + (new_tb,)
-
-
-def _init_ugly_crap():
- """This function implements a few ugly things so that we can patch the
- traceback objects. The function returned allows resetting `tb_next` on
- any python traceback object. Do not attempt to use this on non-CPython
- interpreters.
- """
- import ctypes
- from types import TracebackType
-
- if PY2:
- # figure out size of _Py_ssize_t for Python 2:
- if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
- _Py_ssize_t = ctypes.c_int64
- else:
- _Py_ssize_t = ctypes.c_int
- else:
- # platform ssize_t on Python 3
- _Py_ssize_t = ctypes.c_ssize_t
-
- # regular python
- class _PyObject(ctypes.Structure):
- pass
- _PyObject._fields_ = [
- ('ob_refcnt', _Py_ssize_t),
- ('ob_type', ctypes.POINTER(_PyObject))
- ]
-
- # python with trace
- if hasattr(sys, 'getobjects'):
- class _PyObject(ctypes.Structure):
- pass
- _PyObject._fields_ = [
- ('_ob_next', ctypes.POINTER(_PyObject)),
- ('_ob_prev', ctypes.POINTER(_PyObject)),
- ('ob_refcnt', _Py_ssize_t),
- ('ob_type', ctypes.POINTER(_PyObject))
- ]
-
- class _Traceback(_PyObject):
- pass
- _Traceback._fields_ = [
- ('tb_next', ctypes.POINTER(_Traceback)),
- ('tb_frame', ctypes.POINTER(_PyObject)),
- ('tb_lasti', ctypes.c_int),
- ('tb_lineno', ctypes.c_int)
- ]
-
- def tb_set_next(tb, next):
- """Set the tb_next attribute of a traceback object."""
- if not (isinstance(tb, TracebackType) and
- (next is None or isinstance(next, TracebackType))):
- raise TypeError('tb_set_next arguments must be traceback objects')
- obj = _Traceback.from_address(id(tb))
- if tb.tb_next is not None:
- old = _Traceback.from_address(id(tb.tb_next))
- old.ob_refcnt -= 1
- if next is None:
- obj.tb_next = ctypes.POINTER(_Traceback)()
- else:
- next = _Traceback.from_address(id(next))
- next.ob_refcnt += 1
- obj.tb_next = ctypes.pointer(next)
-
- return tb_set_next
-
-
-# try to get a tb_set_next implementation if we don't have transparent
-# proxies.
-tb_set_next = None
-if tproxy is None:
- try:
- tb_set_next = _init_ugly_crap()
- except:
- pass
- del _init_ugly_crap
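Aside: the heart of `fake_exc_info()` above is the newline-padding trick: compile a `raise` statement preceded by `lineno - 1` blank lines so the resulting frame reports the template's line number and filename. A standalone sketch of just that trick (Python 3, stdlib only; `fake_raise`, `__exc__`, and `demo.html` are hypothetical names used for illustration):

```python
import sys

def fake_raise(exc, filename, lineno):
    # pad with blank lines so the raise statement lands on `lineno`
    code = compile('\n' * (lineno - 1) + 'raise __exc__', filename, 'exec')
    try:
        exec(code, {'__exc__': exc})
    except Exception:
        return sys.exc_info()

exc_type, exc_value, tb = fake_raise(ValueError('boom'), 'demo.html', 42)
# tb is the fake_raise frame; tb.tb_next is the faked frame, which now
# reports demo.html line 42 -- the same "return without this frame"
# slicing fake_exc_info() performs via exc_info[2].tb_next.
assert tb.tb_next.tb_lineno == 42
```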
diff --git a/venv/Lib/site-packages/jinja2/defaults.py b/venv/Lib/site-packages/jinja2/defaults.py
deleted file mode 100644
index 7c93dec..0000000
--- a/venv/Lib/site-packages/jinja2/defaults.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.defaults
- ~~~~~~~~~~~~~~~
-
- Jinja default filters and tags.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2._compat import range_type
-from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace
-
-
-# defaults for the parser / lexer
-BLOCK_START_STRING = '{%'
-BLOCK_END_STRING = '%}'
-VARIABLE_START_STRING = '{{'
-VARIABLE_END_STRING = '}}'
-COMMENT_START_STRING = '{#'
-COMMENT_END_STRING = '#}'
-LINE_STATEMENT_PREFIX = None
-LINE_COMMENT_PREFIX = None
-TRIM_BLOCKS = False
-LSTRIP_BLOCKS = False
-NEWLINE_SEQUENCE = '\n'
-KEEP_TRAILING_NEWLINE = False
-
-
-# default filters, tests and namespace
-from jinja2.filters import FILTERS as DEFAULT_FILTERS
-from jinja2.tests import TESTS as DEFAULT_TESTS
-DEFAULT_NAMESPACE = {
- 'range': range_type,
- 'dict': dict,
- 'lipsum': generate_lorem_ipsum,
- 'cycler': Cycler,
- 'joiner': Joiner,
- 'namespace': Namespace
-}
-
-
-# default policies
-DEFAULT_POLICIES = {
- 'compiler.ascii_str': True,
- 'urlize.rel': 'noopener',
- 'urlize.target': None,
- 'truncate.leeway': 5,
- 'json.dumps_function': None,
- 'json.dumps_kwargs': {'sort_keys': True},
- 'ext.i18n.trimmed': False,
-}
-
-
-# export all constants
-__all__ = tuple(x for x in locals().keys() if x.isupper())
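Aside: these constants are consumed by `Environment.__init__` (next file in this diff), which takes the delimiter defaults as keyword arguments and copies `DEFAULT_POLICIES` onto `env.policies`, where single entries can be overridden per environment. A minimal sketch:

```python
from jinja2 import Environment

# Override the lexer delimiters that default to the constants above.
env = Environment(
    block_start_string='<%',     # instead of BLOCK_START_STRING '{%'
    block_end_string='%>',       # instead of BLOCK_END_STRING '%}'
    variable_start_string='${',  # instead of VARIABLE_START_STRING '{{'
    variable_end_string='}',     # instead of VARIABLE_END_STRING '}}'
)

# env.policies starts as a copy of DEFAULT_POLICIES; tweak one entry.
env.policies['truncate.leeway'] = 0

print(env.from_string('<% if x %>${ x }<% endif %>').render(x='hi'))  # hi
```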
diff --git a/venv/Lib/site-packages/jinja2/environment.py b/venv/Lib/site-packages/jinja2/environment.py
deleted file mode 100644
index 549d9af..0000000
--- a/venv/Lib/site-packages/jinja2/environment.py
+++ /dev/null
@@ -1,1276 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.environment
- ~~~~~~~~~~~~~~~~~~
-
- Provides a class that holds runtime and parsing time options.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import os
-import sys
-import weakref
-from functools import reduce, partial
-from jinja2 import nodes
-from jinja2.defaults import BLOCK_START_STRING, \
- BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
- COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
- LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
- DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
- DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
-from jinja2.lexer import get_lexer, TokenStream
-from jinja2.parser import Parser
-from jinja2.nodes import EvalContext
-from jinja2.compiler import generate, CodeGenerator
-from jinja2.runtime import Undefined, new_context, Context
-from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
- TemplatesNotFound, TemplateRuntimeError
-from jinja2.utils import import_string, LRUCache, Markup, missing, \
- concat, consume, internalcode, have_async_gen
-from jinja2._compat import imap, ifilter, string_types, iteritems, \
- text_type, reraise, implements_iterator, implements_to_string, \
- encode_filename, PY2, PYPY
-
-
-# for direct template usage we have up to ten living environments
-_spontaneous_environments = LRUCache(10)
-
-# the function to create jinja traceback objects. This is dynamically
-# imported on the first exception in the exception handler.
-_make_traceback = None
-
-
-def get_spontaneous_environment(*args):
- """Return a new spontaneous environment. A spontaneous environment is an
- unnamed and unaccessible (in theory) environment that is used for
- templates generated from a string and not from the file system.
- """
- try:
- env = _spontaneous_environments.get(args)
- except TypeError:
- return Environment(*args)
- if env is not None:
- return env
- _spontaneous_environments[args] = env = Environment(*args)
- env.shared = True
- return env
-
-
-def create_cache(size):
- """Return the cache class for the given size."""
- if size == 0:
- return None
- if size < 0:
- return {}
- return LRUCache(size)
-
-
-def copy_cache(cache):
- """Create an empty copy of the given cache."""
- if cache is None:
- return None
- elif type(cache) is dict:
- return {}
- return LRUCache(cache.capacity)
-
-
-def load_extensions(environment, extensions):
- """Load the extensions from the list and bind it to the environment.
- Returns a dict of instantiated environments.
- """
- result = {}
- for extension in extensions:
- if isinstance(extension, string_types):
- extension = import_string(extension)
- result[extension.identifier] = extension(environment)
- return result
-
-
-def fail_for_missing_callable(string, name):
- msg = string % name
- if isinstance(name, Undefined):
- try:
- name._fail_with_undefined_error()
- except Exception as e:
- msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e)
- raise TemplateRuntimeError(msg)
-
-
-def _environment_sanity_check(environment):
- """Perform a sanity check on the environment."""
- assert issubclass(environment.undefined, Undefined), 'undefined must ' \
- 'be a subclass of undefined because filters depend on it.'
- assert environment.block_start_string != \
- environment.variable_start_string != \
- environment.comment_start_string, 'block, variable and comment ' \
- 'start strings must be different'
- assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
- 'newline_sequence set to unknown line ending string.'
- return environment
-
-
-class Environment(object):
- r"""The core component of Jinja is the `Environment`. It contains
- important shared variables like configuration, filters, tests,
- globals and others. Instances of this class may be modified if
- they are not shared and if no template was loaded so far.
- Modifications on environments after the first template was loaded
- will lead to surprising effects and undefined behavior.
-
- Here are the possible initialization parameters:
-
- `block_start_string`
- The string marking the beginning of a block. Defaults to ``'{%'``.
-
- `block_end_string`
- The string marking the end of a block. Defaults to ``'%}'``.
-
- `variable_start_string`
- The string marking the beginning of a print statement.
- Defaults to ``'{{'``.
-
- `variable_end_string`
- The string marking the end of a print statement. Defaults to
- ``'}}'``.
-
- `comment_start_string`
- The string marking the beginning of a comment. Defaults to ``'{#'``.
-
- `comment_end_string`
- The string marking the end of a comment. Defaults to ``'#}'``.
-
- `line_statement_prefix`
- If given and a string, this will be used as prefix for line based
- statements. See also :ref:`line-statements`.
-
- `line_comment_prefix`
- If given and a string, this will be used as prefix for line based
- comments. See also :ref:`line-statements`.
-
- .. versionadded:: 2.2
-
- `trim_blocks`
- If this is set to ``True`` the first newline after a block is
- removed (block, not variable tag!). Defaults to `False`.
-
- `lstrip_blocks`
- If this is set to ``True`` leading spaces and tabs are stripped
- from the start of a line to a block. Defaults to `False`.
-
- `newline_sequence`
- The sequence that starts a newline. Must be one of ``'\r'``,
- ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
- useful default for Linux and OS X systems as well as web
- applications.
-
- `keep_trailing_newline`
- Preserve the trailing newline when rendering templates.
- The default is ``False``, which causes a single newline,
- if present, to be stripped from the end of the template.
-
- .. versionadded:: 2.7
-
- `extensions`
- List of Jinja extensions to use. This can either be import paths
- as strings or extension classes. For more information have a
- look at :ref:`the extensions documentation <jinja-extensions>`.
-
- `optimized`
- should the optimizer be enabled? Default is ``True``.
-
- `undefined`
- :class:`Undefined` or a subclass of it that is used to represent
- undefined values in the template.
-
- `finalize`
- A callable that can be used to process the result of a variable
- expression before it is output. For example one can convert
- ``None`` implicitly into an empty string here.
-
- `autoescape`
- If set to ``True`` the XML/HTML autoescaping feature is enabled by
- default. For more details about autoescaping see
- :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
- be a callable that is passed the template name and has to
- return ``True`` or ``False`` depending on whether autoescape should be
- enabled by default.
-
- .. versionchanged:: 2.4
- `autoescape` can now be a function
-
- `loader`
- The template loader for this environment.
-
- `cache_size`
- The size of the cache. Per default this is ``400`` which means
- that if more than 400 templates are loaded the loader will clean
- out the least recently used template. If the cache size is set to
- ``0`` templates are recompiled all the time, if the cache size is
- ``-1`` the cache will not be cleaned.
-
- .. versionchanged:: 2.8
- The cache size was increased to 400 from a low 50.
-
- `auto_reload`
- Some loaders load templates from locations where the template
- sources may change (i.e. the file system or a database). If
- ``auto_reload`` is set to ``True`` (default) every time a template is
- requested the loader checks if the source changed and if yes, it
- will reload the template. For higher performance it's possible to
- disable that.
-
- `bytecode_cache`
- If set to a bytecode cache object, this object will provide a
- cache for the internal Jinja bytecode so that templates don't
- have to be parsed if they were not changed.
-
- See :ref:`bytecode-cache` for more information.
-
- `enable_async`
- If set to true this enables async template execution which allows
- you to take advantage of newer Python features. This requires
- Python 3.6 or later.
- """
-
- #: if this environment is sandboxed. Modifying this variable won't make
- #: the environment sandboxed though. For a real sandboxed environment
- #: have a look at jinja2.sandbox. This flag alone controls the code
- #: generation by the compiler.
- sandboxed = False
-
- #: True if the environment is just an overlay
- overlayed = False
-
- #: the environment this environment is linked to if it is an overlay
- linked_to = None
-
- #: shared environments have this set to `True`. A shared environment
- #: must not be modified
- shared = False
-
- #: these are currently EXPERIMENTAL undocumented features.
- exception_handler = None
- exception_formatter = None
-
- #: the class that is used for code generation. See
- #: :class:`~jinja2.compiler.CodeGenerator` for more information.
- code_generator_class = CodeGenerator
-
- #: the context class that is used for templates. See
- #: :class:`~jinja2.runtime.Context` for more information.
- context_class = Context
-
- def __init__(self,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- loader=None,
- cache_size=400,
- auto_reload=True,
- bytecode_cache=None,
- enable_async=False):
- # !!Important notice!!
- # The constructor accepts quite a few arguments that should be
- # passed by keyword rather than position. However it's important to
- # not change the order of arguments because it's used at least
- # internally in those cases:
- # - spontaneous environments (i18n extension and Template)
- # - unittests
- # If parameter changes are required only add parameters at the end
- # and don't change the arguments (or the defaults!) of the arguments
- # existing already.
-
- # lexer / parser information
- self.block_start_string = block_start_string
- self.block_end_string = block_end_string
- self.variable_start_string = variable_start_string
- self.variable_end_string = variable_end_string
- self.comment_start_string = comment_start_string
- self.comment_end_string = comment_end_string
- self.line_statement_prefix = line_statement_prefix
- self.line_comment_prefix = line_comment_prefix
- self.trim_blocks = trim_blocks
- self.lstrip_blocks = lstrip_blocks
- self.newline_sequence = newline_sequence
- self.keep_trailing_newline = keep_trailing_newline
-
- # runtime information
- self.undefined = undefined
- self.optimized = optimized
- self.finalize = finalize
- self.autoescape = autoescape
-
- # defaults
- self.filters = DEFAULT_FILTERS.copy()
- self.tests = DEFAULT_TESTS.copy()
- self.globals = DEFAULT_NAMESPACE.copy()
-
- # set the loader provided
- self.loader = loader
- self.cache = create_cache(cache_size)
- self.bytecode_cache = bytecode_cache
- self.auto_reload = auto_reload
-
- # configurable policies
- self.policies = DEFAULT_POLICIES.copy()
-
- # load extensions
- self.extensions = load_extensions(self, extensions)
-
- self.enable_async = enable_async
- self.is_async = self.enable_async and have_async_gen
-
- _environment_sanity_check(self)
-
- def add_extension(self, extension):
- """Adds an extension after the environment was created.
-
- .. versionadded:: 2.5
- """
- self.extensions.update(load_extensions(self, [extension]))
-
- def extend(self, **attributes):
- """Add the items to the instance of the environment if they do not exist
- yet. This is used by :ref:`extensions <writing-extensions>` to register
- callbacks and configuration values without breaking inheritance.
- """
- for key, value in iteritems(attributes):
- if not hasattr(self, key):
- setattr(self, key, value)
-
- def overlay(self, block_start_string=missing, block_end_string=missing,
- variable_start_string=missing, variable_end_string=missing,
- comment_start_string=missing, comment_end_string=missing,
- line_statement_prefix=missing, line_comment_prefix=missing,
- trim_blocks=missing, lstrip_blocks=missing,
- extensions=missing, optimized=missing,
- undefined=missing, finalize=missing, autoescape=missing,
- loader=missing, cache_size=missing, auto_reload=missing,
- bytecode_cache=missing):
- """Create a new overlay environment that shares all the data with the
- current environment except for cache and the overridden attributes.
- Extensions cannot be removed for an overlayed environment. An overlayed
- environment automatically gets all the extensions of the environment it
- is linked to plus optional extra extensions.
-
- Creating overlays should happen after the initial environment was set
- up completely. Not all attributes are truly linked, some are just
- copied over so modifications on the original environment may not shine
- through.
- """
- args = dict(locals())
- del args['self'], args['cache_size'], args['extensions']
-
- rv = object.__new__(self.__class__)
- rv.__dict__.update(self.__dict__)
- rv.overlayed = True
- rv.linked_to = self
-
- for key, value in iteritems(args):
- if value is not missing:
- setattr(rv, key, value)
-
- if cache_size is not missing:
- rv.cache = create_cache(cache_size)
- else:
- rv.cache = copy_cache(self.cache)
-
- rv.extensions = {}
- for key, value in iteritems(self.extensions):
- rv.extensions[key] = value.bind(rv)
- if extensions is not missing:
- rv.extensions.update(load_extensions(rv, extensions))
-
- return _environment_sanity_check(rv)
-
- lexer = property(get_lexer, doc="The lexer for this environment.")
-
- def iter_extensions(self):
- """Iterates over the extensions by priority."""
- return iter(sorted(self.extensions.values(),
- key=lambda x: x.priority))
-
- def getitem(self, obj, argument):
- """Get an item or attribute of an object but prefer the item."""
- try:
- return obj[argument]
- except (AttributeError, TypeError, LookupError):
- if isinstance(argument, string_types):
- try:
- attr = str(argument)
- except Exception:
- pass
- else:
- try:
- return getattr(obj, attr)
- except AttributeError:
- pass
- return self.undefined(obj=obj, name=argument)
-
- def getattr(self, obj, attribute):
- """Get an item or attribute of an object but prefer the attribute.
- Unlike :meth:`getitem` the attribute *must* be a bytestring.
- """
- try:
- return getattr(obj, attribute)
- except AttributeError:
- pass
- try:
- return obj[attribute]
- except (TypeError, LookupError, AttributeError):
- return self.undefined(obj=obj, name=attribute)
-
- def call_filter(self, name, value, args=None, kwargs=None,
- context=None, eval_ctx=None):
- """Invokes a filter on a value the same way the compiler does it.
-
- Note that on Python 3 this might return a coroutine in case the
- filter is running from an environment in async mode and the filter
- supports async execution. It's your responsibility to await this
- if needed.
-
- .. versionadded:: 2.7
- """
- func = self.filters.get(name)
- if func is None:
- fail_for_missing_callable('no filter named %r', name)
- args = [value] + list(args or ())
- if getattr(func, 'contextfilter', False):
- if context is None:
- raise TemplateRuntimeError('Attempted to invoke context '
- 'filter without context')
- args.insert(0, context)
- elif getattr(func, 'evalcontextfilter', False):
- if eval_ctx is None:
- if context is not None:
- eval_ctx = context.eval_ctx
- else:
- eval_ctx = EvalContext(self)
- args.insert(0, eval_ctx)
- elif getattr(func, 'environmentfilter', False):
- args.insert(0, self)
- return func(*args, **(kwargs or {}))
-
- def call_test(self, name, value, args=None, kwargs=None):
- """Invokes a test on a value the same way the compiler does it.
-
- .. versionadded:: 2.7
- """
- func = self.tests.get(name)
- if func is None:
- fail_for_missing_callable('no test named %r', name)
- return func(value, *(args or ()), **(kwargs or {}))
-
- @internalcode
- def parse(self, source, name=None, filename=None):
- """Parse the sourcecode and return the abstract syntax tree. This
- tree of nodes is used by the compiler to convert the template into
- executable source- or bytecode. This is useful for debugging or to
- extract information from templates.
-
- If you are :ref:`developing Jinja2 extensions <writing-extensions>`
- this gives you a good overview of the node tree generated.
- """
- try:
- return self._parse(source, name, filename)
- except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source)
-
- def _parse(self, source, name, filename):
- """Internal parsing function used by `parse` and `compile`."""
- return Parser(self, source, name, encode_filename(filename)).parse()
-
- def lex(self, source, name=None, filename=None):
- """Lex the given sourcecode and return a generator that yields
- tokens as tuples in the form ``(lineno, token_type, value)``.
- This can be useful for :ref:`extension development <writing-extensions>`
- and debugging templates.
-
- This does not perform preprocessing. If you want the preprocessing
- of the extensions to be applied you have to filter source through
- the :meth:`preprocess` method.
- """
- source = text_type(source)
- try:
- return self.lexer.tokeniter(source, name, filename)
- except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source)
-
- def preprocess(self, source, name=None, filename=None):
- """Preprocesses the source with all extensions. This is automatically
- called for all parsing and compiling methods but *not* for :meth:`lex`
- because there you usually only want the actual source tokenized.
- """
- return reduce(lambda s, e: e.preprocess(s, name, filename),
- self.iter_extensions(), text_type(source))
-
- def _tokenize(self, source, name, filename=None, state=None):
- """Called by the parser to do the preprocessing and filtering
- for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
- """
- source = self.preprocess(source, name, filename)
- stream = self.lexer.tokenize(source, name, filename, state)
- for ext in self.iter_extensions():
- stream = ext.filter_stream(stream)
- if not isinstance(stream, TokenStream):
- stream = TokenStream(stream, name, filename)
- return stream
-
- def _generate(self, source, name, filename, defer_init=False):
- """Internal hook that can be overridden to hook a different generate
- method in.
-
- .. versionadded:: 2.5
- """
- return generate(source, self, name, filename, defer_init=defer_init,
- optimized=self.optimized)
-
- def _compile(self, source, filename):
- """Internal hook that can be overridden to hook a different compile
- method in.
-
- .. versionadded:: 2.5
- """
- return compile(source, filename, 'exec')
-
- @internalcode
- def compile(self, source, name=None, filename=None, raw=False,
- defer_init=False):
- """Compile a node or template source code. The `name` parameter is
- the load name of the template after it was joined using
- :meth:`join_path` if necessary, not the filename on the file system.
- the `filename` parameter is the estimated filename of the template on
- the file system. If the template came from a database or memory this
- can be omitted.
-
- The return value of this method is a python code object. If the `raw`
- parameter is `True` the return value will be a string with python
- code equivalent to the bytecode returned otherwise. This method is
- mainly used internally.
-
- `defer_init` is used internally to aid the module code generator. This
- causes the generated code to be importable without the global
- environment variable being set.
-
- .. versionadded:: 2.4
- `defer_init` parameter added.
- """
- source_hint = None
- try:
- if isinstance(source, string_types):
- source_hint = source
- source = self._parse(source, name, filename)
- source = self._generate(source, name, filename,
- defer_init=defer_init)
- if raw:
- return source
- if filename is None:
- filename = '<template>'
- else:
- filename = encode_filename(filename)
- return self._compile(source, filename)
- except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source_hint)
-
- def compile_expression(self, source, undefined_to_none=True):
- """A handy helper method that returns a callable that accepts keyword
- arguments that appear as variables in the expression. If called it
- returns the result of the expression.
-
- This is useful if applications want to use the same rules as Jinja
- in template "configuration files" or similar situations.
-
- Example usage:
-
- >>> env = Environment()
- >>> expr = env.compile_expression('foo == 42')
- >>> expr(foo=23)
- False
- >>> expr(foo=42)
- True
-
- Per default the return value is converted to `None` if the
- expression returns an undefined value. This can be changed
- by setting `undefined_to_none` to `False`.
-
- >>> env.compile_expression('var')() is None
- True
- >>> env.compile_expression('var', undefined_to_none=False)()
- Undefined
-
- .. versionadded:: 2.1
- """
- parser = Parser(self, source, state='variable')
- exc_info = None
- try:
- expr = parser.parse_expression()
- if not parser.stream.eos:
- raise TemplateSyntaxError('chunk after expression',
- parser.stream.current.lineno,
- None, None)
- expr.set_environment(self)
- except TemplateSyntaxError:
- exc_info = sys.exc_info()
- if exc_info is not None:
- self.handle_exception(exc_info, source_hint=source)
- body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
- template = self.from_string(nodes.Template(body, lineno=1))
- return TemplateExpression(template, undefined_to_none)
-
- def compile_templates(self, target, extensions=None, filter_func=None,
- zip='deflated', log_function=None,
- ignore_errors=True, py_compile=False):
- """Finds all the templates the loader can find, compiles them
- and stores them in `target`. If `zip` is `None`, instead of in a
- zipfile, the templates will be stored in a directory.
- By default a deflate zip algorithm is used. To switch to
- the stored algorithm, `zip` can be set to ``'stored'``.
-
- `extensions` and `filter_func` are passed to :meth:`list_templates`.
- Each template returned will be compiled to the target folder or
- zipfile.
-
- By default template compilation errors are ignored. In case a
- log function is provided, errors are logged. If you want template
- syntax errors to abort the compilation you can set `ignore_errors`
- to `False` and you will get an exception on syntax errors.
-
- If `py_compile` is set to `True` .pyc files will be written to the
- target instead of standard .py files. This flag does not do anything
- on PyPy and Python 3, where .pyc files are not picked up automatically
- and don't give much benefit.
-
- .. versionadded:: 2.4
- """
- from jinja2.loaders import ModuleLoader
-
- if log_function is None:
- log_function = lambda x: None
-
- if py_compile:
- if not PY2 or PYPY:
- from warnings import warn
- warn(Warning('py_compile has no effect on pypy or Python 3'))
- py_compile = False
- else:
- import imp
- import marshal
- py_header = imp.get_magic() + \
- u'\xff\xff\xff\xff'.encode('iso-8859-15')
-
- # Python 3.3 added a source filesize to the header
- if sys.version_info >= (3, 3):
- py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
-
- def write_file(filename, data, mode):
- if zip:
- info = ZipInfo(filename)
- info.external_attr = 0o755 << 16
- zip_file.writestr(info, data)
- else:
- f = open(os.path.join(target, filename), mode)
- try:
- f.write(data)
- finally:
- f.close()
-
- if zip is not None:
- from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
- zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
- stored=ZIP_STORED)[zip])
- log_function('Compiling into Zip archive "%s"' % target)
- else:
- if not os.path.isdir(target):
- os.makedirs(target)
- log_function('Compiling into folder "%s"' % target)
-
- try:
- for name in self.list_templates(extensions, filter_func):
- source, filename, _ = self.loader.get_source(self, name)
- try:
- code = self.compile(source, name, filename, True, True)
- except TemplateSyntaxError as e:
- if not ignore_errors:
- raise
- log_function('Could not compile "%s": %s' % (name, e))
- continue
-
- filename = ModuleLoader.get_module_filename(name)
-
- if py_compile:
- c = self._compile(code, encode_filename(filename))
- write_file(filename + 'c', py_header +
- marshal.dumps(c), 'wb')
- log_function('Byte-compiled "%s" as %s' %
- (name, filename + 'c'))
- else:
- write_file(filename, code, 'w')
- log_function('Compiled "%s" as %s' % (name, filename))
- finally:
- if zip:
- zip_file.close()
-
- log_function('Finished compiling templates')
-
- def list_templates(self, extensions=None, filter_func=None):
- """Returns a list of templates for this environment. This requires
- that the loader supports the
- :meth:`~BaseLoader.list_templates` method.
-
- If there are other files in the template folder besides the
- actual templates, the returned list can be filtered. There are two
- ways: either `extensions` is set to a list of file extensions for
- templates, or a `filter_func` can be provided which is a callable that
- is passed a template name and should return `True` if it should end up
- in the result list.
-
- If the loader does not support that, a :exc:`TypeError` is raised.
-
- .. versionadded:: 2.4
- """
- x = self.loader.list_templates()
- if extensions is not None:
- if filter_func is not None:
- raise TypeError('either extensions or filter_func '
- 'can be passed, but not both')
- filter_func = lambda x: '.' in x and \
- x.rsplit('.', 1)[1] in extensions
- if filter_func is not None:
- x = list(ifilter(filter_func, x))
- return x
-
- def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
- """Exception handling helper. This is used internally to either raise
- rewritten exceptions or return a rendered traceback for the template.
- """
- global _make_traceback
- if exc_info is None:
- exc_info = sys.exc_info()
-
- # the debugging module is imported when it's used for the first time.
- # we're doing a lot of stuff there and for applications that do not
- # get any exceptions in template rendering there is no need to load
- # all of that.
- if _make_traceback is None:
- from jinja2.debug import make_traceback as _make_traceback
- traceback = _make_traceback(exc_info, source_hint)
- if rendered and self.exception_formatter is not None:
- return self.exception_formatter(traceback)
- if self.exception_handler is not None:
- self.exception_handler(traceback)
- exc_type, exc_value, tb = traceback.standard_exc_info
- reraise(exc_type, exc_value, tb)
-
- def join_path(self, template, parent):
- """Join a template with the parent. By default all the lookups are
- relative to the loader root so this method returns the `template`
- parameter unchanged, but if the paths should be relative to the
- parent template, this function can be used to calculate the real
- template name.
-
- Subclasses may override this method and implement template path
- joining here.
- """
- return template
-
- @internalcode
- def _load_template(self, name, globals):
- if self.loader is None:
- raise TypeError('no loader for this environment specified')
- cache_key = (weakref.ref(self.loader), name)
- if self.cache is not None:
- template = self.cache.get(cache_key)
- if template is not None and (not self.auto_reload or
- template.is_up_to_date):
- return template
- template = self.loader.load(self, name, globals)
- if self.cache is not None:
- self.cache[cache_key] = template
- return template
-
- @internalcode
- def get_template(self, name, parent=None, globals=None):
- """Load a template from the loader. If a loader is configured this
- method asks the loader for the template and returns a :class:`Template`.
- If the `parent` parameter is not `None`, :meth:`join_path` is called
- to get the real template name before loading.
-
- The `globals` parameter can be used to provide template wide globals.
- These variables are available in the context at render time.
-
- If the template does not exist a :exc:`TemplateNotFound` exception is
- raised.
-
- .. versionchanged:: 2.4
- If `name` is a :class:`Template` object it is returned from the
- function unchanged.
- """
- if isinstance(name, Template):
- return name
- if parent is not None:
- name = self.join_path(name, parent)
- return self._load_template(name, self.make_globals(globals))
-
- @internalcode
- def select_template(self, names, parent=None, globals=None):
- """Works like :meth:`get_template` but tries a number of templates
- before it fails. If it cannot find any of the templates, it will
- raise a :exc:`TemplatesNotFound` exception.
-
- .. versionadded:: 2.3
-
- .. versionchanged:: 2.4
- If `names` contains a :class:`Template` object it is returned
- from the function unchanged.
- """
- if not names:
- raise TemplatesNotFound(message=u'Tried to select from an empty list '
- u'of templates.')
- globals = self.make_globals(globals)
- for name in names:
- if isinstance(name, Template):
- return name
- if parent is not None:
- name = self.join_path(name, parent)
- try:
- return self._load_template(name, globals)
- except TemplateNotFound:
- pass
- raise TemplatesNotFound(names)
-
- @internalcode
- def get_or_select_template(self, template_name_or_list,
- parent=None, globals=None):
- """Does a typecheck and dispatches to :meth:`select_template`
- if an iterable of template names is given, otherwise to
- :meth:`get_template`.
-
- .. versionadded:: 2.3
- """
- if isinstance(template_name_or_list, string_types):
- return self.get_template(template_name_or_list, parent, globals)
- elif isinstance(template_name_or_list, Template):
- return template_name_or_list
- return self.select_template(template_name_or_list, parent, globals)
-
- def from_string(self, source, globals=None, template_class=None):
- """Load a template from a string. This parses the source given and
- returns a :class:`Template` object.
- """
- globals = self.make_globals(globals)
- cls = template_class or self.template_class
- return cls.from_code(self, self.compile(source), globals, None)
-
- def make_globals(self, d):
- """Return a dict for the globals."""
- if not d:
- return self.globals
- return dict(self.globals, **d)
-
-
-class Template(object):
- """The central template object. This class represents a compiled template
- and is used to evaluate it.
-
- Normally the template object is generated from an :class:`Environment` but
- it also has a constructor that makes it possible to create a template
- instance directly using the constructor. It takes the same arguments as
- the environment constructor but it's not possible to specify a loader.
-
- Every template object has a few methods and members that are guaranteed
- to exist. However it's important that a template object should be
- considered immutable. Modifications on the object are not supported.
-
- Template objects created from the constructor rather than an environment
- do have an `environment` attribute that points to a temporary environment
- that is probably shared with other templates created with the constructor
- and compatible settings.
-
- >>> template = Template('Hello {{ name }}!')
- >>> template.render(name='John Doe') == u'Hello John Doe!'
- True
- >>> stream = template.stream(name='John Doe')
- >>> next(stream) == u'Hello John Doe!'
- True
- >>> next(stream)
- Traceback (most recent call last):
- ...
- StopIteration
- """
-
- def __new__(cls, source,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- enable_async=False):
- env = get_spontaneous_environment(
- block_start_string, block_end_string, variable_start_string,
- variable_end_string, comment_start_string, comment_end_string,
- line_statement_prefix, line_comment_prefix, trim_blocks,
- lstrip_blocks, newline_sequence, keep_trailing_newline,
- frozenset(extensions), optimized, undefined, finalize, autoescape,
- None, 0, False, None, enable_async)
- return env.from_string(source, template_class=cls)
-
- @classmethod
- def from_code(cls, environment, code, globals, uptodate=None):
- """Creates a template object from compiled code and the globals. This
- is used by the loaders and environment to create a template object.
- """
- namespace = {
- 'environment': environment,
- '__file__': code.co_filename
- }
- exec(code, namespace)
- rv = cls._from_namespace(environment, namespace, globals)
- rv._uptodate = uptodate
- return rv
-
- @classmethod
- def from_module_dict(cls, environment, module_dict, globals):
- """Creates a template object from a module. This is used by the
- module loader to create a template object.
-
- .. versionadded:: 2.4
- """
- return cls._from_namespace(environment, module_dict, globals)
-
- @classmethod
- def _from_namespace(cls, environment, namespace, globals):
- t = object.__new__(cls)
- t.environment = environment
- t.globals = globals
- t.name = namespace['name']
- t.filename = namespace['__file__']
- t.blocks = namespace['blocks']
-
- # render function and module
- t.root_render_func = namespace['root']
- t._module = None
-
- # debug and loader helpers
- t._debug_info = namespace['debug_info']
- t._uptodate = None
-
- # store the reference
- namespace['environment'] = environment
- namespace['__jinja_template__'] = t
-
- return t
-
- def render(self, *args, **kwargs):
- """This method accepts the same arguments as the `dict` constructor:
- A dict, a dict subclass or some keyword arguments. If no arguments
- are given the context will be empty. These two calls do the same::
-
- template.render(knights='that say nih')
- template.render({'knights': 'that say nih'})
-
- This will return the rendered template as unicode string.
- """
- vars = dict(*args, **kwargs)
- try:
- return concat(self.root_render_func(self.new_context(vars)))
- except Exception:
- exc_info = sys.exc_info()
- return self.environment.handle_exception(exc_info, True)
-
- def render_async(self, *args, **kwargs):
- """This works similar to :meth:`render` but returns a coroutine
- that when awaited returns the entire rendered template string. This
- requires the async feature to be enabled.
-
- Example usage::
-
- await template.render_async(knights='that say nih; asynchronously')
- """
- # see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
-
- def stream(self, *args, **kwargs):
- """Works exactly like :meth:`generate` but returns a
- :class:`TemplateStream`.
- """
- return TemplateStream(self.generate(*args, **kwargs))
-
- def generate(self, *args, **kwargs):
- """For very large templates it can be useful to not render the whole
- template at once but to evaluate each statement one after another and
- yield piece by piece. This method does exactly that and returns
- a generator that yields one item after another as unicode strings.
-
- It accepts the same arguments as :meth:`render`.
- """
- vars = dict(*args, **kwargs)
- try:
- for event in self.root_render_func(self.new_context(vars)):
- yield event
- except Exception:
- exc_info = sys.exc_info()
- else:
- return
- yield self.environment.handle_exception(exc_info, True)
-
- def generate_async(self, *args, **kwargs):
- """An async version of :meth:`generate`. Works very similarly but
- returns an async iterator instead.
- """
- # see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
-
- def new_context(self, vars=None, shared=False, locals=None):
- """Create a new :class:`Context` for this template. The vars
- provided will be passed to the template. Per default the globals
- are added to the context. If shared is set to `True` the data
- is passed as-is to the context without adding the globals.
-
- `locals` can be a dict of local variables for internal usage.
- """
- return new_context(self.environment, self.name, self.blocks,
- vars, shared, self.globals, locals)
-
- def make_module(self, vars=None, shared=False, locals=None):
- """This method works like the :attr:`module` attribute when called
- without arguments but it will evaluate the template on every call
- rather than caching it. It's also possible to provide
- a dict which is then used as context. The arguments are the same
- as for the :meth:`new_context` method.
- """
- return TemplateModule(self, self.new_context(vars, shared, locals))
-
- def make_module_async(self, vars=None, shared=False, locals=None):
- """As template module creation can invoke template code for
- asynchronous executions this method must be used instead of the
- normal :meth:`make_module` one. Likewise the module attribute
- becomes unavailable in async mode.
- """
- # see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
-
- @internalcode
- def _get_default_module(self):
- if self._module is not None:
- return self._module
- self._module = rv = self.make_module()
- return rv
-
- @property
- def module(self):
- """The template as module. This is used for imports in the
- template runtime but is also useful if one wants to access
- exported template variables from the Python layer:
-
- >>> t = Template('{% macro foo() %}42{% endmacro %}23')
- >>> str(t.module)
- '23'
- >>> t.module.foo() == u'42'
- True
-
- This attribute is not available if async mode is enabled.
- """
- return self._get_default_module()
-
- def get_corresponding_lineno(self, lineno):
- """Return the source line number of a line number in the
- generated bytecode as they are not in sync.
- """
- for template_line, code_line in reversed(self.debug_info):
- if code_line <= lineno:
- return template_line
- return 1
-
- @property
- def is_up_to_date(self):
- """If this variable is `False` there is a newer version available."""
- if self._uptodate is None:
- return True
- return self._uptodate()
-
- @property
- def debug_info(self):
- """The debug info mapping."""
- return [tuple(imap(int, x.split('='))) for x in
- self._debug_info.split('&')]
-
- def __repr__(self):
- if self.name is None:
- name = 'memory:%x' % id(self)
- else:
- name = repr(self.name)
- return '<%s %s>' % (self.__class__.__name__, name)
-
-
-@implements_to_string
-class TemplateModule(object):
- """Represents an imported template. All the exported names of the
- template are available as attributes on this object. Additionally
- converting it into a unicode or byte string renders the contents.
- """
-
- def __init__(self, template, context, body_stream=None):
- if body_stream is None:
- if context.environment.is_async:
- raise RuntimeError('Async mode requires a body stream '
- 'to be passed to a template module. Use '
- 'the async methods of the API you are '
- 'using.')
- body_stream = list(template.root_render_func(context))
- self._body_stream = body_stream
- self.__dict__.update(context.get_exported())
- self.__name__ = template.name
-
- def __html__(self):
- return Markup(concat(self._body_stream))
-
- def __str__(self):
- return concat(self._body_stream)
-
- def __repr__(self):
- if self.__name__ is None:
- name = 'memory:%x' % id(self)
- else:
- name = repr(self.__name__)
- return '<%s %s>' % (self.__class__.__name__, name)
-
-
-class TemplateExpression(object):
- """The :meth:`jinja2.Environment.compile_expression` method returns an
- instance of this object. It encapsulates the expression-like access
- to the template with an expression it wraps.
- """
-
- def __init__(self, template, undefined_to_none):
- self._template = template
- self._undefined_to_none = undefined_to_none
-
- def __call__(self, *args, **kwargs):
- context = self._template.new_context(dict(*args, **kwargs))
- consume(self._template.root_render_func(context))
- rv = context.vars['result']
- if self._undefined_to_none and isinstance(rv, Undefined):
- rv = None
- return rv
-
-
-@implements_iterator
-class TemplateStream(object):
- """A template stream works pretty much like an ordinary python generator
- but it can buffer multiple items to reduce the number of total iterations.
- Per default the output is unbuffered which means that for every unbuffered
- instruction in the template one unicode string is yielded.
-
- If buffering is enabled with a buffer size of 5, five items are combined
- into a new unicode string. This is mainly useful if you are streaming
- big templates to a client via WSGI which flushes after each iteration.
- """
-
- def __init__(self, gen):
- self._gen = gen
- self.disable_buffering()
-
- def dump(self, fp, encoding=None, errors='strict'):
- """Dump the complete stream into a file or file-like object.
- Per default unicode strings are written, if you want to encode
- before writing specify an `encoding`.
-
- Example usage::
-
- Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
- """
- close = False
- if isinstance(fp, string_types):
- if encoding is None:
- encoding = 'utf-8'
- fp = open(fp, 'wb')
- close = True
- try:
- if encoding is not None:
- iterable = (x.encode(encoding, errors) for x in self)
- else:
- iterable = self
- if hasattr(fp, 'writelines'):
- fp.writelines(iterable)
- else:
- for item in iterable:
- fp.write(item)
- finally:
- if close:
- fp.close()
-
- def disable_buffering(self):
- """Disable the output buffering."""
- self._next = partial(next, self._gen)
- self.buffered = False
-
- def _buffered_generator(self, size):
- buf = []
- c_size = 0
- push = buf.append
-
- while 1:
- try:
- while c_size < size:
- c = next(self._gen)
- push(c)
- if c:
- c_size += 1
- except StopIteration:
- if not c_size:
- return
- yield concat(buf)
- del buf[:]
- c_size = 0
-
- def enable_buffering(self, size=5):
- """Enable buffering. Buffer `size` items before yielding them."""
- if size <= 1:
- raise ValueError('buffer size too small')
-
- self.buffered = True
- self._next = partial(next, self._buffered_generator(size))
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return self._next()
-
-
-# hook in default template class. if anyone reads this comment: ignore that
-# it's possible to use custom templates ;-)
-Environment.template_class = Template
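The three helpers above back public pieces of the Jinja2 2.x API: `compile_expression` returns a `TemplateExpression`, `Template.stream()` returns a `TemplateStream`, and `Template.module` exposes a `TemplateModule`. A minimal sketch (template text and output path are illustrative):

    from jinja2 import Environment

    env = Environment()

    # TemplateExpression: a bare expression compiled into a callable.
    add_two = env.compile_expression('foo + 2')
    assert add_two(foo=40) == 42

    # TemplateStream: lazy rendering; with buffering enabled, five output
    # events are concatenated into each yielded unicode string.
    stream = env.from_string(
        '{% for i in range(10) %}{{ i }} {% endfor %}').stream()
    stream.enable_buffering(size=5)
    stream.dump('out.txt')  # dump() also accepts file-like objects

    # TemplateModule: exported names (macros, variables) of a template.
    mod = env.from_string(
        '{% macro hello(name) %}Hello {{ name }}!{% endmacro %}').module
    print(mod.hello('World'))  # -> Hello World!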
diff --git a/venv/Lib/site-packages/jinja2/exceptions.py b/venv/Lib/site-packages/jinja2/exceptions.py
deleted file mode 100644
index c018a33..0000000
--- a/venv/Lib/site-packages/jinja2/exceptions.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.exceptions
- ~~~~~~~~~~~~~~~~~
-
- Jinja exceptions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2._compat import imap, text_type, PY2, implements_to_string
-
-
-class TemplateError(Exception):
- """Baseclass for all template errors."""
-
- if PY2:
- def __init__(self, message=None):
- if message is not None:
- message = text_type(message).encode('utf-8')
- Exception.__init__(self, message)
-
- @property
- def message(self):
- if self.args:
- message = self.args[0]
- if message is not None:
- return message.decode('utf-8', 'replace')
-
- def __unicode__(self):
- return self.message or u''
- else:
- def __init__(self, message=None):
- Exception.__init__(self, message)
-
- @property
- def message(self):
- if self.args:
- message = self.args[0]
- if message is not None:
- return message
-
-
-@implements_to_string
-class TemplateNotFound(IOError, LookupError, TemplateError):
- """Raised if a template does not exist."""
-
- # looks weird, but removes the warning descriptor that just
- # bogusly warns us about message being deprecated
- message = None
-
- def __init__(self, name, message=None):
- IOError.__init__(self)
- if message is None:
- message = name
- self.message = message
- self.name = name
- self.templates = [name]
-
- def __str__(self):
- return self.message
-
-
-class TemplatesNotFound(TemplateNotFound):
- """Like :class:`TemplateNotFound` but raised if multiple templates
- are selected. This is a subclass of :class:`TemplateNotFound`
- exception, so just catching the base exception will catch both.
-
- .. versionadded:: 2.2
- """
-
- def __init__(self, names=(), message=None):
- if message is None:
- message = u'none of the templates given were found: ' + \
- u', '.join(imap(text_type, names))
- TemplateNotFound.__init__(self, names and names[-1] or None, message)
- self.templates = list(names)
-
-
-@implements_to_string
-class TemplateSyntaxError(TemplateError):
- """Raised to tell the user that there is a problem with the template."""
-
- def __init__(self, message, lineno, name=None, filename=None):
- TemplateError.__init__(self, message)
- self.lineno = lineno
- self.name = name
- self.filename = filename
- self.source = None
-
- # this is set to True if the debug.translate_syntax_error
- # function translated the syntax error into a new traceback
- self.translated = False
-
- def __str__(self):
- # for translated errors we only return the message
- if self.translated:
- return self.message
-
- # otherwise attach some stuff
- location = 'line %d' % self.lineno
- name = self.filename or self.name
- if name:
- location = 'File "%s", %s' % (name, location)
- lines = [self.message, ' ' + location]
-
- # if the source is set, add the line to the output
- if self.source is not None:
- try:
- line = self.source.splitlines()[self.lineno - 1]
- except IndexError:
- line = None
- if line:
- lines.append(' ' + line.strip())
-
- return u'\n'.join(lines)
-
-
-class TemplateAssertionError(TemplateSyntaxError):
- """Like a template syntax error, but covers cases where something in the
- template caused an error at compile time that wasn't necessarily caused
- by a syntax error. However it's a direct subclass of
- :exc:`TemplateSyntaxError` and has the same attributes.
- """
-
-
-class TemplateRuntimeError(TemplateError):
- """A generic runtime error in the template engine. Under some situations
- Jinja may raise this exception.
- """
-
-
-class UndefinedError(TemplateRuntimeError):
- """Raised if a template tries to operate on :class:`Undefined`."""
-
-
-class SecurityError(TemplateRuntimeError):
- """Raised if a template tries to do something insecure if the
- sandbox is enabled.
- """
-
-
-class FilterArgumentError(TemplateRuntimeError):
- """This error is raised if a filter was called with inappropriate
- arguments
- """
diff --git a/venv/Lib/site-packages/jinja2/ext.py b/venv/Lib/site-packages/jinja2/ext.py
deleted file mode 100644
index 0734a84..0000000
--- a/venv/Lib/site-packages/jinja2/ext.py
+++ /dev/null
@@ -1,627 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.ext
- ~~~~~~~~~~
-
- Jinja extensions allow adding custom tags, similar to the way Django custom
- tags work. By default two example extensions exist: an i18n and a cache
- extension.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-import re
-
-from jinja2 import nodes
-from jinja2.defaults import BLOCK_START_STRING, \
- BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
- COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
- LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
- KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
-from jinja2.environment import Environment
-from jinja2.runtime import concat
-from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
-from jinja2.utils import contextfunction, import_string, Markup
-from jinja2._compat import with_metaclass, string_types, iteritems
-
-
-# the only real useful gettext functions for a Jinja template. Note
-# that ugettext must be assigned to gettext as Jinja doesn't support
-# non-unicode strings.
-GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
-
-
-class ExtensionRegistry(type):
- """Gives the extension an unique identifier."""
-
- def __new__(cls, name, bases, d):
- rv = type.__new__(cls, name, bases, d)
- rv.identifier = rv.__module__ + '.' + rv.__name__
- return rv
-
-
-class Extension(with_metaclass(ExtensionRegistry, object)):
- """Extensions can be used to add extra functionality to the Jinja template
- system at the parser level. Custom extensions are bound to an environment
- but may not store environment specific data on `self`. The reason for
- this is that an extension can be bound to another environment (for
- overlays) by creating a copy and reassigning the `environment` attribute.
-
- As extensions are created by the environment they cannot accept any
- arguments for configuration. One may want to work around that by using
- a factory function, but that is not possible as extensions are identified
- by their import name. The correct way to configure the extension is
- storing the configuration values on the environment. Because this way the
- environment ends up acting as central configuration storage the
- attributes may clash which is why extensions have to ensure that the names
- they choose for configuration are not too generic. ``prefix`` for example
- is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
- name as it includes the name of the extension (fragment cache).
- """
-
- #: if this extension parses this is the list of tags it's listening to.
- tags = set()
-
- #: the priority of that extension. This is especially useful for
- #: extensions that preprocess values. A lower value means higher
- #: priority.
- #:
- #: .. versionadded:: 2.4
- priority = 100
-
- def __init__(self, environment):
- self.environment = environment
-
- def bind(self, environment):
- """Create a copy of this extension bound to another environment."""
- rv = object.__new__(self.__class__)
- rv.__dict__.update(self.__dict__)
- rv.environment = environment
- return rv
-
- def preprocess(self, source, name, filename=None):
- """This method is called before the actual lexing and can be used to
- preprocess the source. The `filename` is optional. The return value
- must be the preprocessed source.
- """
- return source
-
- def filter_stream(self, stream):
- """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
- to filter tokens returned. This method has to return an iterable of
- :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
- :class:`~jinja2.lexer.TokenStream`.
-
- In the `ext` folder of the Jinja2 source distribution there is a file
- called `inlinegettext.py` which implements a filter that utilizes this
- method.
- """
- return stream
-
- def parse(self, parser):
- """If any of the :attr:`tags` matched this method is called with the
- parser as first argument. The token the parser stream is pointing at
- is the name token that matched. This method has to return one or a
- list of multiple nodes.
- """
- raise NotImplementedError()
-
- def attr(self, name, lineno=None):
- """Return an attribute node for the current extension. This is useful
- to pass constants on extensions to generated template code.
-
- ::
-
- self.attr('_my_attribute', lineno=lineno)
- """
- return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
-
- def call_method(self, name, args=None, kwargs=None, dyn_args=None,
- dyn_kwargs=None, lineno=None):
- """Call a method of the extension. This is a shortcut for
- :meth:`attr` + :class:`jinja2.nodes.Call`.
- """
- if args is None:
- args = []
- if kwargs is None:
- kwargs = []
- return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
- dyn_args, dyn_kwargs, lineno=lineno)
-
-
-@contextfunction
-def _gettext_alias(__context, *args, **kwargs):
- return __context.call(__context.resolve('gettext'), *args, **kwargs)
-
-
-def _make_new_gettext(func):
- @contextfunction
- def gettext(__context, __string, **variables):
- rv = __context.call(func, __string)
- if __context.eval_ctx.autoescape:
- rv = Markup(rv)
- return rv % variables
- return gettext
-
-
-def _make_new_ngettext(func):
- @contextfunction
- def ngettext(__context, __singular, __plural, __num, **variables):
- variables.setdefault('num', __num)
- rv = __context.call(func, __singular, __plural, __num)
- if __context.eval_ctx.autoescape:
- rv = Markup(rv)
- return rv % variables
- return ngettext
-
-
-class InternationalizationExtension(Extension):
- """This extension adds gettext support to Jinja2."""
- tags = set(['trans'])
-
- # TODO: the i18n extension is currently reevaluating values in a few
- # situations. Take this example:
- # {% trans count=something() %}{{ count }} foo{% pluralize
- # %}{{ count }} fooss{% endtrans %}
- # something is called twice here. One time for the gettext value and
- # the other time for the n-parameter of the ngettext function.
-
- def __init__(self, environment):
- Extension.__init__(self, environment)
- environment.globals['_'] = _gettext_alias
- environment.extend(
- install_gettext_translations=self._install,
- install_null_translations=self._install_null,
- install_gettext_callables=self._install_callables,
- uninstall_gettext_translations=self._uninstall,
- extract_translations=self._extract,
- newstyle_gettext=False
- )
-
- def _install(self, translations, newstyle=None):
- gettext = getattr(translations, 'ugettext', None)
- if gettext is None:
- gettext = translations.gettext
- ngettext = getattr(translations, 'ungettext', None)
- if ngettext is None:
- ngettext = translations.ngettext
- self._install_callables(gettext, ngettext, newstyle)
-
- def _install_null(self, newstyle=None):
- self._install_callables(
- lambda x: x,
- lambda s, p, n: (n != 1 and (p,) or (s,))[0],
- newstyle
- )
-
- def _install_callables(self, gettext, ngettext, newstyle=None):
- if newstyle is not None:
- self.environment.newstyle_gettext = newstyle
- if self.environment.newstyle_gettext:
- gettext = _make_new_gettext(gettext)
- ngettext = _make_new_ngettext(ngettext)
- self.environment.globals.update(
- gettext=gettext,
- ngettext=ngettext
- )
-
- def _uninstall(self, translations):
- for key in 'gettext', 'ngettext':
- self.environment.globals.pop(key, None)
-
- def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
- if isinstance(source, string_types):
- source = self.environment.parse(source)
- return extract_from_ast(source, gettext_functions)
-
- def parse(self, parser):
- """Parse a translatable tag."""
- lineno = next(parser.stream).lineno
- num_called_num = False
-
- # find all the variables referenced. Additionally a variable can be
- # defined in the body of the trans block too, but this is checked at
- # a later state.
- plural_expr = None
- plural_expr_assignment = None
- variables = {}
- trimmed = None
- while parser.stream.current.type != 'block_end':
- if variables:
- parser.stream.expect('comma')
-
- # skip colon for python compatibility
- if parser.stream.skip_if('colon'):
- break
-
- name = parser.stream.expect('name')
- if name.value in variables:
- parser.fail('translatable variable %r defined twice.' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
-
- # expressions
- if parser.stream.current.type == 'assign':
- next(parser.stream)
- variables[name.value] = var = parser.parse_expression()
- elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
- trimmed = name.value == 'trimmed'
- continue
- else:
- variables[name.value] = var = nodes.Name(name.value, 'load')
-
- if plural_expr is None:
- if isinstance(var, nodes.Call):
- plural_expr = nodes.Name('_trans', 'load')
- variables[name.value] = plural_expr
- plural_expr_assignment = nodes.Assign(
- nodes.Name('_trans', 'store'), var)
- else:
- plural_expr = var
- num_called_num = name.value == 'num'
-
- parser.stream.expect('block_end')
-
- plural = None
- have_plural = False
- referenced = set()
-
- # now parse until endtrans or pluralize
- singular_names, singular = self._parse_block(parser, True)
- if singular_names:
- referenced.update(singular_names)
- if plural_expr is None:
- plural_expr = nodes.Name(singular_names[0], 'load')
- num_called_num = singular_names[0] == 'num'
-
- # if we have a pluralize block, we parse that too
- if parser.stream.current.test('name:pluralize'):
- have_plural = True
- next(parser.stream)
- if parser.stream.current.type != 'block_end':
- name = parser.stream.expect('name')
- if name.value not in variables:
- parser.fail('unknown variable %r for pluralization' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
- plural_expr = variables[name.value]
- num_called_num = name.value == 'num'
- parser.stream.expect('block_end')
- plural_names, plural = self._parse_block(parser, False)
- next(parser.stream)
- referenced.update(plural_names)
- else:
- next(parser.stream)
-
- # register free names as simple name expressions
- for var in referenced:
- if var not in variables:
- variables[var] = nodes.Name(var, 'load')
-
- if not have_plural:
- plural_expr = None
- elif plural_expr is None:
- parser.fail('pluralize without variables', lineno)
-
- if trimmed is None:
- trimmed = self.environment.policies['ext.i18n.trimmed']
- if trimmed:
- singular = self._trim_whitespace(singular)
- if plural:
- plural = self._trim_whitespace(plural)
-
- node = self._make_node(singular, plural, variables, plural_expr,
- bool(referenced),
- num_called_num and have_plural)
- node.set_lineno(lineno)
- if plural_expr_assignment is not None:
- return [plural_expr_assignment, node]
- else:
- return node
-
- def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
- return _ws_re.sub(' ', string.strip())
-
- def _parse_block(self, parser, allow_pluralize):
- """Parse until the next block tag with a given name."""
- referenced = []
- buf = []
- while 1:
- if parser.stream.current.type == 'data':
- buf.append(parser.stream.current.value.replace('%', '%%'))
- next(parser.stream)
- elif parser.stream.current.type == 'variable_begin':
- next(parser.stream)
- name = parser.stream.expect('name').value
- referenced.append(name)
- buf.append('%%(%s)s' % name)
- parser.stream.expect('variable_end')
- elif parser.stream.current.type == 'block_begin':
- next(parser.stream)
- if parser.stream.current.test('name:endtrans'):
- break
- elif parser.stream.current.test('name:pluralize'):
- if allow_pluralize:
- break
- parser.fail('a translatable section can have only one '
- 'pluralize section')
- parser.fail('control structures in translatable sections are '
- 'not allowed')
- elif parser.stream.eos:
- parser.fail('unclosed translation block')
- else:
- assert False, 'internal parser error'
-
- return referenced, concat(buf)
-
- def _make_node(self, singular, plural, variables, plural_expr,
- vars_referenced, num_called_num):
- """Generates a useful node from the data provided."""
- # no variables referenced? no need to escape for old style
- # gettext invocations only if there are vars.
- if not vars_referenced and not self.environment.newstyle_gettext:
- singular = singular.replace('%%', '%')
- if plural:
- plural = plural.replace('%%', '%')
-
- # singular only:
- if plural_expr is None:
- gettext = nodes.Name('gettext', 'load')
- node = nodes.Call(gettext, [nodes.Const(singular)],
- [], None, None)
-
- # singular and plural
- else:
- ngettext = nodes.Name('ngettext', 'load')
- node = nodes.Call(ngettext, [
- nodes.Const(singular),
- nodes.Const(plural),
- plural_expr
- ], [], None, None)
-
- # in case newstyle gettext is used, the method is powerful
- # enough to handle the variable expansion and autoescape
- # handling itself
- if self.environment.newstyle_gettext:
- for key, value in iteritems(variables):
- # the function adds that later anyways in case num was
- # called num, so just skip it.
- if num_called_num and key == 'num':
- continue
- node.kwargs.append(nodes.Keyword(key, value))
-
- # otherwise do that here
- else:
- # mark the return value as safe if we are in an
- # environment with autoescaping turned on
- node = nodes.MarkSafeIfAutoescape(node)
- if variables:
- node = nodes.Mod(node, nodes.Dict([
- nodes.Pair(nodes.Const(key), value)
- for key, value in variables.items()
- ]))
- return nodes.Output([node])
-
-
-class ExprStmtExtension(Extension):
- """Adds a `do` tag to Jinja2 that works like the print statement just
- that it doesn't print the return value.
- """
- tags = set(['do'])
-
- def parse(self, parser):
- node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
- node.node = parser.parse_tuple()
- return node
-
-
-class LoopControlExtension(Extension):
- """Adds break and continue to the template engine."""
- tags = set(['break', 'continue'])
-
- def parse(self, parser):
- token = next(parser.stream)
- if token.value == 'break':
- return nodes.Break(lineno=token.lineno)
- return nodes.Continue(lineno=token.lineno)
-
-
-class WithExtension(Extension):
- pass
-
-
-class AutoEscapeExtension(Extension):
- pass
-
-
-def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
- babel_style=True):
- """Extract localizable strings from the given template node. Per
- default this function returns matches in babel style that means non string
- parameters as well as keyword arguments are returned as `None`. This
- allows Babel to figure out what you really meant if you are using
- gettext functions that allow keyword arguments for placeholder expansion.
- If you don't want that behavior set the `babel_style` parameter to `False`
- which causes only strings to be returned and parameters are always stored
- in tuples. As a consequence invalid gettext calls (calls without a single
- string parameter or string parameters after non-string parameters) are
- skipped.
-
- This example explains the behavior:
-
- >>> from jinja2 import Environment
- >>> env = Environment()
- >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
- >>> list(extract_from_ast(node))
- [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
- >>> list(extract_from_ast(node, babel_style=False))
- [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
-
- For every string found this function yields a ``(lineno, function,
- message)`` tuple, where:
-
- * ``lineno`` is the number of the line on which the string was found,
- * ``function`` is the name of the ``gettext`` function used (if the
- string was extracted from embedded Python code), and
- * ``message`` is the string itself (a ``unicode`` object, or a tuple
- of ``unicode`` objects for functions with multiple string arguments).
-
- This extraction function operates on the AST and is because of that unable
- to extract any comments. For comment support you have to use the babel
- extraction interface or extract comments yourself.
- """
- for node in node.find_all(nodes.Call):
- if not isinstance(node.node, nodes.Name) or \
- node.node.name not in gettext_functions:
- continue
-
- strings = []
- for arg in node.args:
- if isinstance(arg, nodes.Const) and \
- isinstance(arg.value, string_types):
- strings.append(arg.value)
- else:
- strings.append(None)
-
- for arg in node.kwargs:
- strings.append(None)
- if node.dyn_args is not None:
- strings.append(None)
- if node.dyn_kwargs is not None:
- strings.append(None)
-
- if not babel_style:
- strings = tuple(x for x in strings if x is not None)
- if not strings:
- continue
- else:
- if len(strings) == 1:
- strings = strings[0]
- else:
- strings = tuple(strings)
- yield node.lineno, node.node.name, strings
-
-
-class _CommentFinder(object):
- """Helper class to find comments in a token stream. Can only
- find comments for gettext calls forwards. Once the comment
- from line 4 is found, a comment for line 1 will not return a
- usable value.
- """
-
- def __init__(self, tokens, comment_tags):
- self.tokens = tokens
- self.comment_tags = comment_tags
- self.offset = 0
- self.last_lineno = 0
-
- def find_backwards(self, offset):
- try:
- for _, token_type, token_value in \
- reversed(self.tokens[self.offset:offset]):
- if token_type in ('comment', 'linecomment'):
- try:
- prefix, comment = token_value.split(None, 1)
- except ValueError:
- continue
- if prefix in self.comment_tags:
- return [comment.rstrip()]
- return []
- finally:
- self.offset = offset
-
- def find_comments(self, lineno):
- if not self.comment_tags or self.last_lineno > lineno:
- return []
- for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
- if token_lineno > lineno:
- return self.find_backwards(self.offset + idx)
- return self.find_backwards(len(self.tokens))
-
-
-def babel_extract(fileobj, keywords, comment_tags, options):
- """Babel extraction method for Jinja templates.
-
- .. versionchanged:: 2.3
- Basic support for translation comments was added. If `comment_tags`
- is now set to a list of keywords for extraction, the extractor will
- try to find the best preceding comment that begins with one of the
- keywords. For best results, make sure to not have more than one
- gettext call in one line of code and the matching comment in the
- same line or the line before.
-
- .. versionchanged:: 2.5.1
- The `newstyle_gettext` flag can be set to `True` to enable newstyle
- gettext calls.
-
- .. versionchanged:: 2.7
- A `silent` option can now be provided. If set to `False` template
- syntax errors are propagated instead of being ignored.
-
- :param fileobj: the file-like object the messages should be extracted from
- :param keywords: a list of keywords (i.e. function names) that should be
- recognized as translation functions
- :param comment_tags: a list of translator tags to search for and include
- in the results.
- :param options: a dictionary of additional options (optional)
- :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
- (comments will be empty currently)
- """
- extensions = set()
- for extension in options.get('extensions', '').split(','):
- extension = extension.strip()
- if not extension:
- continue
- extensions.add(import_string(extension))
- if InternationalizationExtension not in extensions:
- extensions.add(InternationalizationExtension)
-
- def getbool(options, key, default=False):
- return options.get(key, str(default)).lower() in \
- ('1', 'on', 'yes', 'true')
-
- silent = getbool(options, 'silent', True)
- environment = Environment(
- options.get('block_start_string', BLOCK_START_STRING),
- options.get('block_end_string', BLOCK_END_STRING),
- options.get('variable_start_string', VARIABLE_START_STRING),
- options.get('variable_end_string', VARIABLE_END_STRING),
- options.get('comment_start_string', COMMENT_START_STRING),
- options.get('comment_end_string', COMMENT_END_STRING),
- options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
- options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
- getbool(options, 'trim_blocks', TRIM_BLOCKS),
- getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
- NEWLINE_SEQUENCE,
- getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
- frozenset(extensions),
- cache_size=0,
- auto_reload=False
- )
-
- if getbool(options, 'trimmed'):
- environment.policies['ext.i18n.trimmed'] = True
- if getbool(options, 'newstyle_gettext'):
- environment.newstyle_gettext = True
-
- source = fileobj.read().decode(options.get('encoding', 'utf-8'))
- try:
- node = environment.parse(source)
- tokens = list(environment.lex(environment.preprocess(source)))
- except TemplateSyntaxError as e:
- if not silent:
- raise
- # skip templates with syntax errors
- return
-
- finder = _CommentFinder(tokens, comment_tags)
- for lineno, func, message in extract_from_ast(node, keywords):
- yield lineno, func, message, finder.find_comments(lineno)
-
-
-#: nicer import names
-i18n = InternationalizationExtension
-do = ExprStmtExtension
-loopcontrols = LoopControlExtension
-with_ = WithExtension
-autoescape = AutoEscapeExtension
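A short sketch of the i18n extension in use, assuming no real gettext catalog is installed (`install_null_translations` falls back to identity translations):

    from jinja2 import Environment
    from jinja2.ext import extract_from_ast

    env = Environment(extensions=['jinja2.ext.i18n'])
    env.install_null_translations(newstyle=True)

    src = ('{% trans count=users|length %}{{ count }} user'
           '{% pluralize %}{{ count }} users{% endtrans %}')
    print(env.from_string(src).render(users=['a', 'b']))  # -> 2 users

    # Babel-style extraction of the translatable strings:
    for lineno, funcname, message in extract_from_ast(env.parse(src)):
        print(lineno, funcname, message)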
diff --git a/venv/Lib/site-packages/jinja2/filters.py b/venv/Lib/site-packages/jinja2/filters.py
deleted file mode 100644
index 267dddd..0000000
--- a/venv/Lib/site-packages/jinja2/filters.py
+++ /dev/null
@@ -1,1190 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.filters
- ~~~~~~~~~~~~~~
-
- Bundled jinja filters.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import re
-import math
-import random
-import warnings
-
-from itertools import groupby, chain
-from collections import namedtuple
-from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
- unicode_urlencode, htmlsafe_json_dumps
-from jinja2.runtime import Undefined
-from jinja2.exceptions import FilterArgumentError
-from jinja2._compat import imap, string_types, text_type, iteritems, PY2
-
-
-_word_re = re.compile(r'\w+', re.UNICODE)
-_word_beginning_split_re = re.compile(r'([-\s\(\{\[\<]+)', re.UNICODE)
-
-
-def contextfilter(f):
- """Decorator for marking context dependent filters. The current
- :class:`Context` will be passed as first argument.
- """
- f.contextfilter = True
- return f
-
-
-def evalcontextfilter(f):
- """Decorator for marking eval-context dependent filters. An eval
- context object is passed as first argument. For more information
- about the eval context, see :ref:`eval-context`.
-
- .. versionadded:: 2.4
- """
- f.evalcontextfilter = True
- return f
-
-
-def environmentfilter(f):
- """Decorator for marking environment dependent filters. The current
- :class:`Environment` is passed to the filter as first argument.
- """
- f.environmentfilter = True
- return f
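These decorators only set marker attributes on the function; the environment inspects them when the filter is called. A minimal sketch of a hand-registered filter (the filter name is made up):

    from jinja2 import Environment, environmentfilter

    @environmentfilter
    def first_or_default(environment, seq, default=''):
        # receives the active Environment as its first argument
        try:
            return next(iter(seq))
        except StopIteration:
            return default

    env = Environment()
    env.filters['first_or_default'] = first_or_default
    print(env.from_string(
        "{{ []|first_or_default('n/a') }}").render())  # -> n/a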
-
-
-def ignore_case(value):
- """For use as a postprocessor for :func:`make_attrgetter`. Converts strings
- to lowercase and returns other types as-is."""
- return value.lower() if isinstance(value, string_types) else value
-
-
-def make_attrgetter(environment, attribute, postprocess=None):
- """Returns a callable that looks up the given attribute from a
- passed object with the rules of the environment. Dots are allowed
- to access attributes of attributes. Integer parts in paths are
- looked up as integers.
- """
- if attribute is None:
- attribute = []
- elif isinstance(attribute, string_types):
- attribute = [int(x) if x.isdigit() else x for x in attribute.split('.')]
- else:
- attribute = [attribute]
-
- def attrgetter(item):
- for part in attribute:
- item = environment.getitem(item, part)
-
- if postprocess is not None:
- item = postprocess(item)
-
- return item
-
- return attrgetter
-
-
-def do_forceescape(value):
- """Enforce HTML escaping. This will probably double escape variables."""
- if hasattr(value, '__html__'):
- value = value.__html__()
- return escape(text_type(value))
-
-
-def do_urlencode(value):
- """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
- dictionaries and regular strings as well as pairwise iterables.
-
- .. versionadded:: 2.7
- """
- itemiter = None
- if isinstance(value, dict):
- itemiter = iteritems(value)
- elif not isinstance(value, string_types):
- try:
- itemiter = iter(value)
- except TypeError:
- pass
- if itemiter is None:
- return unicode_urlencode(value)
- return u'&'.join(unicode_urlencode(k) + '=' +
- unicode_urlencode(v, for_qs=True)
- for k, v in itemiter)
-
-
-@evalcontextfilter
-def do_replace(eval_ctx, s, old, new, count=None):
- """Return a copy of the value with all occurrences of a substring
- replaced with a new one. The first argument is the substring
- that should be replaced, the second is the replacement string.
- If the optional third argument ``count`` is given, only the first
- ``count`` occurrences are replaced:
-
- .. sourcecode:: jinja
-
- {{ "Hello World"|replace("Hello", "Goodbye") }}
- -> Goodbye World
-
- {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
- -> d'oh, d'oh, aaargh
- """
- if count is None:
- count = -1
- if not eval_ctx.autoescape:
- return text_type(s).replace(text_type(old), text_type(new), count)
- if hasattr(old, '__html__') or hasattr(new, '__html__') and \
- not hasattr(s, '__html__'):
- s = escape(s)
- else:
- s = soft_unicode(s)
- return s.replace(soft_unicode(old), soft_unicode(new), count)
-
-
-def do_upper(s):
- """Convert a value to uppercase."""
- return soft_unicode(s).upper()
-
-
-def do_lower(s):
- """Convert a value to lowercase."""
- return soft_unicode(s).lower()
-
-
-@evalcontextfilter
-def do_xmlattr(_eval_ctx, d, autospace=True):
- """Create an SGML/XML attribute string based on the items in a dict.
- All values that are neither `none` nor `undefined` are automatically
- escaped:
-
- .. sourcecode:: html+jinja
-
- <ul{{ {'class': 'my_list', 'missing': none,
-        'id': 'list-%d'|format(variable)}|xmlattr }}>
- ...
- </ul>
-
- Results in something like this:
-
- .. sourcecode:: html
-
- <ul class="my_list" id="list-42">
- ...
- </ul>
-
- As you can see it automatically prepends a space in front of the item
- if the filter returned something unless the second parameter is false.
- """
- rv = u' '.join(
- u'%s="%s"' % (escape(key), escape(value))
- for key, value in iteritems(d)
- if value is not None and not isinstance(value, Undefined)
- )
- if autospace and rv:
- rv = u' ' + rv
- if _eval_ctx.autoescape:
- rv = Markup(rv)
- return rv
-
-
-def do_capitalize(s):
- """Capitalize a value. The first character will be uppercase, all others
- lowercase.
- """
- return soft_unicode(s).capitalize()
-
-
-def do_title(s):
- """Return a titlecased version of the value. I.e. words will start with
- uppercase letters, all remaining characters are lowercase.
- """
- return ''.join(
- [item[0].upper() + item[1:].lower()
- for item in _word_beginning_split_re.split(soft_unicode(s))
- if item])
-
-
-def do_dictsort(value, case_sensitive=False, by='key', reverse=False):
- """Sort a dict and yield (key, value) pairs. Because python dicts are
- unsorted you may want to use this function to order them by either
- key or value:
-
- .. sourcecode:: jinja
-
- {% for item in mydict|dictsort %}
- sort the dict by key, case insensitive
-
- {% for item in mydict|dictsort(reverse=true) %}
- sort the dict by key, case insensitive, reverse order
-
- {% for item in mydict|dictsort(true) %}
- sort the dict by key, case sensitive
-
- {% for item in mydict|dictsort(false, 'value') %}
- sort the dict by value, case insensitive
- """
- if by == 'key':
- pos = 0
- elif by == 'value':
- pos = 1
- else:
- raise FilterArgumentError(
- 'You can only sort by either "key" or "value"'
- )
-
- def sort_func(item):
- value = item[pos]
-
- if not case_sensitive:
- value = ignore_case(value)
-
- return value
-
- return sorted(value.items(), key=sort_func, reverse=reverse)
-
-
-@environmentfilter
-def do_sort(
- environment, value, reverse=False, case_sensitive=False, attribute=None
-):
- """Sort an iterable. Per default it sorts ascending, if you pass it
- true as first argument it will reverse the sorting.
-
- If the iterable is made of strings the third parameter can be used to
- control the case sensitiveness of the comparison which is disabled by
- default.
-
- .. sourcecode:: jinja
-
- {% for item in iterable|sort %}
- ...
- {% endfor %}
-
- It is also possible to sort by an attribute (for example to sort
- by the date of an object) by specifying the `attribute` parameter:
-
- .. sourcecode:: jinja
-
- {% for item in iterable|sort(attribute='date') %}
- ...
- {% endfor %}
-
- .. versionchanged:: 2.6
- The `attribute` parameter was added.
- """
- key_func = make_attrgetter(
- environment, attribute,
- postprocess=ignore_case if not case_sensitive else None
- )
- return sorted(value, key=key_func, reverse=reverse)
-
-
-@environmentfilter
-def do_unique(environment, value, case_sensitive=False, attribute=None):
- """Returns a list of unique items from the the given iterable.
-
- .. sourcecode:: jinja
-
- {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
- -> ['foo', 'bar', 'foobar']
-
- The unique items are yielded in the same order as their first occurrence in
- the iterable passed to the filter.
-
- :param case_sensitive: Treat upper and lower case strings as distinct.
- :param attribute: Filter objects with unique values for this attribute.
- """
- getter = make_attrgetter(
- environment, attribute,
- postprocess=ignore_case if not case_sensitive else None
- )
- seen = set()
-
- for item in value:
- key = getter(item)
-
- if key not in seen:
- seen.add(key)
- yield item
-
-
-def _min_or_max(environment, value, func, case_sensitive, attribute):
- it = iter(value)
-
- try:
- first = next(it)
- except StopIteration:
- return environment.undefined('No aggregated item, sequence was empty.')
-
- key_func = make_attrgetter(
- environment, attribute,
- ignore_case if not case_sensitive else None
- )
- return func(chain([first], it), key=key_func)
-
-
-@environmentfilter
-def do_min(environment, value, case_sensitive=False, attribute=None):
- """Return the smallest item from the sequence.
-
- .. sourcecode:: jinja
-
- {{ [1, 2, 3]|min }}
- -> 1
-
- :param case_sensitive: Treat upper and lower case strings as distinct.
- :param attribute: Get the object with the min value of this attribute.
- """
- return _min_or_max(environment, value, min, case_sensitive, attribute)
-
-
-@environmentfilter
-def do_max(environment, value, case_sensitive=False, attribute=None):
- """Return the largest item from the sequence.
-
- .. sourcecode:: jinja
-
- {{ [1, 2, 3]|max }}
- -> 3
-
- :param case_sensitive: Treat upper and lower case strings as distinct.
- :param attribute: Get the object with the max value of this attribute.
- """
- return _min_or_max(environment, value, max, case_sensitive, attribute)
-
-
-def do_default(value, default_value=u'', boolean=False):
- """If the value is undefined it will return the passed default value,
- otherwise the value of the variable:
-
- .. sourcecode:: jinja
-
- {{ my_variable|default('my_variable is not defined') }}
-
- This will output the value of ``my_variable`` if the variable was
- defined, otherwise ``'my_variable is not defined'``. If you want
- to use default with variables that evaluate to false you have to
- set the second parameter to `true`:
-
- .. sourcecode:: jinja
-
- {{ ''|default('the string was empty', true) }}
- """
- if isinstance(value, Undefined) or (boolean and not value):
- return default_value
- return value
-
-
-@evalcontextfilter
-def do_join(eval_ctx, value, d=u'', attribute=None):
- """Return a string which is the concatenation of the strings in the
- sequence. The separator between elements is an empty string per
- default, you can define it with the optional parameter:
-
- .. sourcecode:: jinja
-
- {{ [1, 2, 3]|join('|') }}
- -> 1|2|3
-
- {{ [1, 2, 3]|join }}
- -> 123
-
- It is also possible to join certain attributes of an object:
-
- .. sourcecode:: jinja
-
- {{ users|join(', ', attribute='username') }}
-
- .. versionadded:: 2.6
- The `attribute` parameter was added.
- """
- if attribute is not None:
- value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
-
- # no automatic escaping? joining is a lot easier then
- if not eval_ctx.autoescape:
- return text_type(d).join(imap(text_type, value))
-
- # if the delimiter doesn't have an html representation we check
- # if any of the items has. If yes we do a coercion to Markup
- if not hasattr(d, '__html__'):
- value = list(value)
- do_escape = False
- for idx, item in enumerate(value):
- if hasattr(item, '__html__'):
- do_escape = True
- else:
- value[idx] = text_type(item)
- if do_escape:
- d = escape(d)
- else:
- d = text_type(d)
- return d.join(value)
-
- # no html involved, do normal joining
- return soft_unicode(d).join(imap(soft_unicode, value))
-
-
-def do_center(value, width=80):
- """Centers the value in a field of a given width."""
- return text_type(value).center(width)
-
-
-@environmentfilter
-def do_first(environment, seq):
- """Return the first item of a sequence."""
- try:
- return next(iter(seq))
- except StopIteration:
- return environment.undefined('No first item, sequence was empty.')
-
-
-@environmentfilter
-def do_last(environment, seq):
- """Return the last item of a sequence."""
- try:
- return next(iter(reversed(seq)))
- except StopIteration:
- return environment.undefined('No last item, sequence was empty.')
-
-
-@contextfilter
-def do_random(context, seq):
- """Return a random item from the sequence."""
- try:
- return random.choice(seq)
- except IndexError:
- return context.environment.undefined('No random item, sequence was empty.')
-
-
-def do_filesizeformat(value, binary=False):
- """Format the value like a 'human-readable' file size (i.e. 13 kB,
- 4.1 MB, 102 Bytes, etc). By default decimal prefixes are used (Mega,
- Giga, etc.); if the second parameter is set to `True` the binary
- prefixes are used (Mebi, Gibi).
- """
- bytes = float(value)
- base = binary and 1024 or 1000
- prefixes = [
- (binary and 'KiB' or 'kB'),
- (binary and 'MiB' or 'MB'),
- (binary and 'GiB' or 'GB'),
- (binary and 'TiB' or 'TB'),
- (binary and 'PiB' or 'PB'),
- (binary and 'EiB' or 'EB'),
- (binary and 'ZiB' or 'ZB'),
- (binary and 'YiB' or 'YB')
- ]
- if bytes == 1:
- return '1 Byte'
- elif bytes < base:
- return '%d Bytes' % bytes
- else:
- for i, prefix in enumerate(prefixes):
- unit = base ** (i + 2)
- if bytes < unit:
- return '%.1f %s' % ((base * bytes / unit), prefix)
- return '%.1f %s' % ((base * bytes / unit), prefix)
-
-
-def do_pprint(value, verbose=False):
- """Pretty print a variable. Useful for debugging.
-
- With Jinja 1.2 onwards you can pass it a parameter. If this parameter
- is truthy the output will be more verbose (this requires `pretty`)
- """
- return pformat(value, verbose=verbose)
-
-
-@evalcontextfilter
-def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
- target=None, rel=None):
- """Converts URLs in plain text into clickable links.
-
- If you pass the filter an additional integer it will shorten the urls
- to that number. Also a third argument exists that makes the urls
- "nofollow":
-
- .. sourcecode:: jinja
-
- {{ mytext|urlize(40, true) }}
- links are shortened to 40 chars and defined with rel="nofollow"
-
- If *target* is specified, the ``target`` attribute will be added to the
- ``<a>`` tag:
-
- .. sourcecode:: jinja
-
- {{ mytext|urlize(40, target='_blank') }}
-
- .. versionchanged:: 2.8+
- The *target* parameter was added.
- """
- policies = eval_ctx.environment.policies
- rel = set((rel or '').split() or [])
- if nofollow:
- rel.add('nofollow')
- rel.update((policies['urlize.rel'] or '').split())
- if target is None:
- target = policies['urlize.target']
- rel = ' '.join(sorted(rel)) or None
- rv = urlize(value, trim_url_limit, rel=rel, target=target)
- if eval_ctx.autoescape:
- rv = Markup(rv)
- return rv
-
-
-def do_indent(
- s, width=4, first=False, blank=False, indentfirst=None
-):
- """Return a copy of the string with each line indented by 4 spaces. The
- first line and blank lines are not indented by default.
-
- :param width: Number of spaces to indent by.
- :param first: Don't skip indenting the first line.
- :param blank: Don't skip indenting empty lines.
-
- .. versionchanged:: 2.10
- Blank lines are not indented by default.
-
- Rename the ``indentfirst`` argument to ``first``.
- """
- if indentfirst is not None:
- warnings.warn(DeprecationWarning(
- 'The "indentfirst" argument is renamed to "first".'
- ), stacklevel=2)
- first = indentfirst
-
- s += u'\n' # this quirk is necessary for splitlines method
- indention = u' ' * width
-
- if blank:
- rv = (u'\n' + indention).join(s.splitlines())
- else:
- lines = s.splitlines()
- rv = lines.pop(0)
-
- if lines:
- rv += u'\n' + u'\n'.join(
- indention + line if line else line for line in lines
- )
-
- if first:
- rv = indention + rv
-
- return rv
-
-
-@environmentfilter
-def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
- """Return a truncated copy of the string. The length is specified
- with the first parameter which defaults to ``255``. If the second
- parameter is ``true`` the filter will cut the text at length. Otherwise
- it will discard the last word. If the text was in fact
- truncated it will append an ellipsis sign (``"..."``). If you want a
- different ellipsis sign than ``"..."`` you can specify it using the
- third parameter. Strings that only exceed the length by the tolerance
- margin given in the fourth parameter will not be truncated.
-
- .. sourcecode:: jinja
-
- {{ "foo bar baz qux"|truncate(9) }}
- -> "foo..."
- {{ "foo bar baz qux"|truncate(9, True) }}
- -> "foo ba..."
- {{ "foo bar baz qux"|truncate(11) }}
- -> "foo bar baz qux"
- {{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
- -> "foo bar..."
-
- The default leeway on newer Jinja2 versions is 5 and was 0 before but
- can be reconfigured globally.
- """
- if leeway is None:
- leeway = env.policies['truncate.leeway']
- assert length >= len(end), 'expected length >= %s, got %s' % (len(end), length)
- assert leeway >= 0, 'expected leeway >= 0, got %s' % leeway
- if len(s) <= length + leeway:
- return s
- if killwords:
- return s[:length - len(end)] + end
- result = s[:length - len(end)].rsplit(' ', 1)[0]
- return result + end
-
-
-@environmentfilter
-def do_wordwrap(environment, s, width=79, break_long_words=True,
- wrapstring=None):
- """
- Return a copy of the string passed to the filter wrapped after
- ``79`` characters. You can override this default using the first
- parameter. If you set the second parameter to `false` Jinja will not
- split words apart if they are longer than `width`. By default, the newlines
- will be the default newlines for the environment, but this can be changed
- using the wrapstring keyword argument.
-
- .. versionadded:: 2.7
- Added support for the `wrapstring` parameter.
- """
- if not wrapstring:
- wrapstring = environment.newline_sequence
- import textwrap
- return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
- replace_whitespace=False,
- break_long_words=break_long_words))
-
-
-def do_wordcount(s):
- """Count the words in that string."""
- return len(_word_re.findall(s))
-
-
-def do_int(value, default=0, base=10):
- """Convert the value into an integer. If the
- conversion doesn't work it will return ``0``. You can
- override this default using the first parameter. You
- can also override the default base (10) in the second
- parameter, which handles input with prefixes such as
- 0b, 0o and 0x for bases 2, 8 and 16 respectively.
- The base is ignored for decimal numbers and non-string values.
- """
- try:
- if isinstance(value, string_types):
- return int(value, base)
- return int(value)
- except (TypeError, ValueError):
- # this quirk is necessary so that "42.23"|int gives 42.
- try:
- return int(float(value))
- except (TypeError, ValueError):
- return default
-
-
-def do_float(value, default=0.0):
- """Convert the value into a floating point number. If the
- conversion doesn't work it will return ``0.0``. You can
- override this default using the first parameter.
- """
- try:
- return float(value)
- except (TypeError, ValueError):
- return default
-
-
-def do_format(value, *args, **kwargs):
- """
- Apply python string formatting on an object:
-
- .. sourcecode:: jinja
-
- {{ "%s - %s"|format("Hello?", "Foo!") }}
- -> Hello? - Foo!
- """
- if args and kwargs:
- raise FilterArgumentError('can\'t handle positional and keyword '
- 'arguments at the same time')
- return soft_unicode(value) % (kwargs or args)
-
-
-def do_trim(value):
- """Strip leading and trailing whitespace."""
- return soft_unicode(value).strip()
-
-
-def do_striptags(value):
- """Strip SGML/XML tags and replace adjacent whitespace by one space.
- """
- if hasattr(value, '__html__'):
- value = value.__html__()
- return Markup(text_type(value)).striptags()
-
-
-def do_slice(value, slices, fill_with=None):
- """Slice an iterator and return a list of lists containing
- those items. Useful if you want to create a div containing
- three ul tags that represent columns:
-
- .. sourcecode:: html+jinja
-
- <div class="columnwrapper">
- {%- for column in items|slice(3) %}
- <ul class="column-{{ loop.index }}">
- {%- for item in column %}
- <li>{{ item }}</li>
- {%- endfor %}
- </ul>
- {%- endfor %}
- </div>
-
- If you pass it a second argument it's used to fill missing
- values on the last iteration.
- """
- seq = list(value)
- length = len(seq)
- items_per_slice = length // slices
- slices_with_extra = length % slices
- offset = 0
- for slice_number in range(slices):
- start = offset + slice_number * items_per_slice
- if slice_number < slices_with_extra:
- offset += 1
- end = offset + (slice_number + 1) * items_per_slice
- tmp = seq[start:end]
- if fill_with is not None and slice_number >= slices_with_extra:
- tmp.append(fill_with)
- yield tmp
-
-
-def do_batch(value, linecount, fill_with=None):
- """
- A filter that batches items. It works pretty much like `slice`
- just the other way round. It returns a list of lists with the
- given number of items. If you provide a second parameter this
- is used to fill up missing items. See this example:
-
- .. sourcecode:: html+jinja
-
- <table>
- {%- for row in items|batch(3, '&nbsp;') %}
- <tr>
- {%- for column in row %}
- <td>{{ column }}</td>
- {%- endfor %}
- </tr>
- {%- endfor %}
- </table>
- """
- tmp = []
- for item in value:
- if len(tmp) == linecount:
- yield tmp
- tmp = []
- tmp.append(item)
- if tmp:
- if fill_with is not None and len(tmp) < linecount:
- tmp += [fill_with] * (linecount - len(tmp))
- yield tmp
-
-
-def do_round(value, precision=0, method='common'):
- """Round the number to a given precision. The first
- parameter specifies the precision (default is ``0``), the
- second the rounding method:
-
- - ``'common'`` rounds either up or down
- - ``'ceil'`` always rounds up
- - ``'floor'`` always rounds down
-
- If you don't specify a method ``'common'`` is used.
-
- .. sourcecode:: jinja
-
- {{ 42.55|round }}
- -> 43.0
- {{ 42.55|round(1, 'floor') }}
- -> 42.5
-
- Note that even if rounded to 0 precision, a float is returned. If
- you need a real integer, pipe it through `int`:
-
- .. sourcecode:: jinja
-
- {{ 42.55|round|int }}
- -> 43
- """
- if not method in ('common', 'ceil', 'floor'):
- raise FilterArgumentError('method must be common, ceil or floor')
- if method == 'common':
- return round(value, precision)
- func = getattr(math, method)
- return func(value * (10 ** precision)) / (10 ** precision)
-
-
-# Use a regular tuple repr here. This is what we did in the past and we
-# really want to hide this custom type as much as possible. In particular
-# we do not want to accidentally expose an auto generated repr in case
-# people start to print this out in comments or something similar for
-# debugging.
-_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list'])
-_GroupTuple.__repr__ = tuple.__repr__
-_GroupTuple.__str__ = tuple.__str__
-
-@environmentfilter
-def do_groupby(environment, value, attribute):
- """Group a sequence of objects by a common attribute.
-
- If you for example have a list of dicts or objects that represent persons
- with `gender`, `first_name` and `last_name` attributes and you want to
- group all users by genders you can do something like the following
- snippet:
-
- .. sourcecode:: html+jinja
-
- <ul>
- {% for group in persons|groupby('gender') %}
- <li>{{ group.grouper }}<ul>
- {% for person in group.list %}
- <li>{{ person.first_name }} {{ person.last_name }}</li>
- {% endfor %}</ul></li>
- {% endfor %}
- </ul>
-
- Additionally it's possible to use tuple unpacking for the grouper and
- list:
-
- .. sourcecode:: html+jinja
-
- <ul>
- {% for grouper, list in persons|groupby('gender') %}
- ...
- {% endfor %}
- </ul>
-
- As you can see the item we're grouping by is stored in the `grouper`
- attribute and the `list` contains all the objects that have this grouper
- in common.
-
- .. versionchanged:: 2.6
- It's now possible to use dotted notation to group by the child
- attribute of another attribute.
- """
- expr = make_attrgetter(environment, attribute)
- return [_GroupTuple(key, list(values)) for key, values
- in groupby(sorted(value, key=expr), expr)]
-
-
-@environmentfilter
-def do_sum(environment, iterable, attribute=None, start=0):
- """Returns the sum of a sequence of numbers plus the value of parameter
- 'start' (which defaults to 0). When the sequence is empty it returns
- start.
-
- It is also possible to sum up only certain attributes:
-
- .. sourcecode:: jinja
-
- Total: {{ items|sum(attribute='price') }}
-
- .. versionchanged:: 2.6
- The `attribute` parameter was added to allow summing up over
- attributes. Also the `start` parameter was moved on to the right.
- """
- if attribute is not None:
- iterable = imap(make_attrgetter(environment, attribute), iterable)
- return sum(iterable, start)
-
-
-def do_list(value):
- """Convert the value into a list. If it was a string the returned list
- will be a list of characters.
- """
- return list(value)
-
-
-def do_mark_safe(value):
- """Mark the value as safe which means that in an environment with automatic
- escaping enabled this variable will not be escaped.
- """
- return Markup(value)
-
-
-def do_mark_unsafe(value):
- """Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
- return text_type(value)
-
-
-def do_reverse(value):
- """Reverse the object or return an iterator that iterates over it the other
- way round.
- """
- if isinstance(value, string_types):
- return value[::-1]
- try:
- return reversed(value)
- except TypeError:
- try:
- rv = list(value)
- rv.reverse()
- return rv
- except TypeError:
- raise FilterArgumentError('argument must be iterable')
-
-
-@environmentfilter
-def do_attr(environment, obj, name):
- """Get an attribute of an object. ``foo|attr("bar")`` works like
- ``foo.bar``, except that an attribute is always returned and items are
- not looked up.
-
- See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
- """
- try:
- name = str(name)
- except UnicodeError:
- pass
- else:
- try:
- value = getattr(obj, name)
- except AttributeError:
- pass
- else:
- if environment.sandboxed and not \
- environment.is_safe_attribute(obj, name, value):
- return environment.unsafe_undefined(obj, name)
- return value
- return environment.undefined(obj=obj, name=name)
-
-
-@contextfilter
-def do_map(*args, **kwargs):
- """Applies a filter on a sequence of objects or looks up an attribute.
- This is useful when dealing with lists of objects but you are really
- only interested in a certain value of it.
-
- The basic usage is mapping on an attribute. Imagine you have a list
- of users but you are only interested in a list of usernames:
-
- .. sourcecode:: jinja
-
- Users on this page: {{ users|map(attribute='username')|join(', ') }}
-
- Alternatively you can let it invoke a filter by passing the name of the
- filter and the arguments afterwards. A good example would be applying a
- text conversion filter on a sequence:
-
- .. sourcecode:: jinja
-
- Users on this page: {{ titles|map('lower')|join(', ') }}
-
- .. versionadded:: 2.7
- """
- seq, func = prepare_map(args, kwargs)
- if seq:
- for item in seq:
- yield func(item)
-
-
-@contextfilter
-def do_select(*args, **kwargs):
- """Filters a sequence of objects by applying a test to each object,
- and only selecting the objects with the test succeeding.
-
- If no test is specified, each object will be evaluated as a boolean.
-
- Example usage:
-
- .. sourcecode:: jinja
-
- {{ numbers|select("odd") }}
- {{ numbers|select("odd") }}
- {{ numbers|select("divisibleby", 3) }}
- {{ numbers|select("lessthan", 42) }}
- {{ strings|select("equalto", "mystring") }}
-
- .. versionadded:: 2.7
- """
- return select_or_reject(args, kwargs, lambda x: x, False)
-
-
-@contextfilter
-def do_reject(*args, **kwargs):
- """Filters a sequence of objects by applying a test to each object,
- and rejecting the objects with the test succeeding.
-
- If no test is specified, each object will be evaluated as a boolean.
-
- Example usage:
-
- .. sourcecode:: jinja
-
- {{ numbers|reject("odd") }}
-
- .. versionadded:: 2.7
- """
- return select_or_reject(args, kwargs, lambda x: not x, False)
-
-
-@contextfilter
-def do_selectattr(*args, **kwargs):
- """Filters a sequence of objects by applying a test to the specified
- attribute of each object, and only selecting the objects with the
- test succeeding.
-
- If no test is specified, the attribute's value will be evaluated as
- a boolean.
-
- Example usage:
-
- .. sourcecode:: jinja
-
- {{ users|selectattr("is_active") }}
- {{ users|selectattr("email", "none") }}
-
- .. versionadded:: 2.7
- """
- return select_or_reject(args, kwargs, lambda x: x, True)
-
-
-@contextfilter
-def do_rejectattr(*args, **kwargs):
- """Filters a sequence of objects by applying a test to the specified
- attribute of each object, and rejecting the objects with the test
- succeeding.
-
- If no test is specified, the attribute's value will be evaluated as
- a boolean.
-
- .. sourcecode:: jinja
-
- {{ users|rejectattr("is_active") }}
- {{ users|rejectattr("email", "none") }}
-
- .. versionadded:: 2.7
- """
- return select_or_reject(args, kwargs, lambda x: not x, True)
-
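
How these context filters compose in a template, with hypothetical data:

    from jinja2 import Environment

    env = Environment()
    tmpl = env.from_string(
        "{{ users|selectattr('active')|map(attribute='name')|join(', ') }}")
    print(tmpl.render(users=[
        {'active': True, 'name': 'ada'},
        {'active': False, 'name': 'bob'},
        {'active': True, 'name': 'eve'},
    ]))  # -> ada, eve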
-
-@evalcontextfilter
-def do_tojson(eval_ctx, value, indent=None):
- """Dumps a structure to JSON so that it's safe to use in ``')
- # => <script> do_nasty_stuff() </script>
- # sanitize_html('Click here for $100')
- # => Click here for $100
- def sanitize_token(self, token):
-
- # accommodate filters which use token_type differently
- token_type = token["type"]
- if token_type in ("StartTag", "EndTag", "EmptyTag"):
- name = token["name"]
- namespace = token["namespace"]
- if ((namespace, name) in self.allowed_elements or
- (namespace is None and
- (namespaces["html"], name) in self.allowed_elements)):
- return self.allowed_token(token)
- else:
- return self.disallowed_token(token)
- elif token_type == "Comment":
- pass
- else:
- return token
-
- def allowed_token(self, token):
- if "data" in token:
- attrs = token["data"]
- attr_names = set(attrs.keys())
-
- # Remove forbidden attributes
- for to_remove in (attr_names - self.allowed_attributes):
- del token["data"][to_remove]
- attr_names.remove(to_remove)
-
- # Remove attributes with disallowed URL values
- for attr in (attr_names & self.attr_val_is_uri):
- assert attr in attrs
- # I don't have a clue where this regexp comes from or why it matches those
- # characters, nor why we call unescape. I just know it's always been here.
- # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all
- # this will do is remove *more* than it otherwise would.
- val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\s]+", '',
- unescape(attrs[attr])).lower()
- # remove replacement characters from unescaped characters
- val_unescaped = val_unescaped.replace("\ufffd", "")
- try:
- uri = urlparse.urlparse(val_unescaped)
- except ValueError:
- uri = None
- del attrs[attr]
- if uri and uri.scheme:
- if uri.scheme not in self.allowed_protocols:
- del attrs[attr]
- if uri.scheme == 'data':
- m = data_content_type.match(uri.path)
- if not m:
- del attrs[attr]
- elif m.group('content_type') not in self.allowed_content_types:
- del attrs[attr]
-
- for attr in self.svg_attr_val_allows_ref:
- if attr in attrs:
- attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
- ' ',
- unescape(attrs[attr]))
- if (token["name"] in self.svg_allow_local_href and
- (namespaces['xlink'], 'href') in attrs and re.search('^\s*[^#\s].*',
- attrs[(namespaces['xlink'], 'href')])):
- del attrs[(namespaces['xlink'], 'href')]
- if (None, 'style') in attrs:
- attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')])
- token["data"] = attrs
- return token
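The URI check in allowed_token boils down to: unescape and normalize the attribute value, parse it, and drop the attribute unless its scheme is on the allowlist (data: URLs get an extra content-type check, omitted here). A simplified sketch of that idea, using Python 3's urllib and a hypothetical three-scheme allowlist:

    from urllib.parse import urlparse

    ALLOWED_PROTOCOLS = {"http", "https", "mailto"}

    def is_safe_url(value):
        # The real filter also unescapes entities and strips control
        # characters before parsing; this sketch skips that step.
        try:
            uri = urlparse(value.strip().lower())
        except ValueError:
            return False
        return not uri.scheme or uri.scheme in ALLOWED_PROTOCOLS

    assert is_safe_url("https://example.com/x")
    assert is_safe_url("/relative/path")
    assert not is_safe_url("javascript:alert(1)")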
-
- def disallowed_token(self, token):
- token_type = token["type"]
- if token_type == "EndTag":
- token["data"] = "%s>" % token["name"]
- elif token["data"]:
- assert token_type in ("StartTag", "EmptyTag")
- attrs = []
- for (ns, name), v in token["data"].items():
- attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v)))
- token["data"] = "<%s%s>" % (token["name"], ''.join(attrs))
- else:
- token["data"] = "<%s>" % token["name"]
- if token.get("selfClosing"):
- token["data"] = token["data"][:-1] + "/>"
-
- token["type"] = "Characters"
-
- del token["name"]
- return token
-
- def sanitize_css(self, style):
- # disallow urls
- style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
-
- # gauntlet
- if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
- return ''
- if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
- return ''
-
- clean = []
- for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
- if not value:
- continue
- if prop.lower() in self.allowed_css_properties:
- clean.append(prop + ': ' + value + ';')
- elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
- 'padding']:
- for keyword in value.split():
- if keyword not in self.allowed_css_keywords and \
- not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa
- break
- else:
- clean.append(prop + ': ' + value + ';')
- elif prop.lower() in self.allowed_svg_properties:
- clean.append(prop + ': ' + value + ';')
-
- return ' '.join(clean)
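sanitize_css above is allowlist-first: url(...) values are blanked, any style string failing the "gauntlet" shape checks is rejected outright, and only explicitly allowed property names (or background/border/margin/padding shorthands with safe keyword values) survive. A condensed sketch of the same shape, with a hypothetical tiny allowlist and without the shorthand-keyword pass:

    import re

    ALLOWED_CSS_PROPERTIES = {"color", "font-weight", "text-align"}

    def sanitize_css(style):
        style = re.sub(r"url\s*\(\s*[^\s)]+?\s*\)\s*", " ", style)  # drop url()
        # Gauntlet: accept nothing but simple "prop: value" sequences.
        if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ""
        clean = []
        for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style):
            if value and prop.lower() in ALLOWED_CSS_PROPERTIES:
                clean.append(prop + ": " + value.strip() + ";")
        return " ".join(clean)

    assert sanitize_css("color: red; position: fixed") == "color: red;"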
diff --git a/venv/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py b/venv/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py
deleted file mode 100644
index 8921052..0000000
--- a/venv/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from __future__ import absolute_import, division, unicode_literals
-
-import re
-
-from . import base
-from ..constants import rcdataElements, spaceCharacters
-spaceCharacters = "".join(spaceCharacters)
-
-SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
-
-
-class Filter(base.Filter):
-
- spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
-
- def __iter__(self):
- preserve = 0
- for token in base.Filter.__iter__(self):
- type = token["type"]
- if type == "StartTag" \
- and (preserve or token["name"] in self.spacePreserveElements):
- preserve += 1
-
- elif type == "EndTag" and preserve:
- preserve -= 1
-
- elif not preserve and type == "SpaceCharacters" and token["data"]:
- # Test on token["data"] above to not introduce spaces where there were not
- token["data"] = " "
-
- elif not preserve and type == "Characters":
- token["data"] = collapse_spaces(token["data"])
-
- yield token
-
-
-def collapse_spaces(text):
- return SPACES_REGEX.sub(' ', text)
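The whitespace filter's whole job is a depth counter: entering a space-preserving element (pre, textarea, or any RCDATA element) increments it, and character data is whitespace-collapsed only while the counter is zero. The same logic over a toy token stream, sketched standalone:

    import re

    SPACES_REGEX = re.compile(r"[ \t\n\r\x0c]+")
    PRESERVE = frozenset(["pre", "textarea"])

    def collapse_stream(tokens):
        # tokens are ("start", name), ("end", name) or ("chars", text)
        preserve = 0                       # depth of open preserving elements
        for kind, data in tokens:
            if kind == "start" and (preserve or data in PRESERVE):
                preserve += 1
            elif kind == "end" and preserve:
                preserve -= 1
            elif kind == "chars" and not preserve:
                data = SPACES_REGEX.sub(" ", data)
            yield kind, data

    out = list(collapse_stream([("chars", "a   b"),
                                ("start", "pre"),
                                ("chars", "c   d"),
                                ("end", "pre")]))
    assert out[0] == ("chars", "a b")
    assert out[2] == ("chars", "c   d")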
diff --git a/venv/Lib/site-packages/pip/_vendor/html5lib/html5parser.py b/venv/Lib/site-packages/pip/_vendor/html5lib/html5parser.py
deleted file mode 100644
index f7043cb..0000000
--- a/venv/Lib/site-packages/pip/_vendor/html5lib/html5parser.py
+++ /dev/null
@@ -1,2733 +0,0 @@
-from __future__ import absolute_import, division, unicode_literals
-from pip._vendor.six import with_metaclass, viewkeys, PY3
-
-import types
-
-try:
- from collections import OrderedDict
-except ImportError:
- from pip._vendor.ordereddict import OrderedDict
-
-from . import _inputstream
-from . import _tokenizer
-
-from . import treebuilders
-from .treebuilders.base import Marker
-
-from . import _utils
-from .constants import (
- spaceCharacters, asciiUpper2Lower,
- specialElements, headingElements, cdataElements, rcdataElements,
- tokenTypes, tagTokenTypes,
- namespaces,
- htmlIntegrationPointElements, mathmlTextIntegrationPointElements,
- adjustForeignAttributes as adjustForeignAttributesMap,
- adjustMathMLAttributes, adjustSVGAttributes,
- E,
- ReparseException
-)
-
-
-def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs):
- """Parse a string or file-like object into a tree"""
- tb = treebuilders.getTreeBuilder(treebuilder)
- p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
- return p.parse(doc, **kwargs)
-
-
-def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs):
- tb = treebuilders.getTreeBuilder(treebuilder)
- p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
- return p.parseFragment(doc, container=container, **kwargs)
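These two module-level helpers are the public entry points; the standalone html5lib package exposes the same signatures as this vendored copy. A minimal usage sketch, assuming html5lib is installed:

    import html5lib

    # Parse a full (possibly malformed) document into an ElementTree tree.
    doc = html5lib.parse("<p>Hello <b>world", treebuilder="etree")

    # Parse a fragment as if it were the innerHTML of a <div>.
    frag = html5lib.parseFragment("<li>one<li>two", container="div")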
-
-
-def method_decorator_metaclass(function):
- class Decorated(type):
- def __new__(meta, classname, bases, classDict):
- for attributeName, attribute in classDict.items():
- if isinstance(attribute, types.FunctionType):
- attribute = function(attribute)
-
- classDict[attributeName] = attribute
- return type.__new__(meta, classname, bases, classDict)
- return Decorated
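method_decorator_metaclass applies a decorator to every plain function in a class body at class-creation time; getPhases uses it below to wrap each phase method with the debug logger. The same pattern in self-contained Python 3 form, with an illustrative decorator that is not part of html5lib:

    import types

    def method_decorator_metaclass(function):
        class Decorated(type):
            def __new__(meta, classname, bases, classDict):
                for name, attr in classDict.items():
                    if isinstance(attr, types.FunctionType):
                        classDict[name] = function(attr)
                return type.__new__(meta, classname, bases, classDict)
        return Decorated

    def shout(fn):                     # example decorator
        def wrapped(self, *args, **kwargs):
            return fn(self, *args, **kwargs).upper()
        return wrapped

    class Greeter(metaclass=method_decorator_metaclass(shout)):
        def greet(self):
            return "hi"

    assert Greeter().greet() == "HI"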
-
-
-class HTMLParser(object):
- """HTML parser. Generates a tree structure from a stream of (possibly
- malformed) HTML"""
-
- def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False):
- """
- strict - raise an exception when a parse error is encountered
-
- tree - a treebuilder class controlling the type of tree that will be
- returned. Built in treebuilders can be accessed through
- html5lib.treebuilders.getTreeBuilder(treeType)
- """
-
- # Raise an exception on the first error encountered
- self.strict = strict
-
- if tree is None:
- tree = treebuilders.getTreeBuilder("etree")
- self.tree = tree(namespaceHTMLElements)
- self.errors = []
-
- self.phases = dict([(name, cls(self, self.tree)) for name, cls in
- getPhases(debug).items()])
-
- def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):
-
- self.innerHTMLMode = innerHTML
- self.container = container
- self.scripting = scripting
- self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs)
- self.reset()
-
- try:
- self.mainLoop()
- except ReparseException:
- self.reset()
- self.mainLoop()
-
- def reset(self):
- self.tree.reset()
- self.firstStartTag = False
- self.errors = []
- self.log = [] # only used with debug mode
- # "quirks" / "limited quirks" / "no quirks"
- self.compatMode = "no quirks"
-
- if self.innerHTMLMode:
- self.innerHTML = self.container.lower()
-
- if self.innerHTML in cdataElements:
- self.tokenizer.state = self.tokenizer.rcdataState
- elif self.innerHTML in rcdataElements:
- self.tokenizer.state = self.tokenizer.rawtextState
- elif self.innerHTML == 'plaintext':
- self.tokenizer.state = self.tokenizer.plaintextState
- else:
- # state already is data state
- # self.tokenizer.state = self.tokenizer.dataState
- pass
- self.phase = self.phases["beforeHtml"]
- self.phase.insertHtmlElement()
- self.resetInsertionMode()
- else:
- self.innerHTML = False # pylint:disable=redefined-variable-type
- self.phase = self.phases["initial"]
-
- self.lastPhase = None
-
- self.beforeRCDataPhase = None
-
- self.framesetOK = True
-
- @property
- def documentEncoding(self):
- """The name of the character encoding
- that was used to decode the input stream,
- or :obj:`None` if that is not determined yet.
-
- """
- if not hasattr(self, 'tokenizer'):
- return None
- return self.tokenizer.stream.charEncoding[0].name
-
- def isHTMLIntegrationPoint(self, element):
- if (element.name == "annotation-xml" and
- element.namespace == namespaces["mathml"]):
- return ("encoding" in element.attributes and
- element.attributes["encoding"].translate(
- asciiUpper2Lower) in
- ("text/html", "application/xhtml+xml"))
- else:
- return (element.namespace, element.name) in htmlIntegrationPointElements
-
- def isMathMLTextIntegrationPoint(self, element):
- return (element.namespace, element.name) in mathmlTextIntegrationPointElements
-
- def mainLoop(self):
- CharactersToken = tokenTypes["Characters"]
- SpaceCharactersToken = tokenTypes["SpaceCharacters"]
- StartTagToken = tokenTypes["StartTag"]
- EndTagToken = tokenTypes["EndTag"]
- CommentToken = tokenTypes["Comment"]
- DoctypeToken = tokenTypes["Doctype"]
- ParseErrorToken = tokenTypes["ParseError"]
-
- for token in self.normalizedTokens():
- prev_token = None
- new_token = token
- while new_token is not None:
- prev_token = new_token
- currentNode = self.tree.openElements[-1] if self.tree.openElements else None
- currentNodeNamespace = currentNode.namespace if currentNode else None
- currentNodeName = currentNode.name if currentNode else None
-
- type = new_token["type"]
-
- if type == ParseErrorToken:
- self.parseError(new_token["data"], new_token.get("datavars", {}))
- new_token = None
- else:
- if (len(self.tree.openElements) == 0 or
- currentNodeNamespace == self.tree.defaultNamespace or
- (self.isMathMLTextIntegrationPoint(currentNode) and
- ((type == StartTagToken and
- token["name"] not in frozenset(["mglyph", "malignmark"])) or
- type in (CharactersToken, SpaceCharactersToken))) or
- (currentNodeNamespace == namespaces["mathml"] and
- currentNodeName == "annotation-xml" and
- type == StartTagToken and
- token["name"] == "svg") or
- (self.isHTMLIntegrationPoint(currentNode) and
- type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
- phase = self.phase
- else:
- phase = self.phases["inForeignContent"]
-
- if type == CharactersToken:
- new_token = phase.processCharacters(new_token)
- elif type == SpaceCharactersToken:
- new_token = phase.processSpaceCharacters(new_token)
- elif type == StartTagToken:
- new_token = phase.processStartTag(new_token)
- elif type == EndTagToken:
- new_token = phase.processEndTag(new_token)
- elif type == CommentToken:
- new_token = phase.processComment(new_token)
- elif type == DoctypeToken:
- new_token = phase.processDoctype(new_token)
-
- if (type == StartTagToken and prev_token["selfClosing"] and
- not prev_token["selfClosingAcknowledged"]):
- self.parseError("non-void-element-with-trailing-solidus",
- {"name": prev_token["name"]})
-
- # When the loop finishes it's EOF
- reprocess = True
- phases = []
- while reprocess:
- phases.append(self.phase)
- reprocess = self.phase.processEOF()
- if reprocess:
- assert self.phase not in phases
-
- def normalizedTokens(self):
- for token in self.tokenizer:
- yield self.normalizeToken(token)
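The `while new_token is not None` loop in mainLoop encodes the spec's "reprocess the token" rule: a phase handler either consumes a token (returns None) or hands it back so the, possibly different, current phase processes it again. A stripped-down sketch of that contract with a hypothetical two-phase mini parser:

    class MiniParser:
        def __init__(self):
            self.phase = self.initial
            self.out = []

        def initial(self, token):
            self.phase = self.in_body   # switch phase, then hand it back
            return token                # non-None means "reprocess"

        def in_body(self, token):
            self.out.append(token)      # consumed
            return None

        def feed(self, tokens):
            for token in tokens:
                t = token
                while t is not None:    # same loop shape as mainLoop
                    t = self.phase(t)

    p = MiniParser()
    p.feed(["a", "b"])
    assert p.out == ["a", "b"]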
-
- def parse(self, stream, *args, **kwargs):
- """Parse a HTML document into a well-formed tree
-
- stream - a filelike object or string containing the HTML to be parsed
-
- The optional encoding parameter must be a string that indicates
- the encoding. If specified, that encoding will be used,
- regardless of any BOM or later declaration (such as in a meta
- element)
-
- scripting - treat noscript elements as if javascript was turned on
- """
- self._parse(stream, False, None, *args, **kwargs)
- return self.tree.getDocument()
-
- def parseFragment(self, stream, *args, **kwargs):
- """Parse a HTML fragment into a well-formed tree fragment
-
- container - name of the element we're setting the innerHTML property
- if set to None, default to 'div'
-
- stream - a filelike object or string containing the HTML to be parsed
-
- The optional encoding parameter must be a string that indicates
- the encoding. If specified, that encoding will be used,
- regardless of any BOM or later declaration (such as in a meta
- element)
-
- scripting - treat noscript elements as if javascript was turned on
- """
- self._parse(stream, True, *args, **kwargs)
- return self.tree.getFragment()
-
- def parseError(self, errorcode="XXX-undefined-error", datavars=None):
- # XXX The idea is to make errorcode mandatory.
- if datavars is None:
- datavars = {}
- self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
- if self.strict:
- raise ParseError(E[errorcode] % datavars)
-
- def normalizeToken(self, token):
- """ HTML5 specific normalizations to the token stream """
-
- if token["type"] == tokenTypes["StartTag"]:
- raw = token["data"]
- token["data"] = OrderedDict(raw)
- if len(raw) > len(token["data"]):
- # we had some duplicated attribute, fix so first wins
- token["data"].update(raw[::-1])
-
- return token
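The duplicate-attribute fix in normalizeToken is easy to misread: OrderedDict(raw) keeps the *last* value for a repeated name, so when duplicates are detected the raw pairs are re-applied in reverse, leaving the *first* occurrence in force, as HTML requires. The trick in isolation:

    from collections import OrderedDict

    raw = [("href", "first"), ("class", "x"), ("href", "second")]
    attrs = OrderedDict(raw)         # plain construction keeps "second"
    assert attrs["href"] == "second"
    if len(raw) > len(attrs):        # a duplicate name was collapsed
        attrs.update(raw[::-1])      # reversed update restores "first"
    assert attrs["href"] == "first"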
-
- def adjustMathMLAttributes(self, token):
- adjust_attributes(token, adjustMathMLAttributes)
-
- def adjustSVGAttributes(self, token):
- adjust_attributes(token, adjustSVGAttributes)
-
- def adjustForeignAttributes(self, token):
- adjust_attributes(token, adjustForeignAttributesMap)
-
- def reparseTokenNormal(self, token):
- # pylint:disable=unused-argument
- self.parser.phase()
-
- def resetInsertionMode(self):
- # The name of this method is mostly historical. (It's also used in the
- # specification.)
- last = False
- newModes = {
- "select": "inSelect",
- "td": "inCell",
- "th": "inCell",
- "tr": "inRow",
- "tbody": "inTableBody",
- "thead": "inTableBody",
- "tfoot": "inTableBody",
- "caption": "inCaption",
- "colgroup": "inColumnGroup",
- "table": "inTable",
- "head": "inBody",
- "body": "inBody",
- "frameset": "inFrameset",
- "html": "beforeHead"
- }
- for node in self.tree.openElements[::-1]:
- nodeName = node.name
- new_phase = None
- if node == self.tree.openElements[0]:
- assert self.innerHTML
- last = True
- nodeName = self.innerHTML
- # Check for conditions that should only happen in the innerHTML
- # case
- if nodeName in ("select", "colgroup", "head", "html"):
- assert self.innerHTML
-
- if not last and node.namespace != self.tree.defaultNamespace:
- continue
-
- if nodeName in newModes:
- new_phase = self.phases[newModes[nodeName]]
- break
- elif last:
- new_phase = self.phases["inBody"]
- break
-
- self.phase = new_phase
-
- def parseRCDataRawtext(self, token, contentType):
- """Generic RCDATA/RAWTEXT Parsing algorithm
- contentType - RCDATA or RAWTEXT
- """
- assert contentType in ("RAWTEXT", "RCDATA")
-
- self.tree.insertElement(token)
-
- if contentType == "RAWTEXT":
- self.tokenizer.state = self.tokenizer.rawtextState
- else:
- self.tokenizer.state = self.tokenizer.rcdataState
-
- self.originalPhase = self.phase
-
- self.phase = self.phases["text"]
-
-
-@_utils.memoize
-def getPhases(debug):
- def log(function):
- """Logger that records which phase processes each token"""
- type_names = dict((value, key) for key, value in
- tokenTypes.items())
-
- def wrapped(self, *args, **kwargs):
- if function.__name__.startswith("process") and len(args) > 0:
- token = args[0]
- try:
- info = {"type": type_names[token['type']]}
- except:
- raise
- if token['type'] in tagTokenTypes:
- info["name"] = token['name']
-
- self.parser.log.append((self.parser.tokenizer.state.__name__,
- self.parser.phase.__class__.__name__,
- self.__class__.__name__,
- function.__name__,
- info))
- return function(self, *args, **kwargs)
- else:
- return function(self, *args, **kwargs)
- return wrapped
-
- def getMetaclass(use_metaclass, metaclass_func):
- if use_metaclass:
- return method_decorator_metaclass(metaclass_func)
- else:
- return type
-
- # pylint:disable=unused-argument
- class Phase(with_metaclass(getMetaclass(debug, log))):
- """Base class for helper object that implements each phase of processing
- """
-
- def __init__(self, parser, tree):
- self.parser = parser
- self.tree = tree
-
- def processEOF(self):
- raise NotImplementedError
-
- def processComment(self, token):
- # For most phases the following is correct. Where it's not it will be
- # overridden.
- self.tree.insertComment(token, self.tree.openElements[-1])
-
- def processDoctype(self, token):
- self.parser.parseError("unexpected-doctype")
-
- def processCharacters(self, token):
- self.tree.insertText(token["data"])
-
- def processSpaceCharacters(self, token):
- self.tree.insertText(token["data"])
-
- def processStartTag(self, token):
- return self.startTagHandler[token["name"]](token)
-
- def startTagHtml(self, token):
- if not self.parser.firstStartTag and token["name"] == "html":
- self.parser.parseError("non-html-root")
- # XXX Need a check here to see if the first start tag token emitted is
- # this token... If it's not, invoke self.parser.parseError().
- for attr, value in token["data"].items():
- if attr not in self.tree.openElements[0].attributes:
- self.tree.openElements[0].attributes[attr] = value
- self.parser.firstStartTag = False
-
- def processEndTag(self, token):
- return self.endTagHandler[token["name"]](token)
-
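processStartTag and processEndTag route through _utils.MethodDispatcher, a dict that takes tuples of tag names as compound keys and falls back to a default handler on unknown names. A minimal stand-in with the same lookup behaviour (the real class also accepts lists and frozensets as keys):

    class MethodDispatcher(dict):
        def __init__(self, items):
            pairs = []
            for names, handler in items:
                if isinstance(names, tuple):
                    pairs.extend((name, handler) for name in names)
                else:
                    pairs.append((names, handler))
            dict.__init__(self, pairs)
            self.default = None

        def __getitem__(self, key):
            return dict.get(self, key, self.default)

    d = MethodDispatcher([("html", "html-handler"),
                          (("head", "body"), "imply-handler")])
    d.default = "other-handler"
    assert d["body"] == "imply-handler"
    assert d["span"] == "other-handler"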
- class InitialPhase(Phase):
- def processSpaceCharacters(self, token):
- pass
-
- def processComment(self, token):
- self.tree.insertComment(token, self.tree.document)
-
- def processDoctype(self, token):
- name = token["name"]
- publicId = token["publicId"]
- systemId = token["systemId"]
- correct = token["correct"]
-
- if (name != "html" or publicId is not None or
- systemId is not None and systemId != "about:legacy-compat"):
- self.parser.parseError("unknown-doctype")
-
- if publicId is None:
- publicId = ""
-
- self.tree.insertDoctype(token)
-
- if publicId != "":
- publicId = publicId.translate(asciiUpper2Lower)
-
- if (not correct or token["name"] != "html" or
- publicId.startswith(
- ("+//silmaril//dtd html pro v0r11 19970101//",
- "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
- "-//as//dtd html 3.0 aswedit + extensions//",
- "-//ietf//dtd html 2.0 level 1//",
- "-//ietf//dtd html 2.0 level 2//",
- "-//ietf//dtd html 2.0 strict level 1//",
- "-//ietf//dtd html 2.0 strict level 2//",
- "-//ietf//dtd html 2.0 strict//",
- "-//ietf//dtd html 2.0//",
- "-//ietf//dtd html 2.1e//",
- "-//ietf//dtd html 3.0//",
- "-//ietf//dtd html 3.2 final//",
- "-//ietf//dtd html 3.2//",
- "-//ietf//dtd html 3//",
- "-//ietf//dtd html level 0//",
- "-//ietf//dtd html level 1//",
- "-//ietf//dtd html level 2//",
- "-//ietf//dtd html level 3//",
- "-//ietf//dtd html strict level 0//",
- "-//ietf//dtd html strict level 1//",
- "-//ietf//dtd html strict level 2//",
- "-//ietf//dtd html strict level 3//",
- "-//ietf//dtd html strict//",
- "-//ietf//dtd html//",
- "-//metrius//dtd metrius presentational//",
- "-//microsoft//dtd internet explorer 2.0 html strict//",
- "-//microsoft//dtd internet explorer 2.0 html//",
- "-//microsoft//dtd internet explorer 2.0 tables//",
- "-//microsoft//dtd internet explorer 3.0 html strict//",
- "-//microsoft//dtd internet explorer 3.0 html//",
- "-//microsoft//dtd internet explorer 3.0 tables//",
- "-//netscape comm. corp.//dtd html//",
- "-//netscape comm. corp.//dtd strict html//",
- "-//o'reilly and associates//dtd html 2.0//",
- "-//o'reilly and associates//dtd html extended 1.0//",
- "-//o'reilly and associates//dtd html extended relaxed 1.0//",
- "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
- "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
- "-//spyglass//dtd html 2.0 extended//",
- "-//sq//dtd html 2.0 hotmetal + extensions//",
- "-//sun microsystems corp.//dtd hotjava html//",
- "-//sun microsystems corp.//dtd hotjava strict html//",
- "-//w3c//dtd html 3 1995-03-24//",
- "-//w3c//dtd html 3.2 draft//",
- "-//w3c//dtd html 3.2 final//",
- "-//w3c//dtd html 3.2//",
- "-//w3c//dtd html 3.2s draft//",
- "-//w3c//dtd html 4.0 frameset//",
- "-//w3c//dtd html 4.0 transitional//",
- "-//w3c//dtd html experimental 19960712//",
- "-//w3c//dtd html experimental 970421//",
- "-//w3c//dtd w3 html//",
- "-//w3o//dtd w3 html 3.0//",
- "-//webtechs//dtd mozilla html 2.0//",
- "-//webtechs//dtd mozilla html//")) or
- publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
- "-/w3c/dtd html 4.0 transitional/en",
- "html") or
- publicId.startswith(
- ("-//w3c//dtd html 4.01 frameset//",
- "-//w3c//dtd html 4.01 transitional//")) and
- systemId is None or
- systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
- self.parser.compatMode = "quirks"
- elif (publicId.startswith(
- ("-//w3c//dtd xhtml 1.0 frameset//",
- "-//w3c//dtd xhtml 1.0 transitional//")) or
- publicId.startswith(
- ("-//w3c//dtd html 4.01 frameset//",
- "-//w3c//dtd html 4.01 transitional//")) and
- systemId is not None):
- self.parser.compatMode = "limited quirks"
-
- self.parser.phase = self.parser.phases["beforeHtml"]
-
- def anythingElse(self):
- self.parser.compatMode = "quirks"
- self.parser.phase = self.parser.phases["beforeHtml"]
-
- def processCharacters(self, token):
- self.parser.parseError("expected-doctype-but-got-chars")
- self.anythingElse()
- return token
-
- def processStartTag(self, token):
- self.parser.parseError("expected-doctype-but-got-start-tag",
- {"name": token["name"]})
- self.anythingElse()
- return token
-
- def processEndTag(self, token):
- self.parser.parseError("expected-doctype-but-got-end-tag",
- {"name": token["name"]})
- self.anythingElse()
- return token
-
- def processEOF(self):
- self.parser.parseError("expected-doctype-but-got-eof")
- self.anythingElse()
- return True
-
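InitialPhase's doctype handling is one big prefix match: the public identifier is lower-cased and compared against the legacy lists above to pick "quirks", "limited quirks", or "no quirks". A heavily trimmed sketch of the same decision (only two quirky prefixes shown; the lists above are the authoritative ones):

    QUIRKY_PREFIXES = (
        "-//ietf//dtd html 2.0//",
        "-//w3c//dtd html 3.2//",
    )

    def compat_mode(public_id):
        pid = (public_id or "").lower()
        if pid == "html" or pid.startswith(QUIRKY_PREFIXES):
            return "quirks"
        if pid.startswith(("-//w3c//dtd xhtml 1.0 frameset//",
                           "-//w3c//dtd xhtml 1.0 transitional//")):
            return "limited quirks"
        return "no quirks"

    assert compat_mode("-//W3C//DTD HTML 3.2//EN") == "quirks"
    assert compat_mode(None) == "no quirks"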
- class BeforeHtmlPhase(Phase):
- # helper methods
- def insertHtmlElement(self):
- self.tree.insertRoot(impliedTagToken("html", "StartTag"))
- self.parser.phase = self.parser.phases["beforeHead"]
-
- # other
- def processEOF(self):
- self.insertHtmlElement()
- return True
-
- def processComment(self, token):
- self.tree.insertComment(token, self.tree.document)
-
- def processSpaceCharacters(self, token):
- pass
-
- def processCharacters(self, token):
- self.insertHtmlElement()
- return token
-
- def processStartTag(self, token):
- if token["name"] == "html":
- self.parser.firstStartTag = True
- self.insertHtmlElement()
- return token
-
- def processEndTag(self, token):
- if token["name"] not in ("head", "body", "html", "br"):
- self.parser.parseError("unexpected-end-tag-before-html",
- {"name": token["name"]})
- else:
- self.insertHtmlElement()
- return token
-
- class BeforeHeadPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("head", self.startTagHead)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- (("head", "body", "html", "br"), self.endTagImplyHead)
- ])
- self.endTagHandler.default = self.endTagOther
-
- def processEOF(self):
- self.startTagHead(impliedTagToken("head", "StartTag"))
- return True
-
- def processSpaceCharacters(self, token):
- pass
-
- def processCharacters(self, token):
- self.startTagHead(impliedTagToken("head", "StartTag"))
- return token
-
- def startTagHtml(self, token):
- return self.parser.phases["inBody"].processStartTag(token)
-
- def startTagHead(self, token):
- self.tree.insertElement(token)
- self.tree.headPointer = self.tree.openElements[-1]
- self.parser.phase = self.parser.phases["inHead"]
-
- def startTagOther(self, token):
- self.startTagHead(impliedTagToken("head", "StartTag"))
- return token
-
- def endTagImplyHead(self, token):
- self.startTagHead(impliedTagToken("head", "StartTag"))
- return token
-
- def endTagOther(self, token):
- self.parser.parseError("end-tag-after-implied-root",
- {"name": token["name"]})
-
- class InHeadPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("title", self.startTagTitle),
- (("noframes", "style"), self.startTagNoFramesStyle),
- ("noscript", self.startTagNoscript),
- ("script", self.startTagScript),
- (("base", "basefont", "bgsound", "command", "link"),
- self.startTagBaseLinkCommand),
- ("meta", self.startTagMeta),
- ("head", self.startTagHead)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("head", self.endTagHead),
- (("br", "html", "body"), self.endTagHtmlBodyBr)
- ])
- self.endTagHandler.default = self.endTagOther
-
- # the real thing
- def processEOF(self):
- self.anythingElse()
- return True
-
- def processCharacters(self, token):
- self.anythingElse()
- return token
-
- def startTagHtml(self, token):
- return self.parser.phases["inBody"].processStartTag(token)
-
- def startTagHead(self, token):
- self.parser.parseError("two-heads-are-not-better-than-one")
-
- def startTagBaseLinkCommand(self, token):
- self.tree.insertElement(token)
- self.tree.openElements.pop()
- token["selfClosingAcknowledged"] = True
-
- def startTagMeta(self, token):
- self.tree.insertElement(token)
- self.tree.openElements.pop()
- token["selfClosingAcknowledged"] = True
-
- attributes = token["data"]
- if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
- if "charset" in attributes:
- self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
- elif ("content" in attributes and
- "http-equiv" in attributes and
- attributes["http-equiv"].lower() == "content-type"):
- # Encoding it as UTF-8 here is a hack, as really we should pass
- # the abstract Unicode string, and just use the
- # ContentAttrParser on that, but using UTF-8 allows all chars
- # to be encoded and as a ASCII-superset works.
- data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
- parser = _inputstream.ContentAttrParser(data)
- codec = parser.parse()
- self.parser.tokenizer.stream.changeEncoding(codec)
-
- def startTagTitle(self, token):
- self.parser.parseRCDataRawtext(token, "RCDATA")
-
- def startTagNoFramesStyle(self, token):
- # Need to decide whether to implement the scripting-disabled case
- self.parser.parseRCDataRawtext(token, "RAWTEXT")
-
- def startTagNoscript(self, token):
- if self.parser.scripting:
- self.parser.parseRCDataRawtext(token, "RAWTEXT")
- else:
- self.tree.insertElement(token)
- self.parser.phase = self.parser.phases["inHeadNoscript"]
-
- def startTagScript(self, token):
- self.tree.insertElement(token)
- self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
- self.parser.originalPhase = self.parser.phase
- self.parser.phase = self.parser.phases["text"]
-
- def startTagOther(self, token):
- self.anythingElse()
- return token
-
- def endTagHead(self, token):
- node = self.parser.tree.openElements.pop()
- assert node.name == "head", "Expected head got %s" % node.name
- self.parser.phase = self.parser.phases["afterHead"]
-
- def endTagHtmlBodyBr(self, token):
- self.anythingElse()
- return token
-
- def endTagOther(self, token):
- self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
-
- def anythingElse(self):
- self.endTagHead(impliedTagToken("head"))
-
- class InHeadNoscriptPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand),
- (("head", "noscript"), self.startTagHeadNoscript),
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("noscript", self.endTagNoscript),
- ("br", self.endTagBr),
- ])
- self.endTagHandler.default = self.endTagOther
-
- def processEOF(self):
- self.parser.parseError("eof-in-head-noscript")
- self.anythingElse()
- return True
-
- def processComment(self, token):
- return self.parser.phases["inHead"].processComment(token)
-
- def processCharacters(self, token):
- self.parser.parseError("char-in-head-noscript")
- self.anythingElse()
- return token
-
- def processSpaceCharacters(self, token):
- return self.parser.phases["inHead"].processSpaceCharacters(token)
-
- def startTagHtml(self, token):
- return self.parser.phases["inBody"].processStartTag(token)
-
- def startTagBaseLinkCommand(self, token):
- return self.parser.phases["inHead"].processStartTag(token)
-
- def startTagHeadNoscript(self, token):
- self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
-
- def startTagOther(self, token):
- self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
- self.anythingElse()
- return token
-
- def endTagNoscript(self, token):
- node = self.parser.tree.openElements.pop()
- assert node.name == "noscript", "Expected noscript got %s" % node.name
- self.parser.phase = self.parser.phases["inHead"]
-
- def endTagBr(self, token):
- self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
- self.anythingElse()
- return token
-
- def endTagOther(self, token):
- self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
-
- def anythingElse(self):
- # Caller must raise parse error first!
- self.endTagNoscript(impliedTagToken("noscript"))
-
- class AfterHeadPhase(Phase):
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- ("body", self.startTagBody),
- ("frameset", self.startTagFrameset),
- (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
- "style", "title"),
- self.startTagFromHead),
- ("head", self.startTagHead)
- ])
- self.startTagHandler.default = self.startTagOther
- self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
- self.endTagHtmlBodyBr)])
- self.endTagHandler.default = self.endTagOther
-
- def processEOF(self):
- self.anythingElse()
- return True
-
- def processCharacters(self, token):
- self.anythingElse()
- return token
-
- def startTagHtml(self, token):
- return self.parser.phases["inBody"].processStartTag(token)
-
- def startTagBody(self, token):
- self.parser.framesetOK = False
- self.tree.insertElement(token)
- self.parser.phase = self.parser.phases["inBody"]
-
- def startTagFrameset(self, token):
- self.tree.insertElement(token)
- self.parser.phase = self.parser.phases["inFrameset"]
-
- def startTagFromHead(self, token):
- self.parser.parseError("unexpected-start-tag-out-of-my-head",
- {"name": token["name"]})
- self.tree.openElements.append(self.tree.headPointer)
- self.parser.phases["inHead"].processStartTag(token)
- for node in self.tree.openElements[::-1]:
- if node.name == "head":
- self.tree.openElements.remove(node)
- break
-
- def startTagHead(self, token):
- self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
-
- def startTagOther(self, token):
- self.anythingElse()
- return token
-
- def endTagHtmlBodyBr(self, token):
- self.anythingElse()
- return token
-
- def endTagOther(self, token):
- self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
-
- def anythingElse(self):
- self.tree.insertElement(impliedTagToken("body", "StartTag"))
- self.parser.phase = self.parser.phases["inBody"]
- self.parser.framesetOK = True
-
- class InBodyPhase(Phase):
- # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
- # the really-really-really-very crazy mode
- def __init__(self, parser, tree):
- Phase.__init__(self, parser, tree)
-
- # Set this to the default handler
- self.processSpaceCharacters = self.processSpaceCharactersNonPre
-
- self.startTagHandler = _utils.MethodDispatcher([
- ("html", self.startTagHtml),
- (("base", "basefont", "bgsound", "command", "link", "meta",
- "script", "style", "title"),
- self.startTagProcessInHead),
- ("body", self.startTagBody),
- ("frameset", self.startTagFrameset),
- (("address", "article", "aside", "blockquote", "center", "details",
- "dir", "div", "dl", "fieldset", "figcaption", "figure",
- "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
- "section", "summary", "ul"),
- self.startTagCloseP),
- (headingElements, self.startTagHeading),
- (("pre", "listing"), self.startTagPreListing),
- ("form", self.startTagForm),
- (("li", "dd", "dt"), self.startTagListItem),
- ("plaintext", self.startTagPlaintext),
- ("a", self.startTagA),
- (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
- "strong", "tt", "u"), self.startTagFormatting),
- ("nobr", self.startTagNobr),
- ("button", self.startTagButton),
- (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
- ("xmp", self.startTagXmp),
- ("table", self.startTagTable),
- (("area", "br", "embed", "img", "keygen", "wbr"),
- self.startTagVoidFormatting),
- (("param", "source", "track"), self.startTagParamSource),
- ("input", self.startTagInput),
- ("hr", self.startTagHr),
- ("image", self.startTagImage),
- ("isindex", self.startTagIsIndex),
- ("textarea", self.startTagTextarea),
- ("iframe", self.startTagIFrame),
- ("noscript", self.startTagNoscript),
- (("noembed", "noframes"), self.startTagRawtext),
- ("select", self.startTagSelect),
- (("rp", "rt"), self.startTagRpRt),
- (("option", "optgroup"), self.startTagOpt),
- (("math"), self.startTagMath),
- (("svg"), self.startTagSvg),
- (("caption", "col", "colgroup", "frame", "head",
- "tbody", "td", "tfoot", "th", "thead",
- "tr"), self.startTagMisplaced)
- ])
- self.startTagHandler.default = self.startTagOther
-
- self.endTagHandler = _utils.MethodDispatcher([
- ("body", self.endTagBody),
- ("html", self.endTagHtml),
- (("address", "article", "aside", "blockquote", "button", "center",
- "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
- "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
- "section", "summary", "ul"), self.endTagBlock),
- ("form", self.endTagForm),
- ("p", self.endTagP),
- (("dd", "dt", "li"), self.endTagListItem),
- (headingElements, self.endTagHeading),
- (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
- "strike", "strong", "tt", "u"), self.endTagFormatting),
- (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
- ("br", self.endTagBr),
- ])
- self.endTagHandler.default = self.endTagOther
-
- def isMatchingFormattingElement(self, node1, node2):
- return (node1.name == node2.name and
- node1.namespace == node2.namespace and
- node1.attributes == node2.attributes)
-
- # helper
- def addFormattingElement(self, token):
- self.tree.insertElement(token)
- element = self.tree.openElements[-1]
-
- matchingElements = []
- for node in self.tree.activeFormattingElements[::-1]:
- if node is Marker:
- break
- elif self.isMatchingFormattingElement(node, element):
- matchingElements.append(node)
-
- assert len(matchingElements) <= 3
- if len(matchingElements) == 3:
- self.tree.activeFormattingElements.remove(matchingElements[-1])
- self.tree.activeFormattingElements.append(element)
-
- # the real deal
- def processEOF(self):
- allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
- "tfoot", "th", "thead", "tr", "body",
- "html"))
- for node in self.tree.openElements[::-1]:
- if node.name not in allowed_elements:
- self.parser.parseError("expected-closing-tag-but-got-eof")
- break
- # Stop parsing
-
- def processSpaceCharactersDropNewline(self, token):
- # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we