diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..60677ddb6aaacf1e9a74fe4e441213446b9fe943 --- /dev/null +++ b/AUTHORS @@ -0,0 +1,15 @@ +Main authors: + Federico Di Gregorio + Daniele Varrazzo + +For the win32 port: + Jason Erickson + +Additional Help: + + Peter Fein contributed a logging connection/cursor class that even if it + was not used directly heavily influenced the implementation currently in + psycopg2.extras. + + Jan Urbański (re)started the work on asynchronous queries and contributed + both on that and on other parts of psycopg2. diff --git a/INSTALL b/INSTALL new file mode 100644 index 0000000000000000000000000000000000000000..7a04aabca5163f99a3d0435dc81f3d19f57b4650 --- /dev/null +++ b/INSTALL @@ -0,0 +1,4 @@ +Installation instructions are included in the docs. + +Please check the 'doc/src/install.rst' file or online at +. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9029e70fc8c8dfdca187cb9632af896c660d15fe --- /dev/null +++ b/LICENSE @@ -0,0 +1,49 @@ +psycopg2 and the LGPL +--------------------- + +psycopg2 is free software: you can redistribute it and/or modify it +under the terms of the GNU Lesser General Public License as published +by the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +psycopg2 is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +License for more details. + +In addition, as a special exception, the copyright holders give +permission to link this program with the OpenSSL library (or with +modified versions of OpenSSL that use the same license as OpenSSL), +and distribute linked combinations including the two. + +You must obey the GNU Lesser General Public License in all respects for +all of the code used other than OpenSSL. 
If you modify file(s) with this +exception, you may extend this exception to your version of the file(s), +but you are not obligated to do so. If you do not wish to do so, delete +this exception statement from your version. If you delete this exception +statement from all source files in the program, then also delete it here. + +You should have received a copy of the GNU Lesser General Public License +along with psycopg2 (see the doc/ directory.) +If not, see . + + +Alternative licenses +-------------------- + +The following BSD-like license applies (at your option) to the files following +the pattern ``psycopg/adapter*.{h,c}`` and ``psycopg/microprotocol*.{h,c}``: + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product documentation + would be appreciated but is not required. + + 2. Altered source versions must be plainly marked as such, and must not + be misrepresented as being the original software. + + 3. This notice may not be removed or altered from any source distribution. 
diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..3fcce43ba7108945decc3e8bf1824a0502b833ff --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,9 @@ +recursive-include psycopg *.c *.h *.manifest +recursive-include lib *.py +recursive-include tests *.py +include doc/README.rst doc/SUCCESS doc/COPYING.LESSER doc/pep-0249.txt +include doc/Makefile doc/requirements.txt +recursive-include doc/src *.rst *.py *.css Makefile +recursive-include scripts *.py *.sh +include AUTHORS README.rst INSTALL LICENSE NEWS +include MANIFEST.in setup.py setup.cfg Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9cbb16f97bbf7ec7a96dcad6092a5b0f9546a212 --- /dev/null +++ b/Makefile @@ -0,0 +1,104 @@ +# Makefile for psycopg2. Do you want to... +# +# Build the library:: +# +# make +# +# Build the documentation:: +# +# make env (once) +# make docs +# +# Create a source package:: +# +# make sdist +# +# Run the test:: +# +# make check # this requires setting up a test database with the correct user + +PYTHON := python$(PYTHON_VERSION) +PYTHON_VERSION ?= $(shell $(PYTHON) -c 'import sys; print ("%d.%d" % sys.version_info[:2])') +BUILD_DIR = $(shell pwd)/build/lib.$(PYTHON_VERSION) + +SOURCE_C := $(wildcard psycopg/*.c psycopg/*.h) +SOURCE_PY := $(wildcard lib/*.py) +SOURCE_TESTS := $(wildcard tests/*.py) +SOURCE_DOC := $(wildcard doc/src/*.rst) +SOURCE := $(SOURCE_C) $(SOURCE_PY) $(SOURCE_TESTS) $(SOURCE_DOC) + +PACKAGE := $(BUILD_DIR)/psycopg2 +PLATLIB := $(PACKAGE)/_psycopg.so +PURELIB := $(patsubst lib/%,$(PACKAGE)/%,$(SOURCE_PY)) + +BUILD_OPT := --build-lib=$(BUILD_DIR) +BUILD_EXT_OPT := --build-lib=$(BUILD_DIR) +SDIST_OPT := --formats=gztar + +ifdef PG_CONFIG + BUILD_EXT_OPT += --pg-config=$(PG_CONFIG) +endif + +VERSION := $(shell grep PSYCOPG_VERSION setup.py | head -1 | sed -e "s/.*'\(.*\)'/\1/") +SDIST := dist/psycopg2-$(VERSION).tar.gz + +.PHONY: check clean + +default: package 
+ +all: package sdist + +package: $(PLATLIB) $(PURELIB) + +docs: docs-html + +docs-html: doc/html/genindex.html + +# for PyPI documentation +docs-zip: doc/docs.zip + +sdist: $(SDIST) + +env: + $(MAKE) -C doc $@ + +check: + PYTHONPATH=$(BUILD_DIR) $(PYTHON) -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')" --verbose + +testdb: + @echo "* Creating $(TESTDB)" + @if psql -l | grep -q " $(TESTDB) "; then \ + dropdb $(TESTDB) >/dev/null; \ + fi + createdb $(TESTDB) + # Note to packagers: this requires the postgres user running the test + # to be a superuser. You may change this line to use the superuser only + # to install the contrib. Feel free to suggest a better way to set up the + # testing environment (as the current is enough for development). + psql -f `pg_config --sharedir`/contrib/hstore.sql $(TESTDB) + + +$(PLATLIB): $(SOURCE_C) + $(PYTHON) setup.py build_ext $(BUILD_EXT_OPT) + +$(PACKAGE)/%.py: lib/%.py + $(PYTHON) setup.py build_py $(BUILD_OPT) + touch $@ + +$(PACKAGE)/tests/%.py: tests/%.py + $(PYTHON) setup.py build_py $(BUILD_OPT) + touch $@ + +$(SDIST): $(SOURCE) + $(PYTHON) setup.py sdist $(SDIST_OPT) + +# docs depend on the build as it partly use introspection. +doc/html/genindex.html: $(PLATLIB) $(PURELIB) $(SOURCE_DOC) + $(MAKE) -C doc html + +doc/docs.zip: doc/html/genindex.html + (cd doc/html && zip -r ../docs.zip *) + +clean: + rm -rf build + $(MAKE) -C doc clean diff --git a/NEWS b/NEWS new file mode 100644 index 0000000000000000000000000000000000000000..51347b7776ce4300fd81d1a3f7a7d298c54e95d9 --- /dev/null +++ b/NEWS @@ -0,0 +1,1426 @@ +Current release +--------------- + +What's new in psycopg 2.9 +------------------------- + +- ``with connection`` starts a transaction on autocommit transactions too + (:ticket:`#941`). +- Timezones with fractional minutes are supported on Python 3.7 and following + (:ticket:`#1272`). +- Escape table and column names in `~cursor.copy_from()` and + `~cursor.copy_to()`. 
+- Connection exceptions with sqlstate ``08XXX`` reclassified as + `~psycopg2.OperationalError` (a subclass of the previously used + `~psycopg2.DatabaseError`) (:ticket:`#1148`). +- Include library dirs required from libpq to work around MacOS build problems + (:ticket:`#1200`). + +Other changes: + +- Dropped support for Python 2.7, 3.4, 3.5 (:tickets:`#1198, #1000, #1197`). +- Dropped support for mx.DateTime. +- Use `datetime.timezone` objects by default in datetime objects instead of + `~psycopg2.tz.FixedOffsetTimezone`. +- The `psycopg2.tz` module is deprecated and scheduled to be dropped in the + next major release. +- Provide :pep:`599` wheels packages (manylinux2014 tag) for i686 and x86_64 + platforms. +- Provide :pep:`600` wheels packages (manylinux_2_24 tag) for aarch64 and + ppc64le platforms. +- Wheel package compiled against OpenSSL 1.1.1k and PostgreSQL 13.3. +- Build system for Linux/MacOS binary packages moved to GitHub action. + + +What's new in psycopg 2.8.7 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Accept empty params as `~psycopg2.connect()` (:ticket:`#1250`). +- Fix attributes refcount in `Column` initialisation (:ticket:`#1252`). +- Allow re-initialisation of static variables in the C module (:ticket:`#1267`). + + +What's new in psycopg 2.8.6 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed memory leak changing connection encoding to the current one + (:ticket:`#1101`). +- Fixed search of mxDateTime headers in virtualenvs (:ticket:`#996`). +- Added missing values from errorcodes (:ticket:`#1133`). +- `cursor.query` reports the query of the last :sql:`COPY` operation too + (:ticket:`#1141`). +- `~psycopg2.errorcodes` map and `~psycopg2.errors` classes updated to + PostgreSQL 13. +- Added wheel packages for ARM architecture (:ticket:`#1125`). +- Wheel package compiled against OpenSSL 1.1.1g. + + +What's new in psycopg 2.8.5 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed use of `!connection_factory` and `!cursor_factory` together + (:ticket:`#1019`). 
+- Added support for `~logging.LoggerAdapter` in + `~psycopg2.extras.LoggingConnection` (:ticket:`#1026`). +- `~psycopg2.extensions.Column` objects in `cursor.description` can be sliced + (:ticket:`#1034`). +- Added AIX support (:ticket:`#1061`). +- Fixed `~copy.copy()` of `~psycopg2.extras.DictCursor` rows (:ticket:`#1073`). + + +What's new in psycopg 2.8.4 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed building with Python 3.8 (:ticket:`#854`). +- Don't swallow keyboard interrupts on connect when a password is specified + in the connection string (:ticket:`#898`). +- Don't advance replication cursor when the message wasn't confirmed + (:ticket:`#940`). +- Fixed inclusion of ``time.h`` on linux (:ticket:`#951`). +- Fixed int overflow for large values in `~psycopg2.extensions.Column.table_oid` + and `~psycopg2.extensions.Column.type_code` (:ticket:`#961`). +- `~psycopg2.errorcodes` map and `~psycopg2.errors` classes updated to + PostgreSQL 12. +- Wheel package compiled against OpenSSL 1.1.1d and PostgreSQL at least 11.4. + + +What's new in psycopg 2.8.3 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Added *interval_status* parameter to + `~psycopg2.extras.ReplicationCursor.start_replication()` method and other + facilities to send automatic replication keepalives at periodic intervals + (:ticket:`#913`). +- Fixed namedtuples caching introduced in 2.8 (:ticket:`#928`). + + +What's new in psycopg 2.8.2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed `~psycopg2.extras.RealDictCursor` when there are repeated columns + (:ticket:`#884`). +- Binary packages built with openssl 1.1.1b. Should fix concurrency problems + (:tickets:`#543, #836`). + + +What's new in psycopg 2.8.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed `~psycopg2.extras.RealDictRow` modifiability (:ticket:`#886`). +- Fixed "there's no async cursor" error polling a connection with no cursor + (:ticket:`#887`). + + +What's new in psycopg 2.8 +------------------------- + +New features: + +- Added `~psycopg2.errors` module. 
Every PostgreSQL error is converted into + a specific exception class (:ticket:`#682`). +- Added `~psycopg2.extensions.encrypt_password()` function (:ticket:`#576`). +- Added `~psycopg2.extensions.BYTES` adapter to manage databases with mixed + encodings on Python 3 (:ticket:`#835`). +- Added `~psycopg2.extensions.Column.table_oid` and + `~psycopg2.extensions.Column.table_column` attributes on `cursor.description` + items (:ticket:`#661`). +- Added `connection.info` object to retrieve various PostgreSQL connection + information (:ticket:`#726`). +- Added `~connection.get_native_connection()` to expose the raw ``PGconn`` + structure to C extensions via Capsule (:ticket:`#782`). +- Added `~connection.pgconn_ptr` and `~cursor.pgresult_ptr` to expose raw + C structures to Python and interact with libpq via ctypes (:ticket:`#782`). +- `~psycopg2.sql.Identifier` can represent qualified names in SQL composition + (:ticket:`#732`). +- Added `!ReplicationCursor`.\ `~psycopg2.extras.ReplicationCursor.wal_end` + attribute (:ticket:`#800`). +- Added *fetch* parameter to `~psycopg2.extras.execute_values()` function + (:ticket:`#813`). +- `!str()` on `~psycopg2.extras.Range` produces a human-readable representation + (:ticket:`#773`). +- `~psycopg2.extras.DictCursor` and `~psycopg2.extras.RealDictCursor` rows + maintain columns order (:ticket:`#177`). +- Added `~psycopg2.extensions.Diagnostics.severity_nonlocalized` attribute on + the `~psycopg2.extensions.Diagnostics` object (:ticket:`#783`). +- More efficient `~psycopg2.extras.NamedTupleCursor` (:ticket:`#838`). + +Bug fixes: + +- Fixed connections occasionally broken by the unrelated use of the + multiprocessing module (:ticket:`#829`). +- Fixed async communication blocking if results are returned in different + chunks, e.g. with notices interspersed to the results (:ticket:`#856`). +- Fixed adaptation of numeric subclasses such as `~enum.IntEnum` + (:ticket:`#591`). 
+ +Other changes: + +- Dropped support for Python 2.6, 3.2, 3.3. +- Dropped `psycopg1` module. +- Dropped deprecated `!register_tstz_w_secs()` (was previously a no-op). +- Dropped deprecated `!PersistentConnectionPool`. This pool class was mostly + designed to interact with Zope. Use `!ZPsycopgDA.pool` instead. +- Binary packages no longer installed by default. The 'psycopg2-binary' + package must be used explicitly. +- Dropped `!PSYCOPG_DISPLAY_SIZE` build parameter. +- Dropped support for mxDateTime as the default date and time adapter. + mxDatetime support continues to be available as an alternative to Python's + builtin datetime. +- No longer use 2to3 during installation for Python 2 & 3 compatibility. All + source files are now compatible with Python 2 & 3 as is. +- The `!psycopg2.test` package is no longer installed by ``python setup.py + install``. +- Wheel package compiled against OpenSSL 1.0.2r and PostgreSQL 11.2 libpq. + + +What's new in psycopg 2.7.7 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Cleanup of the cursor results assignment code, which might have solved + double free and inconsistencies in concurrent usage (:tickets:`#346, #384`). +- Wheel package compiled against OpenSSL 1.0.2q. + + +What's new in psycopg 2.7.6.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed binary package broken on OS X 10.12 (:ticket:`#807`). +- Wheel package compiled against PostgreSQL 11.1 libpq. + + +What's new in psycopg 2.7.6 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Close named cursors if exist, even if `~cursor.execute()` wasn't called + (:ticket:`#746`). +- Fixed building on modern FreeBSD versions with Python 3.7 (:ticket:`#755`). +- Fixed hang trying to :sql:`COPY` via `~cursor.execute()` in asynchronous + connections (:ticket:`#781`). +- Fixed adaptation of arrays of empty arrays (:ticket:`#788`). +- Fixed segfault accessing the connection's `~connection.readonly` and + `~connection.deferrable` attributes repeatedly (:ticket:`#790`). 
+- `~psycopg2.extras.execute_values()` accepts `~psycopg2.sql.Composable` + objects (:ticket:`#794`). +- `~psycopg2.errorcodes` map updated to PostgreSQL 11. +- Wheel package compiled against PostgreSQL 10.5 libpq and OpenSSL 1.0.2p. + + +What's new in psycopg 2.7.5 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Allow non-ascii chars in namedtuple fields (regression introduced fixing + :ticket:`#211`). +- Fixed adaptation of arrays of arrays of nulls (:ticket:`#325`). +- Fixed building on Solaris 11 and derivatives such as SmartOS and illumos + (:ticket:`#677`). +- Maybe fixed building on MSYS2 (as reported in :ticket:`#658`). +- Allow string subclasses in connection and other places (:ticket:`#679`). +- Don't raise an exception closing an unused named cursor (:ticket:`#716`). +- Wheel package compiled against PostgreSQL 10.4 libpq and OpenSSL 1.0.2o. + + +What's new in psycopg 2.7.4 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Moving away from installing the wheel package by default. + Packages installed from wheel raise a warning on import. Added package + ``psycopg2-binary`` to install from wheel instead (:ticket:`#543`). +- Convert fields names into valid Python identifiers in + `~psycopg2.extras.NamedTupleCursor` (:ticket:`#211`). +- Fixed Solaris 10 support (:ticket:`#532`). +- `cursor.mogrify()` can be called on closed cursors (:ticket:`#579`). +- Fixed setting session characteristics in corner cases on autocommit + connections (:ticket:`#580`). +- Fixed `~psycopg2.extras.MinTimeLoggingCursor` on Python 3 (:ticket:`#609`). +- Fixed parsing of array of points as floats (:ticket:`#613`). +- Fixed `~psycopg2.__libpq_version__` building with libpq >= 10.1 + (:ticket:`#632`). +- Fixed `~cursor.rowcount` after `~cursor.executemany()` with :sql:`RETURNING` + statements (:ticket:`#633`). +- Fixed compatibility problem with pypy3 (:ticket:`#649`). +- Wheel packages compiled against PostgreSQL 10.1 libpq and OpenSSL 1.0.2n. 
+- Wheel packages for Python 2.6 no more available (support dropped from + wheel building infrastructure). + + +What's new in psycopg 2.7.3.2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Wheel package compiled against PostgreSQL 10.0 libpq and OpenSSL 1.0.2l + (:tickets:`#601, #602`). + + +What's new in psycopg 2.7.3.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Dropped libresolv from wheel package to avoid incompatibility with + glibc 2.26 (wheels ticket #2). + + +What's new in psycopg 2.7.3 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Restored default :sql:`timestamptz[]` typecasting to Python `!datetime`. + Regression introduced in Psycopg 2.7.2 (:ticket:`#578`). + + +What's new in psycopg 2.7.2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed inconsistent state in externally closed connections + (:tickets:`#263, #311, #443`). Was fixed in 2.6.2 but not included in + 2.7 by mistake. +- Fixed Python exceptions propagation in green callback (:ticket:`#410`). +- Don't display the password in `connection.dsn` when the connection + string is specified as an URI (:ticket:`#528`). +- Return objects with timezone parsing "infinity" :sql:`timestamptz` + (:ticket:`#536`). +- Dropped dependency on VC9 runtime on Windows binary packages + (:ticket:`#541`). +- Fixed segfault in `~connection.lobject()` when *mode*\=\ `!None` + (:ticket:`#544`). +- Fixed `~connection.lobject()` keyword argument *lobject_factory* + (:ticket:`#545`). +- Fixed `~psycopg2.extras.ReplicationCursor.consume_stream()` + *keepalive_interval* argument (:ticket:`#547`). +- Maybe fixed random import error on Python 3.6 in multiprocess + environment (:ticket:`#550`). +- Fixed random `!SystemError` upon receiving abort signal (:ticket:`#551`). +- Accept `~psycopg2.sql.Composable` objects in + `~psycopg2.extras.ReplicationCursor.start_replication_expert()` + (:ticket:`#554`). +- Parse intervals returned as microseconds from Redshift (:ticket:`#558`). 
+- Added `~psycopg2.extras.Json` `!prepare()` method to consider connection + params when adapting (:ticket:`#562`). +- `~psycopg2.errorcodes` map updated to PostgreSQL 10 beta 1. + + +What's new in psycopg 2.7.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Ignore `!None` arguments passed to `~psycopg2.connect()` and + `~psycopg2.extensions.make_dsn()` (:ticket:`#517`). +- OpenSSL upgraded from major version 0.9.8 to 1.0.2 in the Linux wheel + packages (:ticket:`#518`). +- Fixed build with libpq versions < 9.3 (:ticket:`#520`). + + +What's new in psycopg 2.7 +------------------------- + +New features: + +- Added `~psycopg2.sql` module to generate SQL dynamically (:ticket:`#308`). +- Added :ref:`replication-support` (:ticket:`#322`). Main authors are + Oleksandr Shulgin and Craig Ringer, who deserve a huge thank you. +- Added `~psycopg2.extensions.parse_dsn()` and + `~psycopg2.extensions.make_dsn()` functions (:tickets:`#321, #363`). + `~psycopg2.connect()` now can take both *dsn* and keyword arguments, merging + them together. +- Added `~psycopg2.__libpq_version__` and + `~psycopg2.extensions.libpq_version()` to inspect the version of the + ``libpq`` library the module was compiled/loaded with + (:tickets:`#35, #323`). +- The attributes `~connection.notices` and `~connection.notifies` can be + customized replacing them with any object exposing an `!append()` method + (:ticket:`#326`). +- Adapt network types to `ipaddress` objects when available. When not + enabled, convert arrays of network types to lists by default. The old `!Inet` + adapter is deprecated (:tickets:`#317, #343, #387`). +- Added `~psycopg2.extensions.quote_ident()` function (:ticket:`#359`). +- Added `~connection.get_dsn_parameters()` connection method (:ticket:`#364`). +- `~cursor.callproc()` now accepts a dictionary of parameters (:ticket:`#381`). +- Give precedence to `!__conform__()` over superclasses to choose an object + adapter (:ticket:`#456`). 
+- Using Python C API decoding functions and codecs caching for faster + unicode encoding/decoding (:ticket:`#473`). +- `~cursor.executemany()` slowness addressed by + `~psycopg2.extras.execute_batch()` and `~psycopg2.extras.execute_values()` + (:ticket:`#491`). +- Added ``async_`` as an alias for ``async`` to support Python 3.7 where + ``async`` will become a keyword (:ticket:`#495`). +- Unless in autocommit, do not use :sql:`default_transaction_*` settings to + control the session characteristics as it may create problems with external + connection pools such as pgbouncer; use :sql:`BEGIN` options instead + (:ticket:`#503`). +- `~connection.isolation_level` is now writable and entirely separated from + `~connection.autocommit`; added `~connection.readonly`, + `~connection.deferrable` writable attributes. + +Bug fixes: + +- Throw an exception trying to pass ``NULL`` chars as parameters + (:ticket:`#420`). +- Fixed error caused by missing decoding `~psycopg2.extras.LoggingConnection` + (:ticket:`#483`). +- Fixed integer overflow in :sql:`interval` seconds (:ticket:`#512`). +- Make `~psycopg2.extras.Range` objects picklable (:ticket:`#462`). +- Fixed version parsing and building with PostgreSQL 10 (:ticket:`#489`). + +Other changes: + +- Dropped support for Python 2.5 and 3.1. +- Dropped support for client library older than PostgreSQL 9.1 (but older + server versions are still supported). +- `~connection.isolation_level` doesn't read from the database but will return + `~psycopg2.extensions.ISOLATION_LEVEL_DEFAULT` if no value was set on the + connection. +- Empty arrays no more converted into lists if they don't have a type attached + (:ticket:`#506`) + + +What's new in psycopg 2.6.2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed inconsistent state in externally closed connections + (:tickets:`#263, #311, #443`). +- Report the server response status on errors (such as :ticket:`#281`). +- Raise `!NotSupportedError` on unhandled server response status + (:ticket:`#352`). 
+- Allow overriding string adapter encoding with no connection (:ticket:`#331`). +- The `~psycopg2.extras.wait_select` callback allows interrupting a + long-running query in an interactive shell using :kbd:`Ctrl-C` + (:ticket:`#333`). +- Fixed `!PersistentConnectionPool` on Python 3 (:ticket:`#348`). +- Fixed segfault on `repr()` of an unitialized connection (:ticket:`#361`). +- Allow adapting bytes using `~psycopg2.extensions.QuotedString` on Python 3 + (:ticket:`#365`). +- Added support for setuptools/wheel (:ticket:`#370`). +- Fix build on Windows with Python 3.5, VS 2015 (:ticket:`#380`). +- Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`). +- Fixed `!read()` exception propagation in copy_from (:ticket:`#412`). +- Fixed possible NULL TZ decref (:ticket:`#424`). +- `~psycopg2.errorcodes` map updated to PostgreSQL 9.5. + + +What's new in psycopg 2.6.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Lists consisting of only `None` are escaped correctly (:ticket:`#285`). +- Fixed deadlock in multithread programs using OpenSSL (:ticket:`#290`). +- Correctly unlock the connection after error in flush (:ticket:`#294`). +- Fixed `!MinTimeLoggingCursor.callproc()` (:ticket:`#309`). +- Added support for MSVC 2015 compiler (:ticket:`#350`). + + +What's new in psycopg 2.6 +------------------------- + +New features: + +- Added support for large objects larger than 2GB. Many thanks to Blake Rouse + and the MAAS Team for the feature development. +- Python `time` objects with a tzinfo specified and PostgreSQL :sql:`timetz` + data are converted into each other (:ticket:`#272`). + +Bug fixes: + +- Json adapter's `!str()` returns the adapted content instead of the `!repr()` + (:ticket:`#191`). + + +What's new in psycopg 2.5.5 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Named cursors used as context manager don't swallow the exception on exit + (:ticket:`#262`). +- `cursor.description` can be pickled (:ticket:`#265`). +- Propagate read error messages in COPY FROM (:ticket:`#270`). 
+- PostgreSQL time 24:00 is converted to Python 00:00 (:ticket:`#278`). + + +What's new in psycopg 2.5.4 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Added :sql:`jsonb` support for PostgreSQL 9.4 (:ticket:`#226`). +- Fixed segfault if COPY statements are passed to `~cursor.execute()` instead + of using the proper methods (:ticket:`#219`). +- Force conversion of pool arguments to integer to avoid potentially unbounded + pools (:ticket:`#220`). +- Cursors :sql:`WITH HOLD` don't begin a new transaction upon move/fetch/close + (:ticket:`#228`). +- Cursors :sql:`WITH HOLD` can be used in autocommit (:ticket:`#229`). +- `~cursor.callproc()` doesn't silently ignore an argument without a length. +- Fixed memory leak with large objects (:ticket:`#256`). +- Make sure the internal ``_psycopg.so`` module can be imported stand-alone (to + allow modules juggling such as the one described in :ticket:`#201`). + + +What's new in psycopg 2.5.3 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Work around `pip issue #1630 `__ + making installation via ``pip -e git+url`` impossible (:ticket:`#18`). +- Copy operations correctly set the `cursor.rowcount` attribute + (:ticket:`#180`). +- It is now possible to call `get_transaction_status()` on closed connections. +- Fixed unsafe access to object names causing assertion failures in + Python 3 debug builds (:ticket:`#188`). +- Mark the connection closed if found broken on `poll()` (from :ticket:`#192` + discussion) +- Fixed handling of dsn and closed attributes in connection subclasses + failing to connect (from :ticket:`#192` discussion). +- Added arbitrary but stable order to `Range` objects, thanks to + Chris Withers (:ticket:`#193`). +- Avoid blocking async connections on connect (:ticket:`#194`). Thanks to + Adam Petrovich for the bug report and diagnosis. +- Don't segfault using poorly defined cursor subclasses which forgot to call + the superclass init (:ticket:`#195`). 
+- Mark the connection closed when a Socket connection is broken, as it + happens for TCP connections instead (:ticket:`#196`). +- Fixed overflow opening a lobject with an oid not fitting in a signed int + (:ticket:`#203`). +- Fixed handling of explicit default ``cursor_factory=None`` in + `connection.cursor()` (:ticket:`#210`). +- Fixed possible segfault in named cursors creation. +- Fixed debug build on Windows, thanks to James Emerton. + + +What's new in psycopg 2.5.2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed segfault pickling the exception raised on connection error + (:ticket:`#170`). +- Meaningful connection errors report a meaningful message, thanks to + Alexey Borzenkov (:ticket:`#173`). +- Manually creating `lobject` with the wrong parameter doesn't segfault + (:ticket:`#187`). + + +What's new in psycopg 2.5.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed build on Solaris 10 and 11 where the round() function is already + declared (:ticket:`#146`). +- Fixed comparison of `Range` with non-range objects (:ticket:`#164`). + Thanks to Chris Withers for the patch. +- Fixed double-free on connection dealloc (:ticket:`#166`). Thanks to + Gangadharan S.A. for the report and fix suggestion. + + +What's new in psycopg 2.5 +------------------------- + +New features: + +- Added :ref:`JSON adaptation `. +- Added :ref:`support for PostgreSQL 9.2 range types `. +- `connection` and `cursor` objects can be used in ``with`` statements + as context managers as specified by recent |DBAPI|_ extension. +- Added `~psycopg2.extensions.Diagnostics` object to get extended info + from a database error. Many thanks to Matthew Woodcraft for the + implementation (:ticket:`#149`). +- Added `connection.cursor_factory` attribute to customize the default + object returned by `~connection.cursor()`. +- Added support for backward scrollable cursors. Thanks to Jon Nelson + for the initial patch (:ticket:`#108`). 
+- Added a simple way to :ref:`customize casting of composite types + ` into Python objects other than namedtuples. + Many thanks to Ronan Dunklau and Tobias Oberstein for the feature + development. +- `connection.reset()` implemented using :sql:`DISCARD ALL` on server + versions supporting it. + +Bug fixes: + +- Properly cleanup memory of broken connections (:ticket:`#148`). +- Fixed bad interaction of ``setup.py`` with other dependencies in + Distribute projects on Python 3 (:ticket:`#153`). + +Other changes: + +- Added support for Python 3.3. +- Dropped support for Python 2.4. Please use Psycopg 2.4.x if you need it. +- `~psycopg2.errorcodes` map updated to PostgreSQL 9.2. +- Dropped Zope adapter from source repository. ZPsycopgDA now has its own + project at . + + +What's new in psycopg 2.4.6 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed 'cursor()' arguments propagation in connection subclasses + and overriding of the 'cursor_factory' argument. Thanks to + Corry Haines for the report and the initial patch (:ticket:`#105`). +- Dropped GIL release during string adaptation around a function call + invoking a Python API function, which could cause interpreter crash. + Thanks to Manu Cupcic for the report (:ticket:`#110`). +- Close a green connection if there is an error in the callback. + Maybe a harsh solution but it leaves the program responsive + (:ticket:`#113`). +- 'register_hstore()', 'register_composite()', 'tpc_recover()' work with + RealDictConnection and Cursor (:ticket:`#114`). +- Fixed broken pool for Zope and connections re-init across ZSQL methods + in the same request (:tickets:`#123, #125, #142`). +- connect() raises an exception instead of swallowing keyword arguments + when a connection string is specified as well (:ticket:`#131`). +- Discard any result produced by 'executemany()' (:ticket:`#133`). +- Fixed pickling of FixedOffsetTimezone objects (:ticket:`#135`). +- Release the GIL around PQgetResult calls after COPY (:ticket:`#140`). 
+- Fixed empty strings handling in composite caster (:ticket:`#141`). +- Fixed pickling of DictRow and RealDictRow objects. + + +What's new in psycopg 2.4.5 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- The close() methods on connections and cursors don't raise exceptions + if called on already closed objects. +- Fixed fetchmany() with no argument in cursor subclasses + (:ticket:`#84`). +- Use lo_creat() instead of lo_create() when possible for better + interaction with pgpool-II (:ticket:`#88`). +- Error and its subclasses are picklable, useful for multiprocessing + interaction (:ticket:`#90`). +- Better efficiency and formatting of timezone offset objects thanks + to Menno Smits (:tickets:`#94, #95`). +- Fixed 'rownumber' during iteration on cursor subclasses. + Regression introduced in 2.4.4 (:ticket:`#100`). +- Added support for 'inet' arrays. +- Fixed 'commit()' concurrency problem (:ticket:`#103`). +- Codebase cleaned up using the GCC Python plugin's static analysis + tool, which has revealed several unchecked return values, possible + NULL dereferences, reference counting problems. Many thanks to David + Malcolm for the useful tool and the assistance provided using it. + + +What's new in psycopg 2.4.4 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- 'register_composite()' also works with the types implicitly defined + after a table row, not only with the ones created by 'CREATE TYPE'. +- Values for the isolation level symbolic constants restored to what + they were before release 2.4.2 to avoid breaking apps using the + values instead of the constants. +- Named DictCursor/RealDictCursor honour itersize (:ticket:`#80`). +- Fixed rollback on error on Zope (:ticket:`#73`). +- Raise 'DatabaseError' instead of 'Error' with empty libpq errors, + consistently with other disconnection-related errors: regression + introduced in release 2.4.1 (:ticket:`#82`). 
+ + +What's new in psycopg 2.4.3 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- connect() supports all the keyword arguments supported by the + database +- Added 'new_array_type()' function for easy creation of array + typecasters. +- Added support for arrays of hstores and composite types (:ticket:`#66`). +- Fixed segfault in case of transaction started with connection lost + (and possibly other events). +- Fixed adaptation of Decimal type in sub-interpreters, such as in + certain mod_wsgi configurations (:ticket:`#52`). +- Rollback connections in transaction or in error before putting them + back into a pool. Also discard broken connections (:ticket:`#62`). +- Lazy import of the slow uuid module, thanks to Marko Kreen. +- Fixed NamedTupleCursor.executemany() (:ticket:`#65`). +- Fixed --static-libpq setup option (:ticket:`#64`). +- Fixed interaction between RealDictCursor and named cursors + (:ticket:`#67`). +- Dropped limit on the columns length in COPY operations (:ticket:`#68`). +- Fixed reference leak with arguments referenced more than once + in queries (:ticket:`#81`). +- Fixed typecasting of arrays containing consecutive backslashes. +- 'errorcodes' map updated to PostgreSQL 9.1. + + +What's new in psycopg 2.4.2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Added 'set_session()' method and 'autocommit' property to the + connection. Added support for read-only sessions and, for PostgreSQL + 9.1, for the "repeatable read" isolation level and the "deferrable" + transaction property. +- Psycopg doesn't execute queries at connection time to find the + default isolation level. +- Fixed bug with multithread code potentially causing loss of sync + with the server communication or lock of the client (:ticket:`#55`). +- Don't fail import if mx.DateTime module can't be found, even if its + support was built (:ticket:`#53`). +- Fixed escape for negative numbers prefixed by minus operator + (:ticket:`#57`). +- Fixed refcount issue during copy. 
Reported and fixed by Dave
+  Malcolm (:ticket:`#58`, Red Hat Bug 711095).
+- Trying to execute concurrent operations on the same connection
+  through concurrent green threads results in an error instead of a
+  deadlock.
+- Fixed detection of pg_config on Windows. Report and fix, plus some
+  long needed setup.py cleanup by Steve Lacy: thanks!
+
+
+What's new in psycopg 2.4.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Use own parser for bytea output, not requiring anymore the libpq 9.0
+  to parse the hex format.
+- Don't fail connection if the client encoding is a non-normalized
+  variant. Issue reported by Peter Eisentraut.
+- Correctly detect an empty query sent to the backend (:ticket:`#46`).
+- Fixed a SystemError clobbering libpq errors raised without SQLSTATE.
+  Bug vivisectioned by Eric Snow.
+- Fixed interaction between NamedTuple and server-side cursors.
+- Allow to specify --static-libpq on setup.py command line instead of
+  just in 'setup.cfg'. Patch provided by Matthew Ryan (:ticket:`#48`).
+
+
+What's new in psycopg 2.4
+-------------------------
+
+New features and changes:
+
+- Added support for Python 3.1 and 3.2. The conversion has also
+  brought several improvements:
+
+  - Added 'b' and 't' mode to large objects: write can deal with both
+    bytes strings and unicode; read can return either bytes strings
+    or decoded unicode.
+  - COPY sends Unicode data to files implementing 'io.TextIOBase'.
+  - Improved PostgreSQL-Python encodings mapping.
+  - Added a few missing encodings: EUC_CN, EUC_JIS_2004, ISO885910,
+    ISO885916, LATIN10, SHIFT_JIS_2004.
+  - Dropped repeated dictionary lookups with unicode query/parameters.
+
+- Improvements to the named cursors:
+
+  - More efficient iteration on named cursors, fetching 'itersize'
+    records at a time from the backend.
+  - The named cursors name can be an invalid identifier.
+
+- Improvements in data handling:
+
+  - Added 'register_composite()' function to cast PostgreSQL
+    composite types into Python tuples/namedtuples.
+ - Adapt types 'bytearray' (from Python 2.6), 'memoryview' (from + Python 2.7) and other objects implementing the "Revised Buffer + Protocol" to 'bytea' data type. + - The 'hstore' adapter can work even when the data type is not + installed in the 'public' namespace. + - Raise a clean exception instead of returning bad data when + receiving bytea in 'hex' format and the client libpq can't parse + them. + - Empty lists correctly roundtrip Python -> PostgreSQL -> Python. + +- Other changes: + + - 'cursor.description' is provided as named tuples if available. + - The build script refuses to guess values if 'pg_config' is not + found. + - Connections and cursors are weakly referenceable. + +Bug fixes: + +- Fixed adaptation of None in composite types (:ticket:`#26`). Bug + report by Karsten Hilbert. +- Fixed several reference leaks in less common code paths. +- Fixed segfault when a large object is closed and its connection no + more available. +- Added missing icon to ZPsycopgDA package, not available in Zope + 2.12.9 (:ticket:`#30`). Bug report and patch by Pumukel. +- Fixed conversion of negative infinity (:ticket:`#40`). Bug report and + patch by Marti Raudsepp. + + +What's new in psycopg 2.3.2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed segfault with middleware not passing DateStyle to the client + (:ticket:`#24`). Bug report and patch by Marti Raudsepp. + + +What's new in psycopg 2.3.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed build problem on CentOS 5.5 x86_64 (:ticket:`#23`). + + +What's new in psycopg 2.3 +------------------------- + +psycopg 2.3 aims to expose some new features introduced in PostgreSQL 9.0. + +Main new features: + +- `dict` to `hstore` adapter and `hstore` to `dict` typecaster, using both + 9.0 and pre-9.0 syntax. +- Two-phase commit protocol support as per DBAPI specification. +- Support for payload in notifications received from the backend. +- `namedtuple`-returning cursor. +- Query execution cancel. 
+
+Other features and changes:
+
+- Dropped support for protocol 2: Psycopg 2.3 can only connect to PostgreSQL
+  servers with version at least 7.4.
+- Don't issue a query at every connection to detect the client encoding
+  and to set the datestyle to ISO if it is already compatible with what
+  is expected.
+- `mogrify()` now supports unicode queries.
+- Subclasses of a type that can be adapted are adapted as the superclass.
+- `errorcodes` knows a couple of new codes introduced in PostgreSQL 9.0.
+- Dropped deprecated Psycopg "own quoting".
+- Never issue a ROLLBACK on close/GC. This behaviour was introduced as a bug
+  in release 2.2, but trying to send a command while being destroyed has been
+  considered not safe.
+
+Bug fixes:
+
+- Fixed use of `PQfreemem` instead of `free` in binary typecaster.
+- Fixed access to freed memory in `conn_get_isolation_level()`.
+- Fixed crash during Decimal adaptation with a few 2.5.x Python versions
+  (:ticket:`#7`).
+- Fixed notices order (:ticket:`#9`).
+
+
+What's new in psycopg 2.2.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Bug fixes:
+
+- the call to logging.basicConfig() in pool.py has been dropped: it was
+  messing with some projects using logging (and a library should not
+  initialize the logging system anyway.)
+- psycopg now correctly handles time zones with seconds in the UTC offset.
+  The old register_tstz_w_secs() function is deprecated and will raise a
+  warning if called.
+- Exceptions raised by the column iterator are propagated.
+- Exceptions raised by executemany() iterators are propagated.
+
+
+What's new in psycopg 2.2.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Bug fixes:
+
+- psycopg now builds again on MS Windows.
+ + +What's new in psycopg 2.2 +------------------------- + +This is the first release of the new 2.2 series, supporting not just one but +two different ways of executing asynchronous queries, thanks to Jan and Daniele +(with a little help from me and others, but they did 99% of the work so they +deserve their names here in the news.) + +psycopg now supports both classic select() loops and "green" coroutine +libraries. It is all in the documentation, so just point your browser to +doc/html/advanced.html. + +Other new features: + +- truncate() method for lobjects. +- COPY functions are now a little bit faster. +- All builtin PostgreSQL to Python typecasters are now available from the + psycopg2.extensions module. +- Notifications from the backend are now available right after the execute() + call (before client code needed to call isbusy() to ensure NOTIFY + reception.) +- Better timezone support. +- Lots of documentation updates. + +Bug fixes: + +- Fixed some gc/refcounting problems. +- Fixed reference leak in NOTIFY reception. +- Fixed problem with PostgreSQL not casting string literals to the correct + types in some situations: psycopg now add an explicit cast to dates, times + and bytea representations. +- Fixed TimestampFromTicks() and TimeFromTicks() for seconds >= 59.5. +- Fixed spurious exception raised when calling C typecasters from Python + ones. + + +What's new in psycopg 2.0.14 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +New features: + +- Support for adapting tuples to PostgreSQL arrays is now enabled by + default and does not require importing psycopg2.extensions anymore. +- "can't adapt" error message now includes full type information. +- Thank to Daniele Varrazzo (piro) psycopg2's source package now includes + full documentation in HTML and plain text format. + +Bug fixes: + +- No loss of precision when using floats anymore. 
+- decimal.Decimal "nan" and "infinity" correctly converted to PostgreSQL + numeric NaN values (note that PostgreSQL numeric type does not support + infinity but just NaNs.) +- psycopg2.extensions now includes Binary. + +It seems we're good citizens of the free software ecosystem and that big +big big companies and people ranting on the pgsql-hackers mailing list +we'll now not dislike us. *g* (See LICENSE file for the details.) + + +What's new in psycopg 2.0.13 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +New features: + +- Support for UUID arrays. +- It is now possible to build psycopg linking to a static libpq + library. + +Bug fixes: + +- Fixed a deadlock related to using the same connection with + multiple cursors from different threads. +- Builds again with MSVC. + + +What's new in psycopg 2.0.12 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +New features: + +- The connection object now has a reset() method that can be used to + reset the connection to its default state. + +Bug fixes: + +- copy_to() and copy_from() now accept a much larger number of columns. +- Fixed PostgreSQL version detection. +- Fixed ZPsycopgDA version check. +- Fixed regression in ZPsycopgDA that made it behave wrongly when + receiving serialization errors: now the query is re-issued as it + should be by propagating the correct exception to Zope. +- Writing "large" large objects should now work. + + +What's new in psycopg 2.0.11 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +New features: + +- DictRow and RealDictRow now use less memory. If you inherit on them + remember to set __slots__ for your new attributes or be prepare to + go back to old memory usage. + +Bug fixes: + +- Fixed exception in setup.py. +- More robust detection of PostgreSQL development versions. +- Fixed exception in RealDictCursor, introduced in 2.0.10. + + +What's new in psycopg 2.0.10 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +New features: + +- A specialized type-caster that can parse time zones with seconds is + now available. 
Note that after enabling it (see extras.py) "wrong"
+  time zones will be parsed without raising an exception but the
+  result will be rounded.
+- DictCursor can be used as a named cursor.
+- DictRow now implements more dict methods.
+- The connection object now exposes PostgreSQL server version as the
+  .server_version attribute and the protocol version used as
+  .protocol_version.
+- The connection object has a .get_parameter_status() method that
+  can be used to obtain useful information from the server.
+
+Bug fixes:
+
+- None is now correctly always adapted to NULL.
+- Two double memory free errors provoked by multithreading and
+  garbage collection are now fixed.
+- Fixed usage of internal Python code in the notice processor; this
+  should fix segfaults when receiving a lot of notices in
+  multithreaded programs.
+- Should build again on MSVC and Solaris.
+- Should build with development versions of PostgreSQL (ones with
+  -devel version string.)
+- Fixed some tests that failed even when psycopg was right.
+
+
+What's new in psycopg 2.0.9
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- "import psycopg2.extras" to get some support for handling times
+  and timestamps with seconds in the time zone offset.
+- DictCursors can now be used as named cursors.
+
+Bug fixes:
+
+- register_type() now accepts an explicit None as its second parameter.
+- psycopg2 should build again on MSVC and Solaris.
+
+
+What's new in psycopg 2.0.9
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- COPY TO/COPY FROM queries now can be of any size and psycopg will
+  correctly quote separators.
+- float values Inf and NaN are now correctly handled and can
+  round-trip to the database.
+- executemany() now returns the number of total INSERTed or UPDATEd
+  rows. Note that, as it has always been, executemany() should not
+  be used to execute multiple SELECT statements and while it will
+  execute the statements without any problem, it will return the
+  wrong value.
+- copy_from() and copy_to() can now use quoted separators. +- "import psycopg2.extras" to get UUID support. + +Bug fixes: + +- register_type() now works on connection and cursor subclasses. +- fixed a memory leak when using lobjects. + + +What's new in psycopg 2.0.8 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +New features: + +- The connection object now has a get_backend_pid() method that + returns the current PostgreSQL connection backend process PID. +- The PostgreSQL large object API has been exposed through the + Cursor.lobject() method. + +Bug fixes: + +- Some fixes to ZPsycopgDA have been merged from the Debian package. +- A memory leak was fixed in Cursor.executemany(). +- A double free was fixed in pq_complete_error(), that caused crashes + under some error conditions. + + +What's new in psycopg 2.0.7 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Improved error handling: + +- All instances of psycopg2.Error subclasses now have pgerror, + pgcode and cursor attributes. They will be set to None if no + value is available. +- Exception classes are now chosen based on the SQLSTATE value from + the result. (#184) +- The commit() and rollback() methods now set the pgerror and pgcode + attributes on exceptions. (#152) +- errors from commit() and rollback() are no longer considered + fatal. (#194) +- If a disconnect is detected during execute(), an exception will be + raised at that point rather than resulting in "ProgrammingError: + no results to fetch" later on. (#186) + +Better PostgreSQL compatibility: + +- If the server uses standard_conforming_strings, perform + appropriate quoting. +- BC dates are now handled if psycopg is compiled with mxDateTime + support. If using datetime, an appropriate ValueError is + raised. (#203) + +Other bug fixes: + +- If multiple sub-interpreters are in use, do not share the Decimal + type between them. (#192) +- Buffer objects obtained from psycopg are now accepted by psycopg + too, without segfaulting. 
(#209) +- A few small changes were made to improve DB-API compatibility. + All the dbapi20 tests now pass. + +Miscellaneous: + +- The PSYCOPG_DISPLAY_SIZE option is now off by default. This means + that display size will always be set to "None" in + cursor.description. Calculating the display size was expensive, + and infrequently used so this should improve performance. +- New QueryCanceledError and TransactionRollbackError exceptions + have been added to the psycopg2.extensions module. They can be + used to detect statement timeouts and deadlocks respectively. +- Cursor objects now have a "closed" attribute. (#164) +- If psycopg has been built with debug support, it is now necessary + to set the PSYCOPG_DEBUG environment variable to turn on debug + spew. + + +What's new in psycopg 2.0.6 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Better support for PostgreSQL, Python and win32: + +- full support for PostgreSQL 8.2, including NULLs in arrays +- support for almost all existing PostgreSQL encodings +- full list of PostgreSQL error codes available by importing the + psycopg2.errorcodes module +- full support for Python 2.5 and 64 bit architectures +- better build support on win32 platform + +Support for per-connection type-casters (used by ZPsycopgDA too, this +fixes a long standing bug that made different connections use a random +set of date/time type-casters instead of the configured one.) + +Better management of times and dates both from Python and in Zope. + +copy_to and copy_from now take an extra "columns" parameter. + +Python tuples are now adapted to SQL sequences that can be used with +the "IN" operator by default if the psycopg2.extensions module is +imported (i.e., the SQL_IN adapter was moved from extras to extensions.) 
+ +Fixed some small buglets and build glitches: + +- removed double mutex destroy +- removed all non-constant initializers +- fixed PyObject_HEAD declarations to avoid memory corruption + on 64 bit architectures +- fixed several Python API calls to work on 64 bit architectures +- applied compatibility macros from PEP 353 +- now using more than one argument format raise an error instead of + a segfault + + +What's new in psycopg 2.0.5.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Now it really, really builds on MSVC and older gcc versions. + +What's new in psycopg 2.0.5 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Fixed various buglets such as: + + - segfault when passing an empty string to Binary() + - segfault on null queries + - segfault and bad keyword naming in .executemany() + - OperationalError in connection objects was always None + +* Various changes to ZPsycopgDA to make it more zope2.9-ish. + +* connect() now accept both integers and strings as port parameter + +What's new in psycopg 2.0.4 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Fixed float conversion bug introduced in 2.0.3. + +What's new in psycopg 2.0.3 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Fixed various buglets and a memory leak (see ChangeLog for details) + +What's new in psycopg 2.0.2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Fixed a bug in array typecasting that sometimes made psycopg forget about + the last element in the array. + +* Fixed some minor buglets in string memory allocations. + +* Builds again with compilers different from gcc (#warning about PostgreSQL + version is issued only if __GCC__ is defined.) + +What's new in psycopg 2.0.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* ZPsycopgDA now actually loads. + +What's new in psycopg 2.0 +------------------------- + +* Fixed handle leak on win32. + +* If available the new "safe" encoding functions of libpq are used. 
+ +* django and tinyerp people, please switch to psycopg 2 _without_ + using a psycopg 1 compatibility layer (this release was anticipated + so that you all stop grumbling about psycopg 2 is still in beta.. :) + +What's new in psycopg 2.0 beta 7 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Ironed out last problems with times and date (should be quite solid now.) + +* Fixed problems with some arrays. + +* Slightly better ZPsycopgDA (no more double connection objects in the menu + and other minor fixes.) + +* ProgrammingError exceptions now have three extra attributes: .cursor + (it is possible to access the query that caused the exception using + error.cursor.query), .pgerror and .pgcode (PostgreSQL original error + text and code.) + +* The build system uses pg_config when available. + +* Documentation in the doc/ directory! (With many kudos to piro.) + +What's new in psycopg 2.0 beta 6 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Support for named cursors. + +* Safer parsing of time intervals. + +* Better parsing of times and dates, no more locale problems. + +* Should now play well with py2exe and similar tools. + +* The "decimal" module is now used if available under Python 2.3. + +What's new in psycopg 2.0 beta 5 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Fixed all known bugs. + +* The initial isolation level is now read from the server and + .set_isolation_level() now takes values defined in psycopg2.extensions. + +* .callproc() implemented as a SELECT of the given procedure. + +* Better docstrings for a few functions/methods. + +* Some time-related functions like psycopg2.TimeFromTicks() now take the + local timezone into account. Also a tzinfo object (as per datetime module + specifications) can be passed to the psycopg2.Time and psycopg2.Datetime + constructors. + +* All classes have been renamed to exist in the psycopg2._psycopg module, + to fix problems with automatic documentation generators like epydoc. + +* NOTIFY is correctly trapped. 
+ +What's new in psycopg 2.0 beta 4 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* psycopg module is now named psycopg2. + +* No more segfaults when a UNICODE query can't be converted to the + backend encoding. + +* No more segfaults on empty queries. + +* psycopg2.connect() now takes an integer for the port keyword parameter. + +* "python setup.py bdist_rpm" now works. + +* Fixed lots of small bugs, see ChangeLog for details. + +What's new in psycopg 2.0 beta 3 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* ZPsycopgDA now works (except table browsing.) + +* psycopg build again on Python 2.2. + +What's new in psycopg 2.0 beta 2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Fixed ZPsycopgDA version check (ZPsycopgDA can now be imported in + Zope.) + +* psycopg.extras.DictRow works even after a new query on the generating + cursor. + +* Better setup.py for win32 (should build with MSCV or mingw.) + +* Generic fixed and memory leaks plugs. + +What's new in psycopg 2.0 beta 1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Officially in beta (i.e., no new features will be added.) + +* Array support: list objects can be passed as bound variables and are + correctly returned for array columns. + +* Added the psycopg.psycopg1 compatibility module (if you want instant + psycopg 1 compatibility just "from psycopg import psycopg1 as psycopg".) + +* Complete support for BYTEA columns and buffer objects. + +* Added error codes to error messages. + +* The AsIs adapter is now exported by default (also Decimal objects are + adapted using the AsIs adapter (when str() is called on them they + already format themselves using the right precision and scale.) + +* The connect() function now takes "connection_factory" instead of + "factory" as keyword argument. + +* New setup.py code to build on win32 using mingw and better error + messages on missing datetime headers, + +* Internal changes that allow much better user-defined type casters. 
+ +* A lot of bugfixes (binary, datetime, 64 bit arches, GIL, .executemany()) + +What's new in psycopg 1.99.13 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Added missing .executemany() method. + +* Optimized type cast from PostgreSQL to Python (psycopg should be even + faster than before.) + +What's new in psycopg 1.99.12 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* .rowcount should be ok and in sync with psycopg 1. + +* Implemented the new COPY FROM/COPY TO code when connection to the + backend using libpq protocol 3 (this also removes all asprintf calls: + build on win32 works again.) A protocol 3-enabled psycopg *can* + connect to an old protocol 2 database and will detect it and use the + right code. + +* getquoted() called for real by the mogrification code. + +What's new in psycopg 1.99.11 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* 'cursor' argument in .cursor() connection method renamed to + 'cursor_factory'. + +* changed 'tuple_factory' cursor attribute name to 'row_factory'. + +* the .cursor attribute is gone and connections and cursors are properly + gc-managed. + +* fixes to the async core. + +What's new in psycopg 1.99.10 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* The adapt() function now fully supports the adaptation protocol + described in PEP 246. Note that the adapters registry now is indexed + by (type, protocol) and not by type alone. Change your adapters + accordingly. + +* More configuration options moved from setup.py to setup.cfg. + +* Fixed two memory leaks: one in cursor deallocation and one in row + fetching (.fetchXXX() methods.) + +What's new in psycopg 1.99.9 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Added simple pooling code (psycopg.pool module). + +* Added DECIMAL typecaster to convert postgresql DECIMAL and NUMERIC + types (i.e, all types with an OID of NUMERICOID.) Note that the + DECIMAL typecaster does not set scale and precision on the created + objects but uses Python defaults. + +* ZPsycopgDA back in and working using the new pooling code. + +* Isn't that enough? 
:) + +What's new in psycopg 1.99.8 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* added support for UNICODE queries. +* added UNICODE typecaster; to activate it just do:: + + psycopg.extensions.register_type(psycopg.extensions.UNICODE) + + Note that the UNICODE typecaster override the STRING one, so it is + not activated by default. + +* cursors now really support the iterator protocol. +* solved the rounding errors in time conversions. +* now cursors support .fileno() and .isready() methods, to be used in + select() calls. +* .copy_from() and .copy_in() methods are back in (still using the old + protocol, will be updated to use new one in next release.) +* fixed memory corruption bug reported on win32 platform. + +What's new in psycopg 1.99.7 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* added support for tuple factories in cursor objects (removed factory + argument in favor of a .tuple_factory attribute on the cursor object); + see the new module psycopg.extras for a cursor (DictCursor) that + return rows as objects that support indexing both by position and + column name. +* added support for tzinfo objects in datetime.timestamp objects: the + PostgreSQL type "timestamp with time zone" is converted to + datetime.timestamp with a FixedOffsetTimezone initialized as necessary. + +What's new in psycopg 1.99.6 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* sslmode parameter from 1.1.x +* various datetime conversion improvements. +* now psycopg should compile without mx or without native datetime + (not both, obviously.) +* included various win32/MSVC fixes (pthread.h changes, winsock2 + library, include path in setup.py, etc.) +* ported interval fixes from 1.1.14/1.1.15. +* the last query executed by a cursor is now available in the + .query attribute. +* conversion of unicode strings to backend encoding now uses a table + (that still need to be filled.) +* cursors now have a .mogrify() method that return the query string + instead of executing it. 
+* connection objects now have a .dsn read-only attribute that holds the + connection string. +* moved psycopg C module to _psycopg and made psycopg a python module: + this allows for a neat separation of DBAPI-2.0 functionality and psycopg + extensions; the psycopg namespace will be also used to provide + python-only extensions (like the pooling code, some ZPsycopgDA support + functions and the like.) + +What's new in psycopg 1.99.3 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* added support for python 2.3 datetime types (both ways) and made datetime + the default set of typecasters when available. +* added example: dt.py. + +What's new in psycopg 1.99.3 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* initial working support for unicode bound variables: UTF-8 and latin-1 + backend encodings are natively supported (and the encoding.py example even + works!) +* added .set_client_encoding() method on the connection object. +* added examples: encoding.py, binary.py, lastrowid.py. + +What's new in psycopg 1.99.2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* better typecasting: + + - DateTimeDelta used for postgresql TIME (merge from 1.1) + - BYTEA now is converted to a real buffer object, not to a string + +* buffer objects are now adapted into Binary objects automatically. +* ported scroll method from 1.1 (DBAPI-2.0 extension for cursors) +* initial support for some DBAPI-2.0 extensions: + + - .rownumber attribute for cursors + - .connection attribute for cursors + - .next() and .__iter__() methods to have cursors support the iterator + protocol + - all exception objects are exported to the connection object + +What's new in psycopg 1.99.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* implemented microprotocols to adapt arbitrary types to the interface used by + psycopg to bind variables in execute; + +* moved qstring, pboolean and mxdatetime to the new adapter layout (binary is + still missing; python 2.3 datetime needs to be written). 
+ + +What's new in psycopg 1.99.0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* reorganized the whole source tree; + +* async core is in place; + +* splitted QuotedString objects from mx stuff; + +* dropped autotools and moved to pythonic setup.py (needs work.) diff --git a/README.rst b/README.rst new file mode 100644 index 0000000000000000000000000000000000000000..c9747da790ed9bbfa31a0700725a91329a5a2ada --- /dev/null +++ b/README.rst @@ -0,0 +1,73 @@ +psycopg2 - Python-PostgreSQL Database Adapter +============================================= + +Psycopg is the most popular PostgreSQL database adapter for the Python +programming language. Its main features are the complete implementation of +the Python DB API 2.0 specification and the thread safety (several threads can +share the same connection). It was designed for heavily multi-threaded +applications that create and destroy lots of cursors and make a large number +of concurrent "INSERT"s or "UPDATE"s. + +Psycopg 2 is mostly implemented in C as a libpq wrapper, resulting in being +both efficient and secure. It features client-side and server-side cursors, +asynchronous communication and notifications, "COPY TO/COPY FROM" support. +Many Python types are supported out-of-the-box and adapted to matching +PostgreSQL data types; adaptation can be extended and customized thanks to a +flexible objects adaptation system. + +Psycopg 2 is both Unicode and Python 3 friendly. + + +Documentation +------------- + +Documentation is included in the ``doc`` directory and is `available online`__. + +.. __: https://www.psycopg.org/docs/ + +For any other resource (source code repository, bug tracker, mailing list) +please check the `project homepage`__. + +.. __: https://psycopg.org/ + + +Installation +------------ + +Building Psycopg requires a few prerequisites (a C compiler, some development +packages): please check the install_ and the faq_ documents in the ``doc`` dir +or online for the details. 
+ +If prerequisites are met, you can install psycopg like any other Python +package, using ``pip`` to download it from PyPI_:: + + $ pip install psycopg2 + +or using ``setup.py`` if you have downloaded the source package locally:: + + $ python setup.py build + $ sudo python setup.py install + +You can also obtain a stand-alone package, not requiring a compiler or +external libraries, by installing the `psycopg2-binary`_ package from PyPI:: + + $ pip install psycopg2-binary + +The binary package is a practical choice for development and testing but in +production it is advised to use the package built from sources. + +.. _PyPI: https://pypi.org/project/psycopg2/ +.. _psycopg2-binary: https://pypi.org/project/psycopg2-binary/ +.. _install: https://www.psycopg.org/docs/install.html#install-from-source +.. _faq: https://www.psycopg.org/docs/faq.html#faq-compile + +:Linux/OSX: |gh-actions| +:Windows: |appveyor| + +.. |gh-actions| image:: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml/badge.svg + :target: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml + :alt: Linux and OSX build status + +.. |appveyor| image:: https://ci.appveyor.com/api/projects/status/github/psycopg/psycopg2?branch=master&svg=true + :target: https://ci.appveyor.com/project/psycopg/psycopg2/branch/master + :alt: Windows build status diff --git a/build.sh b/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..6f2be2177044b7c86940817ce523e510b53b7db9 --- /dev/null +++ b/build.sh @@ -0,0 +1,295 @@ +#!/bin/bash +####################################################################### +# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd. +# descript: Compile and pack python driver for openGauss +# Return 0 means OK. +# Return 1 means failed. +# version: 2.0 +# date: 2020-08-09 +####################################################################### +declare install_package_format='tar' +declare serverlib_dir='None' + +#detect platform information. 
+PLATFORM=32 +bit=$(getconf LONG_BIT) +if [ "$bit" -eq 64 ]; then + PLATFORM=64 +fi + +#get OS distributed version. +if [ -f "/etc/euleros-release" ]; then + kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr A-Z a-z) + version=$(cat /etc/euleros-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z) +elif [ -f "/etc/openEuler-release" ]; then + kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z) + version=$(cat /etc/openEuler-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z) +elif [ -f "/etc/centos-release" ]; then + kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z) + version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z) +elif [ -f "/etc/kylin-release" ]; then + kernel=$(cat /etc/kylin-release | awk -F ' ' '{print $1}' | tr A-Z a-z) + version=$(cat /etc/kylin-release | awk '{print $6}' | tr A-Z a-z) +else + kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z) + version=$(lsb_release -r | awk -F ' ' '{print $2}') +fi + +if [ X"$kernel" == X"euleros" ]; then + dist_version="EULER" +elif [ X"$kernel" == X"centos" ]; then + dist_version="CENTOS" +elif [ X"$kernel" == X"openeuler" ]; then + dist_version="OPENEULER" +elif [ X"$kernel" == X"kylin" ]; then + dist_version="KYLIN" +else + echo "We only support EulerOS, OPENEULER(aarch64) and CentOS platform." 
+ echo "Kernel is $kernel" + exit 1 +fi + +##default install version storage path +declare db_name_for_package='openGauss' +declare version_number='2.0.0' + +if [ $# = 0 ] ; then + echo "missing option" + print_help + exit 1 +fi + +LOCAL_PATH=${0} +FIRST_CHAR=$(expr substr "$LOCAL_PATH" 1 1) +if [ "$FIRST_CHAR" = "/" ]; then + LOCAL_PATH=${0} +else + LOCAL_PATH="$(pwd)/${LOCAL_PATH}" +fi + +LOCAL_DIR=$(dirname "${LOCAL_PATH}") +######################################################################### +##read command line paramenters +####################################################################### +while [ $# -gt 0 ]; do + case "$1" in + -h|--help) + print_help + exit 1 + ;; + -bd|--serverlib_dir) + if [ "$2"X = X ]; then + echo "no given binarylib directory values" + exit 1 + fi + serverlib_dir=$2 + shift 2 + ;; + *) + echo "Internal Error: option processing error: $1" 1>&2 + echo "please input right paramtenter, the following command may help you" + echo "./build.sh --help or ./build.sh -h" + exit 1 + esac +done + +####################################################################### +## declare all package name +####################################################################### +declare version_string="${db_name_for_package}-${version_number}" +declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit" +declare python_package_name="${package_pre_name}-Python.${install_package_format}.gz" + +declare BUILD_DIR="${LOCAL_DIR}/build" +declare MKGS_OK=0 +SERVERLIBS_PATH="${serverlib_dir}" +PSYCOPG_VERSION=psycopg2-2.9 +declare LOG_FILE="${LOCAL_DIR}/build_psycopg2.log" +declare ERR_MKGS_FAILED=1 +echo "[makepython] $(date +%y-%m-%d' '%T): script dir : ${LOCAL_DIR}" + +####################################################################### +## print help information +####################################################################### +function print_help() +{ + echo "Usage: $0 [OPTION] + -h|--help show help information. 
+ -bd|--serverlib_dir the directory of server binarylibs.
+"
+}
+
+#######################################################################
+# Print log.
+#######################################################################
+log()
+{
+ echo "[Build psycopg2] $(date +%y-%m-%d' '%T): $@"
+ echo "[Build psycopg2] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1
+}
+
+#######################################################################
+# print log and exit.
+#######################################################################
+die()
+{
+ log "$@"
+ echo "$@"
+ exit $ERR_MKGS_FAILED
+}
+
+# clean build python log
+function clean_environment()
+{
+
+ if [ -f "$LOG_FILE" ]; then
+ rm -rf "$LOG_FILE"
+ fi
+
+ if [ -d "${LOCAL_DIR}/build" ]; then
+ rm -rf ${LOCAL_DIR}/build
+ fi
+
+ echo "clean completely"
+}
+
+function check_python()
+{
+ array=(python3 python2 python)
+ for py in ${array[@]}; do
+ ${py} -c 'import setuptools' >> /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ log "choose python: $(which ${py})"
+ PYTHON=${py}
+ version=$(${py} --version 2>&1)
+ PYTHON_VERSION=python${version:7:1}
+ return 0
+ fi
+ done
+ die "the python in your current environment is invalid, please check again and install setuptools."
+}
+
+function change_gaussdb_version()
+{
+ if [ ! -f setup.py ]; then
+ die "not found psycopg2 code in current working directory."
+ fi
+
+ src_text='(pgmajor, pgminor, pgpatch)'
+ dst_text='(9, 2, 4)'
+ sed -i "s/${src_text}/${dst_text}/g" setup.py
+ search_result=$(cat setup.py | grep "$src_text")
+ if [ "${search_result}"X != "X" ]; then
+ die "failed to replace PG_VERSION_NUM at setup.py." 
+ fi +} + +############################################################### +## copy the target to set path +############################################################### +function target_file_copy() +{ + cd ${LOCAL_DIR}/build/lib* + mv psycopg2/_psycopg*.so psycopg2/_psycopg.so + + mkdir -p lib + #copy libraries into lib + cd ./psycopg2 + libs=$(ldd _psycopg.so | awk '{print $3}' | grep $GAUSSHOME/lib) + for lib in ${libs[@]}; do + cp $lib ../lib + done + cd .. + +} + +####################################################################### +# build and install component +####################################################################### +function build_python() +{ + cd ${LOCAL_DIR} + # set GAUSSHOME enviroment variable with BUILD_OPTION + export GAUSSHOME=$SERVERLIBS_PATH + export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH + export PATH=${GAUSSHOME}/bin:${PATH} + + echo "GAUSSHOME: ${GAUSSHOME}" + + change_gaussdb_version + check_python + ${PYTHON} ./setup.py build + if [ $? -ne 0 ]; then + die "failed to compile psycopg2." 
+ fi
+
+ echo "End make python" >> "$LOG_FILE" 2>&1
+}
+
+declare package_command
+#######################################################################
+##select package command according to install_package_format
+#######################################################################
+function select_package_command()
+{
+ case "$install_package_format" in
+ tar)
+ tar='tar'
+ option=' -zcvf'
+ package_command="$tar$option"
+ ;;
+ rpm)
+ rpm='rpm'
+ option=' -i'
+ package_command="$rpm$option"
+ ;;
+ esac
+}
+
+#######################################################################
+##function make_package has two actions
+##1.copy target files into a newly created temporary directory temp
+##2.package all files in the temp directory and rename to destination package_path
+#######################################################################
+function make_package()
+{
+ target_file_copy
+ cd ${BUILD_DIR}/lib*
+ select_package_command
+
+ echo "packaging python..."
+ $package_command "${python_package_name}" ./lib ./psycopg2 >>"$LOG_FILE" 2>&1
+ if [ $? -ne 0 ]; then
+ die "$package_command ${python_package_name} failed"
+ fi
+
+ mv ${python_package_name} ${BUILD_DIR}/
+
+ echo "install python tools is ${python_package_name} of ${BUILD_DIR} directory " >> "$LOG_FILE" 2>&1
+ echo "success!"
+}
+
+#############################################################
+# main function
+#############################################################
+
+# 1. clean environment
+echo "clean environment"
+echo "[makedb] $(date +%y-%m-%d' '%T): remove ${BUILD_DIR}" >>"$LOG_FILE" 2>&1
+clean_environment
+
+
+# 2. build python
+build_python
+
+# 3. make python package
+make_package
+
+# 4. cp python package to output
+mkdir ${LOCAL_DIR}/output
+mv ${BUILD_DIR}/*.tar.gz ${LOCAL_DIR}/output/
+
+echo "now, python driver package has finished!" 
+ +exit 0 + diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f10120683cb890292cce2c27fcc9e066baf75512 --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1,8 @@ +env +src/_build/* +html/* +psycopg2.txt +src/sqlstate_errors.rst + +# Added by psycopg-website to customize published docs +src/_templates/layout.html diff --git a/doc/COPYING.LESSER b/doc/COPYING.LESSER new file mode 100644 index 0000000000000000000000000000000000000000..cca7fc278f5c81ce23a2687208f0d63a6ea44009 --- /dev/null +++ b/doc/COPYING.LESSER @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". 
+ + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. 
You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. 
+ + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4b2752b0e439c0d7c5c090c9e05b290d8206cc3f --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,39 @@ +.PHONY: env help clean html package doctest + +docs: html + +check: doctest + +# The environment is currently required to build the documentation. +# It is not clean by 'make clean' + +PYTHON := python$(PYTHON_VERSION) +PYTHON_VERSION ?= $(shell $(PYTHON) -c 'import sys; print ("%d.%d" % sys.version_info[:2])') +BUILD_DIR = $(shell pwd)/../build/lib.$(PYTHON_VERSION) + +SPHINXBUILD ?= $$(pwd)/env/bin/sphinx-build +SPHOPTS = SPHINXBUILD=$(SPHINXBUILD) + +html: package src/sqlstate_errors.rst + $(MAKE) $(SPHOPTS) -C src $@ + cp -r src/_build/html . + +src/sqlstate_errors.rst: ../psycopg/sqlstate_errors.h $(BUILD_DIR) + env/bin/python src/tools/make_sqlstate_docs.py $< > $@ + +$(BUILD_DIR): + $(MAKE) PYTHON=$(PYTHON) -C .. package + +doctest: + $(MAKE) PYTHON=$(PYTHON) -C .. 
package + $(MAKE) $(SPHOPTS) -C src $@ + +clean: + $(MAKE) $(SPHOPTS) -C src $@ + rm -rf html src/sqlstate_errors.rst + +env: requirements.txt + virtualenv -p $(PYTHON) env + ./env/bin/pip install -r requirements.txt + echo "$$(pwd)/../build/lib.$(PYTHON_VERSION)" \ + > env/lib/python$(PYTHON_VERSION)/site-packages/psycopg.pth diff --git a/doc/README.rst b/doc/README.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c435b374cc56c3c94129ee2621f99c544408c9a --- /dev/null +++ b/doc/README.rst @@ -0,0 +1,20 @@ +How to build psycopg documentation +---------------------------------- + +Building the documentation usually requires building the library too for +introspection, so you will need the same prerequisites_. The only extra +prerequisite is virtualenv_: the packages needed to build the docs will be +installed when building the env. + +.. _prerequisites: https://www.psycopg.org/docs/install.html#install-from-source +.. _virtualenv: https://virtualenv.pypa.io/en/latest/ + +Build the env once with:: + + make env + +Then you can build the documentation with:: + + make + +You should find the rendered documentation in the ``html`` directory. diff --git a/doc/SUCCESS b/doc/SUCCESS new file mode 100644 index 0000000000000000000000000000000000000000..de45991846b2855079f92ebb02d685d8e8493107 --- /dev/null +++ b/doc/SUCCESS @@ -0,0 +1,114 @@ +From: Jack Moffitt +To: Psycopg Mailing List +Subject: Re: [Psycopg] preparing for 1.0 +Date: 22 Oct 2001 11:16:21 -0600 + +www.vorbis.com is serving from 5-10k pages per day with psycopg serving +data for most of that. + +I plan to use it for several of our other sites, so that number will +increase. + +I've never had a single problem (that wasn't my fault) besides those +segfaults, and those are now gone as well, and I've been using psycopg +since June (around 0.99.2?). + +jack. 
+ + +From: Yury Don +To: Psycopg Mailing List +Subject: Re: [Psycopg] preparing for 1.0 +Date: 23 Oct 2001 09:53:11 +0600 + +We use psycopg and psycopg zope adapter since fisrt public +release (it seems version 0.4). Now it works on 3 our sites and in intranet +applications. We had few problems, but all problems were quickly +solved. The strong side of psycopg is that it's code is well organized +and easy to understand. When I found a problem with non-ISO datestyle in first +version of psycopg, it took for me 15 or 20 minutes to learn code and +to solve the problem, even thouth my knowledge of c were poor. + +BTW, segfault with dictfetchall on particular data set (see [Psycopg] +dictfetchXXX() problems) disappeared in 0.99.8pre2. + +-- +Best regards, +Yury Don + + +From: Tom Jenkins +To: Federico Di Gregorio +Cc: Psycopg Mailing List +Subject: Re: [Psycopg] preparing for 1.0 +Date: 23 Oct 2001 08:25:52 -0400 + +The US Govt Department of Labor's Office of Disability Employment +Policy's DisabilityDirect website is run on zope and zpsycopg. + + +From: Scott Leerssen +To: Federico Di Gregorio +Subject: Re: [Psycopg] preparing for 1.0 +Date: 23 Oct 2001 09:56:10 -0400 + +Racemi's load management software infrastructure uses psycopg to handle +complex server allocation decisions, plus storage and access of +environmental conditions and accounting records for potentially +thousands of servers. Psycopg has, to this point, been the only +Python/PostGreSQL interface that could handle the scaling required for +our multithreaded applications. + +Scott + + +From: Andre Schubert +To: Federico Di Gregorio +Cc: Psycopg Mailing List +Subject: Re: [Psycopg] preparing for 1.0 +Date: 23 Oct 2001 11:46:07 +0200 + +i have changed the psycopg version to 0.99.8pre2 on all devel-machines +and all segfaults are gone. after my holiday i wil change to 0.99.8pre2 +or 1.0 on our production-server. +this server contains several web-sites which are all connected to +postgres over ZPsycopgDA. 
+ +thanks as + + +From: Fred Wilson Horch +To: +Subject: [Psycopg] Success story for psycopg +Date: 23 Oct 2001 10:59:17 -0400 + +Due to various quirks of PyGreSQL and PoPy, EcoAccess has been looking for +a reliable, fast and relatively bug-free Python-PostgreSQL interface for +our project. + +Binary support in psycopg, along with the umlimited tuple size in +PostgreSQL 7.1, allowed us to quickly prototype a database-backed file +storage web application, which we're using for file sharing among our +staff and volunteers. Using a database backend instead of a file system +allows us to easily enrich the meta-information associated with each file +and simplifies our data handling routines. + +We've been impressed by the responsiveness of the psycopg team to bug +reports and feature requests, and we're looking forward to using psycopg +as the Python interface for additional database-backed web applications. + +Keep up the good work! +-- +Fred Wilson Horch mailto:fhorch@ecoaccess.org +Executive Director, EcoAccess http://ecoaccess.org/ + + +From: Damon Fasching +To: Michele Comitini +Cc: fog@debian.org +Subject: Re: How does one create a database within Python using psycopg? +Date: 25 Feb 2002 17:39:41 -0800 + +[snip] +btw I checked out 4 different Python-PostgreSQL packages. psycopg is the +only one which built and imported w/o any trouble! (At least for me.) diff --git a/doc/pep-0249.txt b/doc/pep-0249.txt new file mode 100644 index 0000000000000000000000000000000000000000..e74fd0dbc4caf55a62efa1fa3351206b19a2b8dc --- /dev/null +++ b/doc/pep-0249.txt @@ -0,0 +1,1005 @@ +PEP: 249 +Title: Python Database API Specification v2.0 +Version: $Revision: 1555 $ +Author: db-sig@python.org (Python Database SIG) +Editor: mal@lemburg.com (Marc-Andre Lemburg) +Status: Final +Type: Informational +Replaces: 248 +Release-Date: 07 Apr 1999 + +Introduction + + This API has been defined to encourage similarity between the + Python modules that are used to access databases. 
By doing this, + we hope to achieve a consistency leading to more easily understood + modules, code that is generally more portable across databases, + and a broader reach of database connectivity from Python. + + The interface specification consists of several sections: + + * Module Interface + * Connection Objects + * Cursor Objects + * DBI Helper Objects + * Type Objects and Constructors + * Implementation Hints + * Major Changes from 1.0 to 2.0 + + Comments and questions about this specification may be directed + to the SIG for Database Interfacing with Python + (db-sig@python.org). + + For more information on database interfacing with Python and + available packages see the Database Topic + Guide at http://www.python.org/topics/database/. + + This document describes the Python Database API Specification 2.0 + and a set of common optional extensions. The previous version 1.0 + version is still available as reference, in PEP 248. Package + writers are encouraged to use this version of the specification as + basis for new interfaces. + +Module Interface + + Access to the database is made available through connection + objects. The module must provide the following constructor for + these: + + connect(parameters...) + + Constructor for creating a connection to the database. + Returns a Connection Object. It takes a number of + parameters which are database dependent. [1] + + These module globals must be defined: + + apilevel + + String constant stating the supported DB API level. + Currently only the strings '1.0' and '2.0' are allowed. + + If not given, a DB-API 1.0 level interface should be + assumed. + + threadsafety + + Integer constant stating the level of thread safety the + interface supports. Possible values are: + + 0 Threads may not share the module. + 1 Threads may share the module, but not connections. + 2 Threads may share the module and connections. + 3 Threads may share the module, connections and + cursors. 
+ + Sharing in the above context means that two threads may + use a resource without wrapping it using a mutex semaphore + to implement resource locking. Note that you cannot always + make external resources thread safe by managing access + using a mutex: the resource may rely on global variables + or other external sources that are beyond your control. + + paramstyle + + String constant stating the type of parameter marker + formatting expected by the interface. Possible values are + [2]: + + 'qmark' Question mark style, + e.g. '...WHERE name=?' + 'numeric' Numeric, positional style, + e.g. '...WHERE name=:1' + 'named' Named style, + e.g. '...WHERE name=:name' + 'format' ANSI C printf format codes, + e.g. '...WHERE name=%s' + 'pyformat' Python extended format codes, + e.g. '...WHERE name=%(name)s' + + The module should make all error information available through + these exceptions or subclasses thereof: + + Warning + + Exception raised for important warnings like data + truncations while inserting, etc. It must be a subclass of + the Python StandardError (defined in the module + exceptions). + + Error + + Exception that is the base class of all other error + exceptions. You can use this to catch all errors with one + single 'except' statement. Warnings are not considered + errors and thus should not use this class as base. It must + be a subclass of the Python StandardError (defined in the + module exceptions). + + InterfaceError + + Exception raised for errors that are related to the + database interface rather than the database itself. It + must be a subclass of Error. + + DatabaseError + + Exception raised for errors that are related to the + database. It must be a subclass of Error. + + DataError + + Exception raised for errors that are due to problems with + the processed data like division by zero, numeric value + out of range, etc. It must be a subclass of DatabaseError. 
+ + OperationalError + + Exception raised for errors that are related to the + database's operation and not necessarily under the control + of the programmer, e.g. an unexpected disconnect occurs, + the data source name is not found, a transaction could not + be processed, a memory allocation error occurred during + processing, etc. It must be a subclass of DatabaseError. + + IntegrityError + + Exception raised when the relational integrity of the + database is affected, e.g. a foreign key check fails. It + must be a subclass of DatabaseError. + + InternalError + + Exception raised when the database encounters an internal + error, e.g. the cursor is not valid anymore, the + transaction is out of sync, etc. It must be a subclass of + DatabaseError. + + ProgrammingError + + Exception raised for programming errors, e.g. table not + found or already exists, syntax error in the SQL + statement, wrong number of parameters specified, etc. It + must be a subclass of DatabaseError. + + NotSupportedError + + Exception raised in case a method or database API was used + which is not supported by the database, e.g. requesting a + .rollback() on a connection that does not support + transaction or has transactions turned off. It must be a + subclass of DatabaseError. + + This is the exception inheritance layout: + + StandardError + |__Warning + |__Error + |__InterfaceError + |__DatabaseError + |__DataError + |__OperationalError + |__IntegrityError + |__InternalError + |__ProgrammingError + |__NotSupportedError + + Note: The values of these exceptions are not defined. They should + give the user a fairly good idea of what went wrong, though. + + +Connection Objects + + Connection Objects should respond to the following methods: + + .close() + + Close the connection now (rather than whenever __del__ is + called). The connection will be unusable from this point + forward; an Error (or subclass) exception will be raised + if any operation is attempted with the connection. 
The + same applies to all cursor objects trying to use the + connection. Note that closing a connection without + committing the changes first will cause an implicit + rollback to be performed. + + + .commit() + + Commit any pending transaction to the database. Note that + if the database supports an auto-commit feature, this must + be initially off. An interface method may be provided to + turn it back on. + + Database modules that do not support transactions should + implement this method with void functionality. + + .rollback() + + This method is optional since not all databases provide + transaction support. [3] + + In case a database does provide transactions this method + causes the the database to roll back to the start of any + pending transaction. Closing a connection without + committing the changes first will cause an implicit + rollback to be performed. + + .cursor() + + Return a new Cursor Object using the connection. If the + database does not provide a direct cursor concept, the + module will have to emulate cursors using other means to + the extent needed by this specification. [4] + + +Cursor Objects + + These objects represent a database cursor, which is used to + manage the context of a fetch operation. Cursors created from + the same connection are not isolated, i.e., any changes + done to the database by a cursor are immediately visible by the + other cursors. Cursors created from different connections can + or can not be isolated, depending on how the transaction support + is implemented (see also the connection's rollback() and commit() + methods.) + + Cursor Objects should respond to the following methods and + attributes: + + .description + + This read-only attribute is a sequence of 7-item + sequences. Each of these sequences contains information + describing one result column: (name, type_code, + display_size, internal_size, precision, scale, + null_ok). 
The first two items (name and type_code) are + mandatory, the other five are optional and must be set to + None if meaningful values are not provided. + + This attribute will be None for operations that + do not return rows or if the cursor has not had an + operation invoked via the executeXXX() method yet. + + The type_code can be interpreted by comparing it to the + Type Objects specified in the section below. + + .rowcount + + This read-only attribute specifies the number of rows that + the last executeXXX() produced (for DQL statements like + 'select') or affected (for DML statements like 'update' or + 'insert'). + + The attribute is -1 in case no executeXXX() has been + performed on the cursor or the rowcount of the last + operation is not determinable by the interface. [7] + + Note: Future versions of the DB API specification could + redefine the latter case to have the object return None + instead of -1. + + .callproc(procname[,parameters]) + + (This method is optional since not all databases provide + stored procedures. [3]) + + Call a stored database procedure with the given name. The + sequence of parameters must contain one entry for each + argument that the procedure expects. The result of the + call is returned as modified copy of the input + sequence. Input parameters are left untouched, output and + input/output parameters replaced with possibly new values. + + The procedure may also provide a result set as + output. This must then be made available through the + standard fetchXXX() methods. + + .close() + + Close the cursor now (rather than whenever __del__ is + called). The cursor will be unusable from this point + forward; an Error (or subclass) exception will be raised + if any operation is attempted with the cursor. + + .execute(operation[,parameters]) + + Prepare and execute a database operation (query or + command). Parameters may be provided as sequence or + mapping and will be bound to variables in the operation. 
+
+ Variables are specified in a database-specific notation
+ (see the module's paramstyle attribute for details). [5]
+
+ A reference to the operation will be retained by the
+ cursor. If the same operation object is passed in again,
+ then the cursor can optimize its behavior. This is most
+ effective for algorithms where the same operation is used,
+ but different parameters are bound to it (many times).
+
+ For maximum efficiency when reusing an operation, it is
+ best to use the setinputsizes() method to specify the
+ parameter types and sizes ahead of time. It is legal for
+ a parameter to not match the predefined information; the
+ implementation should compensate, possibly with a loss of
+ efficiency.
+
+ The parameters may also be specified as list of tuples to
+ e.g. insert multiple rows in a single operation, but this
+ kind of usage is deprecated: executemany() should be used
+ instead.
+
+ Return values are not defined.
+
+ .executemany(operation,seq_of_parameters)
+
+ Prepare a database operation (query or command) and then
+ execute it against all parameter sequences or mappings
+ found in the sequence seq_of_parameters.
+
+ Modules are free to implement this method using multiple
+ calls to the execute() method or by using array operations
+ to have the database process the sequence as a whole in
+ one call.
+
+ Use of this method for an operation which produces one or
+ more result sets constitutes undefined behavior, and the
+ implementation is permitted (but not required) to raise
+ an exception when it detects that a result set has been
+ created by an invocation of the operation.
+
+ The same comments as for execute() also apply accordingly
+ to this method.
+
+ Return values are not defined.
+
+ .fetchone()
+
+ Fetch the next row of a query result set, returning a
+ single sequence, or None when no more data is
+ available.
[6] + + An Error (or subclass) exception is raised if the previous + call to executeXXX() did not produce any result set or no + call was issued yet. + + fetchmany([size=cursor.arraysize]) + + Fetch the next set of rows of a query result, returning a + sequence of sequences (e.g. a list of tuples). An empty + sequence is returned when no more rows are available. + + The number of rows to fetch per call is specified by the + parameter. If it is not given, the cursor's arraysize + determines the number of rows to be fetched. The method + should try to fetch as many rows as indicated by the size + parameter. If this is not possible due to the specified + number of rows not being available, fewer rows may be + returned. + + An Error (or subclass) exception is raised if the previous + call to executeXXX() did not produce any result set or no + call was issued yet. + + Note there are performance considerations involved with + the size parameter. For optimal performance, it is + usually best to use the arraysize attribute. If the size + parameter is used, then it is best for it to retain the + same value from one fetchmany() call to the next. + + .fetchall() + + Fetch all (remaining) rows of a query result, returning + them as a sequence of sequences (e.g. a list of tuples). + Note that the cursor's arraysize attribute can affect the + performance of this operation. + + An Error (or subclass) exception is raised if the previous + call to executeXXX() did not produce any result set or no + call was issued yet. + + .nextset() + + (This method is optional since not all databases support + multiple result sets. [3]) + + This method will make the cursor skip to the next + available set, discarding any remaining rows from the + current set. + + If there are no more sets, the method returns + None. Otherwise, it returns a true value and subsequent + calls to the fetch methods will return rows from the next + result set. 
+ + An Error (or subclass) exception is raised if the previous + call to executeXXX() did not produce any result set or no + call was issued yet. + + .arraysize + + This read/write attribute specifies the number of rows to + fetch at a time with fetchmany(). It defaults to 1 meaning + to fetch a single row at a time. + + Implementations must observe this value with respect to + the fetchmany() method, but are free to interact with the + database a single row at a time. It may also be used in + the implementation of executemany(). + + .setinputsizes(sizes) + + This can be used before a call to executeXXX() to + predefine memory areas for the operation's parameters. + + sizes is specified as a sequence -- one item for each + input parameter. The item should be a Type Object that + corresponds to the input that will be used, or it should + be an integer specifying the maximum length of a string + parameter. If the item is None, then no predefined memory + area will be reserved for that column (this is useful to + avoid predefined areas for large inputs). + + This method would be used before the executeXXX() method + is invoked. + + Implementations are free to have this method do nothing + and users are free to not use it. + + .setoutputsize(size[,column]) + + Set a column buffer size for fetches of large columns + (e.g. LONGs, BLOBs, etc.). The column is specified as an + index into the result sequence. Not specifying the column + will set the default size for all large columns in the + cursor. + + This method would be used before the executeXXX() method + is invoked. + + Implementations are free to have this method do nothing + and users are free to not use it. + + +Type Objects and Constructors + + Many databases need to have the input in a particular format for + binding to an operation's input parameters. For example, if an + input is destined for a DATE column, then it must be bound to the + database in a particular string format. 
Similar problems exist + for "Row ID" columns or large binary items (e.g. blobs or RAW + columns). This presents problems for Python since the parameters + to the executeXXX() method are untyped. When the database module + sees a Python string object, it doesn't know if it should be bound + as a simple CHAR column, as a raw BINARY item, or as a DATE. + + To overcome this problem, a module must provide the constructors + defined below to create objects that can hold special values. + When passed to the cursor methods, the module can then detect the + proper type of the input parameter and bind it accordingly. + + A Cursor Object's description attribute returns information about + each of the result columns of a query. The type_code must compare + equal to one of Type Objects defined below. Type Objects may be + equal to more than one type code (e.g. DATETIME could be equal to + the type codes for date, time and timestamp columns; see the + Implementation Hints below for details). + + The module exports the following constructors and singletons: + + Date(year,month,day) + + This function constructs an object holding a date value. + + Time(hour,minute,second) + + This function constructs an object holding a time value. + + Timestamp(year,month,day,hour,minute,second) + + This function constructs an object holding a time stamp + value. + + DateFromTicks(ticks) + + This function constructs an object holding a date value + from the given ticks value (number of seconds since the + epoch; see the documentation of the standard Python time + module for details). + + TimeFromTicks(ticks) + + This function constructs an object holding a time value + from the given ticks value (number of seconds since the + epoch; see the documentation of the standard Python time + module for details). 
+ + TimestampFromTicks(ticks) + + This function constructs an object holding a time stamp + value from the given ticks value (number of seconds since + the epoch; see the documentation of the standard Python + time module for details). + + Binary(string) + + This function constructs an object capable of holding a + binary (long) string value. + + + STRING + + This type object is used to describe columns in a database + that are string-based (e.g. CHAR). + + BINARY + + This type object is used to describe (long) binary columns + in a database (e.g. LONG, RAW, BLOBs). + + NUMBER + + This type object is used to describe numeric columns in a + database. + + DATETIME + + This type object is used to describe date/time columns in + a database. + + ROWID + + This type object is used to describe the "Row ID" column + in a database. + + SQL NULL values are represented by the Python None singleton on + input and output. + + Note: Usage of Unix ticks for database interfacing can cause + troubles because of the limited date range they cover. + + +Implementation Hints for Module Authors + + * The preferred object types for the date/time objects are those + defined in the mxDateTime package. It provides all necessary + constructors and methods both at Python and C level. + + * The preferred object type for Binary objects are the + buffer types available in standard Python starting with + version 1.5.2. Please see the Python documentation for + details. For information about the the C interface have a + look at Include/bufferobject.h and + Objects/bufferobject.c in the Python source + distribution. + + * Starting with Python 2.3, module authors can also use the object + types defined in the standard datetime module for date/time + processing. However, it should be noted that this does not + expose a C API like mxDateTime does which means that integration + with C based database modules is more difficult. 
+ + * Here is a sample implementation of the Unix ticks based + constructors for date/time delegating work to the generic + constructors: + + import time + + def DateFromTicks(ticks): + return apply(Date,time.localtime(ticks)[:3]) + + def TimeFromTicks(ticks): + return apply(Time,time.localtime(ticks)[3:6]) + + def TimestampFromTicks(ticks): + return apply(Timestamp,time.localtime(ticks)[:6]) + + * This Python class allows implementing the above type + objects even though the description type code field yields + multiple values for on type object: + + class DBAPITypeObject: + def __init__(self,*values): + self.values = values + def __cmp__(self,other): + if other in self.values: + return 0 + if other < self.values: + return 1 + else: + return -1 + + The resulting type object compares equal to all values + passed to the constructor. + + * Here is a snippet of Python code that implements the exception + hierarchy defined above: + + import exceptions + + class Error(exceptions.StandardError): + pass + + class Warning(exceptions.StandardError): + pass + + class InterfaceError(Error): + pass + + class DatabaseError(Error): + pass + + class InternalError(DatabaseError): + pass + + class OperationalError(DatabaseError): + pass + + class ProgrammingError(DatabaseError): + pass + + class IntegrityError(DatabaseError): + pass + + class DataError(DatabaseError): + pass + + class NotSupportedError(DatabaseError): + pass + + In C you can use the PyErr_NewException(fullname, + base, NULL) API to create the exception objects. + + +Optional DB API Extensions + + During the lifetime of DB API 2.0, module authors have often + extended their implementations beyond what is required by this DB + API specification. To enhance compatibility and to provide a clean + upgrade path to possible future versions of the specification, + this section defines a set of common extensions to the core DB API + 2.0 specification. 
+
+ As with all DB API optional features, the database module authors
+ are free to not implement these additional attributes and methods
+ (using them will then result in an AttributeError) or to raise a
+ NotSupportedError in case the availability can only be checked at
+ run-time.
+
+ It has been proposed to make usage of these extensions optionally
+ visible to the programmer by issuing Python warnings through the
+ Python warning framework. To make this feature useful, the warning
+ messages must be standardized in order to be able to mask them. These
+ standard messages are referred to below as "Warning Message".
+
+ Cursor Attribute .rownumber
+
+ This read-only attribute should provide the current 0-based
+ index of the cursor in the result set or None if the index cannot
+ be determined.
+
+ The index can be seen as index of the cursor in a sequence (the
+ result set). The next fetch operation will fetch the row
+ indexed by .rownumber in that sequence.
+
+ Warning Message: "DB-API extension cursor.rownumber used"
+
+ Connection Attributes .Error, .ProgrammingError, etc.
+
+ All exception classes defined by the DB API standard should be
+ exposed on the Connection objects as attributes (in addition
+ to being available at module scope).
+
+ These attributes simplify error handling in multi-connection
+ environments.
+
+ Warning Message: "DB-API extension connection.<exception> used"
+
+ Cursor Attributes .connection
+
+ This read-only attribute returns a reference to the Connection
+ object on which the cursor was created.
+
+ The attribute simplifies writing polymorph code in
+ multi-connection environments.
+
+ Warning Message: "DB-API extension cursor.connection used"
+
+ Cursor Method .scroll(value[,mode='relative'])
+
+ Scroll the cursor in the result set to a new position according
+ to mode.
+ + If mode is 'relative' (default), value is taken as offset to + the current position in the result set, if set to 'absolute', + value states an absolute target position. + + An IndexError should be raised in case a scroll operation would + leave the result set. In this case, the cursor position is left + undefined (ideal would be to not move the cursor at all). + + Note: This method should use native scrollable cursors, if + available , or revert to an emulation for forward-only + scrollable cursors. The method may raise NotSupportedErrors to + signal that a specific operation is not supported by the + database (e.g. backward scrolling). + + Warning Message: "DB-API extension cursor.scroll() used" + + Cursor Attribute .messages + + This is a Python list object to which the interface appends + tuples (exception class, exception value) for all messages + which the interfaces receives from the underlying database for + this cursor. + + The list is cleared by all standard cursor methods calls (prior + to executing the call) except for the .fetchXXX() calls + automatically to avoid excessive memory usage and can also be + cleared by executing "del cursor.messages[:]". + + All error and warning messages generated by the database are + placed into this list, so checking the list allows the user to + verify correct operation of the method calls. + + The aim of this attribute is to eliminate the need for a + Warning exception which often causes problems (some warnings + really only have informational character). + + Warning Message: "DB-API extension cursor.messages used" + + Connection Attribute .messages + + Same as cursor.messages except that the messages in the list + are connection oriented. + + The list is cleared automatically by all standard connection + methods calls (prior to executing the call) to avoid excessive + memory usage and can also be cleared by executing "del + connection.messages[:]". 
+
+ Warning Message: "DB-API extension connection.messages used"
+
+ Cursor Method .next()
+
+ Return the next row from the currently executing SQL statement
+ using the same semantics as .fetchone(). A StopIteration
+ exception is raised when the result set is exhausted for Python
+ versions 2.2 and later. Previous versions don't have the
+ StopIteration exception and so the method should raise an
+ IndexError instead.
+
+ Warning Message: "DB-API extension cursor.next() used"
+
+ Cursor Method .__iter__()
+
+ Return self to make cursors compatible to the iteration protocol.
+
+ Warning Message: "DB-API extension cursor.__iter__() used"
+
+ Cursor Attribute .lastrowid
+
+ This read-only attribute provides the rowid of the last
+ modified row (most databases return a rowid only when a single
+ INSERT operation is performed). If the operation does not set
+ a rowid or if the database does not support rowids, this
+ attribute should be set to None.
+
+ The semantics of .lastrowid are undefined in case the last
+ executed statement modified more than one row, e.g. when
+ using INSERT with .executemany().
+
+ Warning Message: "DB-API extension cursor.lastrowid used"
+
+
+Optional Error Handling Extension
+
+ The core DB API specification only introduces a set of exceptions
+ which can be raised to report errors to the user. In some cases,
+ exceptions may be too disruptive for the flow of a program or even
+ render execution impossible.
+
+ For these cases and in order to simplify error handling when
+ dealing with databases, database module authors may choose to
+ implement user definable error handlers. This section describes a
+ standard way of defining these error handlers.
+
+ Cursor/Connection Attribute .errorhandler
+
+ Read/write attribute which references an error handler to call
+ in case an error condition is met.
+ + The handler must be a Python callable taking the following + arguments: errorhandler(connection, cursor, errorclass, + errorvalue) where connection is a reference to the connection + on which the cursor operates, cursor a reference to the cursor + (or None in case the error does not apply to a cursor), + errorclass is an error class which to instantiate using + errorvalue as construction argument. + + The standard error handler should add the error information to + the appropriate .messages attribute (connection.messages or + cursor.messages) and raise the exception defined by the given + errorclass and errorvalue parameters. + + If no errorhandler is set (the attribute is None), the standard + error handling scheme as outlined above, should be applied. + + Warning Message: "DB-API extension .errorhandler used" + + Cursors should inherit the .errorhandler setting from their + connection objects at cursor creation time. + + +Frequently Asked Questions + + The database SIG often sees reoccurring questions about the DB API + specification. This section covers some of the issues people + sometimes have with the specification. + + Question: + + How can I construct a dictionary out of the tuples returned by + .fetchxxx(): + + Answer: + + There are several existing tools available which provide + helpers for this task. Most of them use the approach of using + the column names defined in the cursor attribute .description + as basis for the keys in the row dictionary. + + Note that the reason for not extending the DB API specification + to also support dictionary return values for the .fetchxxx() + methods is that this approach has several drawbacks: + + * Some databases don't support case-sensitive column names or + auto-convert them to all lowercase or all uppercase + characters. + + * Columns in the result set which are generated by the query + (e.g. 
using SQL functions) don't map to table column names + and databases usually generate names for these columns in a + very database specific way. + + As a result, accessing the columns through dictionary keys + varies between databases and makes writing portable code + impossible. + + +Major Changes from Version 1.0 to Version 2.0 + + The Python Database API 2.0 introduces a few major changes + compared to the 1.0 version. Because some of these changes will + cause existing DB API 1.0 based scripts to break, the major + version number was adjusted to reflect this change. + + These are the most important changes from 1.0 to 2.0: + + * The need for a separate dbi module was dropped and the + functionality merged into the module interface itself. + + * New constructors and Type Objects were added for date/time + values, the RAW Type Object was renamed to BINARY. The + resulting set should cover all basic data types commonly + found in modern SQL databases. + + * New constants (apilevel, threadlevel, paramstyle) and + methods (executemany, nextset) were added to provide better + database bindings. + + * The semantics of .callproc() needed to call stored + procedures are now clearly defined. + + * The definition of the .execute() return value changed. + Previously, the return value was based on the SQL statement + type (which was hard to implement right) -- it is undefined + now; use the more flexible .rowcount attribute + instead. Modules are free to return the old style return + values, but these are no longer mandated by the + specification and should be considered database interface + dependent. + + * Class based exceptions were incorporated into the + specification. Module implementors are free to extend the + exception layout defined in this specification by + subclassing the defined exception classes. + + Post-publishing additions to the DB API 2.0 specification: + + * Additional optional DB API extensions to the set of + core functionality were specified. 
+ + +Open Issues + + Although the version 2.0 specification clarifies a lot of + questions that were left open in the 1.0 version, there are still + some remaining issues which should be addressed in future + versions: + + * Define a useful return value for .nextset() for the case where + a new result set is available. + + * Create a fixed point numeric type for use as loss-less + monetary and decimal interchange format. + + +Footnotes + + [1] As a guideline the connection constructor parameters should be + implemented as keyword parameters for more intuitive use and + follow this order of parameters: + + dsn Data source name as string + user User name as string (optional) + password Password as string (optional) + host Hostname (optional) + database Database name (optional) + + E.g. a connect could look like this: + + connect(dsn='myhost:MYDB',user='guido',password='234$') + + [2] Module implementors should prefer 'numeric', 'named' or + 'pyformat' over the other formats because these offer more + clarity and flexibility. + + [3] If the database does not support the functionality required + by the method, the interface should throw an exception in + case the method is used. + + The preferred approach is to not implement the method and + thus have Python generate an AttributeError in + case the method is requested. This allows the programmer to + check for database capabilities using the standard + hasattr() function. + + For some dynamically configured interfaces it may not be + appropriate to require dynamically making the method + available. These interfaces should then raise a + NotSupportedError to indicate the non-ability + to perform the roll back when the method is invoked. + + [4] a database interface may choose to support named cursors by + allowing a string argument to the method. This feature is + not part of the specification, since it complicates + semantics of the .fetchXXX() methods. 
+ + [5] The module will use the __getitem__ method of the parameters + object to map either positions (integers) or names (strings) + to parameter values. This allows for both sequences and + mappings to be used as input. + + The term "bound" refers to the process of binding an input + value to a database execution buffer. In practical terms, + this means that the input value is directly used as a value + in the operation. The client should not be required to + "escape" the value so that it can be used -- the value + should be equal to the actual database value. + + [6] Note that the interface may implement row fetching using + arrays and other optimizations. It is not + guaranteed that a call to this method will only move the + associated cursor forward by one row. + + [7] The rowcount attribute may be coded in a way that updates + its value dynamically. This can be useful for databases that + return usable rowcount values only after the first call to + a .fetchXXX() method. + +Acknowledgements + + Many thanks go to Andrew Kuchling who converted the Python + Database API Specification 2.0 from the original HTML format into + the PEP format. + +Copyright + + This document has been placed in the Public Domain. + + + +Local Variables: +mode: indented-text +indent-tabs-mode: nil +End: diff --git a/doc/release.rst b/doc/release.rst new file mode 100644 index 0000000000000000000000000000000000000000..c61e1214e7b61c3c6447bc2b37ff72efb7fc7240 --- /dev/null +++ b/doc/release.rst @@ -0,0 +1,82 @@ +How to make a psycopg2 release +============================== + +- Edit ``setup.py`` and set a stable version release. Use PEP 440 to choose + version numbers, e.g. + + - ``2.7``: a new major release, new features + - ``2.7.1``: a bugfix release + - ``2.7.1.1``: a release to fix packaging problems + - ``2.7.2.dev0``: version held during development, non-public test packages... 
+ - ``2.8b1``: a beta for public tests + + In the rest of this document we assume you have exported the version number + into an environment variable, e.g.:: + + $ export VERSION=2.8.4 + +- Push psycopg2 to master or to the maint branch. Make sure tests on `GitHub + Actions`__ and AppVeyor__ pass. + +.. __: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml +.. __: https://ci.appveyor.com/project/psycopg/psycopg2 + +- Create a signed tag with the content of the relevant NEWS bit and push it. + E.g.:: + + # Tag name will be 2_8_4 + $ git tag -a -s ${VERSION//\./_} + + Psycopg 2.8.4 released + + What's new in psycopg 2.8.4 + --------------------------- + + New features: + + - Fixed bug blah (:ticket:`#42`). + ... + +- Create the packages: + + - On GitHub Actions run manually a `package build workflow`__. + + - On Appveyor change the `build settings`__ and replace the custom + configuration file name from ``.appveyor/tests.yml`` to + ``.appveyor/packages.yml`` (yeah, that sucks a bit. Remember to put it + back to testing). + +.. __: https://github.com/psycopg/psycopg2/actions/workflows/packages.yml +.. __: https://ci.appveyor.com/project/psycopg/psycopg2/settings + +- When the workflows have finished download the packages using the + ``download_packages_{github|appveyor}.py`` scripts from the + ``scripts/build`` directory. They will be saved in a + ``packages/psycopg2-${VERSION}`` directory. + +- Remove the ``.exe`` from the dir, because we don't want to upload them on + PyPI:: + + $ rm -v psycopg2-${VERSION}/*.exe + +- Only for stable packages: upload the signed packages on PyPI:: + + $ twine upload -s psycopg2-${VERSION}/* + +- Create a release and release notes in the psycopg website, announce to + psycopg and pgsql-announce mailing lists. + +- Edit ``setup.py`` changing the version again (e.g. go to ``2.8.5.dev0``). 
+ + +Releasing test packages +----------------------- + +Test packages may be uploaded on the `PyPI testing site`__ using:: + + $ twine upload -s -r testpypi psycopg2-${VERSION}/* + +assuming `proper configuration`__ of ``~/.pypirc``. + +.. __: https://test.pypi.org/project/psycopg2/ +.. __: https://wiki.python.org/moin/TestPyPI diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..9449985d60ec4d9d027db873f2f86d7a30bef814 --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,8 @@ +# Packages only needed to build the docs +Pygments>=2.2,<2.3 +Sphinx>=1.6,<=1.7 +sphinx-better-theme>=0.1.5,<0.2 + +# 0.15.2 affected by https://sourceforge.net/p/docutils/bugs/353/ +# Can update to 0.16 after release (currently in rc) but must update Sphinx too +docutils<0.15 diff --git a/doc/src/Makefile b/doc/src/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..53d0680d945b9dd0c6f1a61b9169efb0e2751565 --- /dev/null +++ b/doc/src/Makefile @@ -0,0 +1,99 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# DSN for the doctest database +PSYCOPG2_DSN="user=postgres dbname=test" + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+ +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + -rm -rf ./html/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text pages are in $(BUILDDIR)/text." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." 
+ +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/psycopg.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/psycopg.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + PSYCOPG2_DSN=$(PSYCOPG2_DSN) \ + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
diff --git a/doc/src/_static/psycopg.css b/doc/src/_static/psycopg.css new file mode 100644 index 0000000000000000000000000000000000000000..f7ff7560c5987acc9b361b1bd28c39fed0e46379 --- /dev/null +++ b/doc/src/_static/psycopg.css @@ -0,0 +1,136 @@ +blockquote { + font-style: italic; +} + +div.admonition-todo { + background-color: #ffa; + border: 1px solid #ee2; +} + +div.dbapi-extension { + background-color: #eef; + border: 1px solid #aaf; +} + +code.sql, +tt.sql { + font-size: 1em; + background-color: transparent; +} + +a > code.sql, +a > tt.sql { + font-weight: normal; +} + +a > code.sql:hover, +a > tt.sql:hover { + text-decoration: underline; +} + +dl.faq dt { + font-weight: bold; +} + +table.data-types div.line-block { + margin-bottom: 0; +} + + +/* better theme customisation */ + +body { + background-color: #216464; +} + +header, .related, .document, footer { + background-color: white; +} + +header h1 { + font-size: 150%; + margin-bottom: 0; + padding: 0.5rem 10px 0.5rem 10px; +} + +h1, h2, h3 { + font-weight: normal; +} + +.body h1, .body h2, .body h3 { + color: #074848; +} + +h1 { + font-size: 200%; +} + +h2 { + font-size: 160%; +} + +h3 { + font-size: 140%; +} + +footer#pagefooter { + margin-bottom: 1rem; + font-size: 85%; + color: #444; +} + +#rellinks, #breadcrumbs { + padding-right: 10px; + padding-left: 10px; +} + +.sphinxsidebar { + padding-left: 10px; +} + +.bodywrapper { + padding-right: 10px; +} + +div.body h1, div.body h2, div.body h3 { + background-color: #f2f2f2; + border-bottom: 1px solid #d0d0d0; +} + +div.body p.rubric { + border-bottom: 1px solid #d0d0d0; +} + +body .sphinxsidebar .search { + margin-top: 0; +} + +html pre { + background-color: #efc; + border: 1px solid #ac9; + border-left: none; + border-right: none; +} + +a, a:visited { + color: #0b6868; +} + +th { + background-color: #ede; +} + +code.xref, a code { + font-weight: bold; +} + +code.descname { + font-weight: bold; + font-size: 120%; +} + +@media (max-width: 820px) { + body { + 
background-color: white; + } +} diff --git a/doc/src/_templates/searchbox.html b/doc/src/_templates/searchbox.html new file mode 100644 index 0000000000000000000000000000000000000000..c51da22d1a1119d5290824efea4975d09bf585ab --- /dev/null +++ b/doc/src/_templates/searchbox.html @@ -0,0 +1,6 @@ +{# Add a title over the search box #} + +{%- if pagename != "search" %} +
+<h3>Quick search</h3>
+{%- include "!searchbox.html" %} +{%- endif %} diff --git a/doc/src/advanced.rst b/doc/src/advanced.rst new file mode 100644 index 0000000000000000000000000000000000000000..28c4be9f9c018fd612aeb550466fa1423a74444b --- /dev/null +++ b/doc/src/advanced.rst @@ -0,0 +1,599 @@ +More advanced topics +==================== + +.. sectionauthor:: Daniele Varrazzo + +.. testsetup:: * + + import re + import select + + cur.execute("CREATE TABLE atable (apoint point)") + conn.commit() + + def wait(conn): + while True: + state = conn.poll() + if state == psycopg2.extensions.POLL_OK: + break + elif state == psycopg2.extensions.POLL_WRITE: + select.select([], [conn.fileno()], []) + elif state == psycopg2.extensions.POLL_READ: + select.select([conn.fileno()], [], []) + else: + raise psycopg2.OperationalError("poll() returned %s" % state) + + aconn = psycopg2.connect(database='test', async=1) + wait(aconn) + acurs = aconn.cursor() + + +.. index:: + double: Subclassing; Cursor + double: Subclassing; Connection + +.. _subclassing-connection: +.. _subclassing-cursor: + +Connection and cursor factories +------------------------------- + +Psycopg exposes two new-style classes that can be sub-classed and expanded to +adapt them to the needs of the programmer: `psycopg2.extensions.cursor` +and `psycopg2.extensions.connection`. The `connection` class is +usually sub-classed only to provide an easy way to create customized cursors +but other uses are possible. `cursor` is much more interesting, because +it is the class where query building, execution and result type-casting into +Python variables happens. + +The `~psycopg2.extras` module contains several examples of :ref:`connection +and cursor subclasses `. + +.. note:: + + If you only need a customized cursor class, since Psycopg 2.5 you can use + the `~connection.cursor_factory` parameter of a regular connection instead + of creating a new `!connection` subclass. + + +.. 
index:: + single: Example; Cursor subclass + +An example of cursor subclass performing logging is:: + + import psycopg2 + import psycopg2.extensions + import logging + + class LoggingCursor(psycopg2.extensions.cursor): + def execute(self, sql, args=None): + logger = logging.getLogger('sql_debug') + logger.info(self.mogrify(sql, args)) + + try: + psycopg2.extensions.cursor.execute(self, sql, args) + except Exception, exc: + logger.error("%s: %s" % (exc.__class__.__name__, exc)) + raise + + conn = psycopg2.connect(DSN) + cur = conn.cursor(cursor_factory=LoggingCursor) + cur.execute("INSERT INTO mytable VALUES (%s, %s, %s);", + (10, 20, 30)) + + + +.. index:: + single: Objects; Creating new adapters + single: Adaptation; Creating new adapters + single: Data types; Creating new adapters + +.. _adapting-new-types: + +Adapting new Python types to SQL syntax +--------------------------------------- + +Any Python class or type can be adapted to an SQL string. Adaptation mechanism +is similar to the Object Adaptation proposed in the :pep:`246` and is exposed +by the `psycopg2.extensions.adapt()` function. + +The `~cursor.execute()` method adapts its arguments to the +`~psycopg2.extensions.ISQLQuote` protocol. Objects that conform to this +protocol expose a `!getquoted()` method returning the SQL representation +of the object as a string (the method must return `!bytes` in Python 3). +Optionally the conform object may expose a +`~psycopg2.extensions.ISQLQuote.prepare()` method. + +There are two basic ways to have a Python object adapted to SQL: + +- the object itself is conform, or knows how to make itself conform. Such + object must expose a `__conform__()` method that will be called with the + protocol object as argument. The object can check that the protocol is + `!ISQLQuote`, in which case it can return `!self` (if the object also + implements `!getquoted()`) or a suitable wrapper object. 
This option is + viable if you are the author of the object and if the object is specifically + designed for the database (i.e. having Psycopg as a dependency and polluting + its interface with the required methods doesn't bother you). For a simple + example you can take a look at the source code for the + `psycopg2.extras.Inet` object. + +- If implementing the `!ISQLQuote` interface directly in the object is not an + option (maybe because the object to adapt comes from a third party library), + you can use an *adaptation function*, taking the object to be adapted as + argument and returning a conforming object. The adapter must be + registered via the `~psycopg2.extensions.register_adapter()` function. A + simple example wrapper is `!psycopg2.extras.UUID_adapter` used by the + `~psycopg2.extras.register_uuid()` function. + +A convenient object to write adapters is the `~psycopg2.extensions.AsIs` +wrapper, whose `!getquoted()` result is simply the `!str()`\ ing conversion of +the wrapped object. + +.. index:: + single: Example; Types adaptation + +Example: mapping of a `!Point` class into the |point|_ PostgreSQL +geometric type: + +.. doctest:: + + >>> from psycopg2.extensions import adapt, register_adapter, AsIs + + >>> class Point(object): + ... def __init__(self, x, y): + ... self.x = x + ... self.y = y + + >>> def adapt_point(point): + ... x = adapt(point.x).getquoted() + ... y = adapt(point.y).getquoted() + ... return AsIs("'(%s, %s)'" % (x, y)) + + >>> register_adapter(Point, adapt_point) + + >>> cur.execute("INSERT INTO atable (apoint) VALUES (%s)", + ... (Point(1.23, 4.56),)) + + +.. |point| replace:: :sql:`point` +.. _point: https://www.postgresql.org/docs/current/static/datatype-geometric.html#DATATYPE-GEOMETRIC + +The above function call results in the SQL command:: + + INSERT INTO atable (apoint) VALUES ('(1.23, 4.56)'); + + + +.. index:: Type casting + +.. 
_type-casting-from-sql-to-python: + +Type casting of SQL types into Python objects +--------------------------------------------- + +PostgreSQL objects read from the database can be adapted to Python objects +through an user-defined adapting function. An adapter function takes two +arguments: the object string representation as returned by PostgreSQL and the +cursor currently being read, and should return a new Python object. For +example, the following function parses the PostgreSQL :sql:`point` +representation into the previously defined `!Point` class: + + >>> def cast_point(value, cur): + ... if value is None: + ... return None + ... + ... # Convert from (f1, f2) syntax using a regular expression. + ... m = re.match(r"\(([^)]+),([^)]+)\)", value) + ... if m: + ... return Point(float(m.group(1)), float(m.group(2))) + ... else: + ... raise InterfaceError("bad point representation: %r" % value) + + +In order to create a mapping from a PostgreSQL type (either standard or +user-defined), its OID must be known. It can be retrieved either by the second +column of the `cursor.description`: + + >>> cur.execute("SELECT NULL::point") + >>> point_oid = cur.description[0][1] + >>> point_oid + 600 + +or by querying the system catalog for the type name and namespace (the +namespace for system objects is :sql:`pg_catalog`): + + >>> cur.execute(""" + ... SELECT pg_type.oid + ... FROM pg_type JOIN pg_namespace + ... ON typnamespace = pg_namespace.oid + ... WHERE typname = %(typename)s + ... AND nspname = %(namespace)s""", + ... {'typename': 'point', 'namespace': 'pg_catalog'}) + >>> point_oid = cur.fetchone()[0] + >>> point_oid + 600 + +After you know the object OID, you can create and register the new type: + + >>> POINT = psycopg2.extensions.new_type((point_oid,), "POINT", cast_point) + >>> psycopg2.extensions.register_type(POINT) + +The `~psycopg2.extensions.new_type()` function binds the object OIDs +(more than one can be specified) to the adapter function. 
+`~psycopg2.extensions.register_type()` completes the spell. Conversion +is automatically performed when a column whose type is a registered OID is +read: + + >>> cur.execute("SELECT '(10.2,20.3)'::point") + >>> point = cur.fetchone()[0] + >>> print type(point), point.x, point.y + 10.2 20.3 + +A typecaster created by `!new_type()` can be also used with +`~psycopg2.extensions.new_array_type()` to create a typecaster converting a +PostgreSQL array into a Python list. + + +.. index:: + pair: Asynchronous; Notifications + pair: LISTEN; SQL command + pair: NOTIFY; SQL command + +.. _async-notify: + +Asynchronous notifications +-------------------------- + +Psycopg allows asynchronous interaction with other database sessions using the +facilities offered by PostgreSQL commands |LISTEN|_ and |NOTIFY|_. Please +refer to the PostgreSQL documentation for examples about how to use this form of +communication. + +Notifications are instances of the `~psycopg2.extensions.Notify` object made +available upon reception in the `connection.notifies` list. Notifications can +be sent from Python code simply executing a :sql:`NOTIFY` command in an +`~cursor.execute()` call. + +Because of the way sessions interact with notifications (see |NOTIFY|_ +documentation), you should keep the connection in `~connection.autocommit` +mode if you wish to receive or send notifications in a timely manner. + +.. |LISTEN| replace:: :sql:`LISTEN` +.. _LISTEN: https://www.postgresql.org/docs/current/static/sql-listen.html +.. |NOTIFY| replace:: :sql:`NOTIFY` +.. _NOTIFY: https://www.postgresql.org/docs/current/static/sql-notify.html + +Notifications are received after every query execution. If the user is +interested in receiving notifications but not in performing any query, the +`~connection.poll()` method can be used to check for new messages without +wasting resources. + +A simple application could poll the connection from time to time to check if +something new has arrived. 
A better strategy is to use some I/O completion +function such as :py:func:`~select.select` to sleep until awakened by the kernel when there is +some data to read on the connection, thereby using no CPU unless there is +something to read:: + + import select + import psycopg2 + import psycopg2.extensions + + conn = psycopg2.connect(DSN) + conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + + curs = conn.cursor() + curs.execute("LISTEN test;") + + print "Waiting for notifications on channel 'test'" + while True: + if select.select([conn],[],[],5) == ([],[],[]): + print "Timeout" + else: + conn.poll() + while conn.notifies: + notify = conn.notifies.pop(0) + print "Got NOTIFY:", notify.pid, notify.channel, notify.payload + +Running the script and executing a command such as :sql:`NOTIFY test, 'hello'` +in a separate :program:`psql` shell, the output may look similar to: + +.. code-block:: none + + Waiting for notifications on channel 'test' + Timeout + Timeout + Got NOTIFY: 6535 test hello + Timeout + ... + +Note that the payload is only available from PostgreSQL 9.0: notifications +received from a previous version server will have the +`~psycopg2.extensions.Notify.payload` attribute set to the empty string. + +.. versionchanged:: 2.3 + Added `~psycopg2.extensions.Notify` object and handling notification + payload. + +.. versionchanged:: 2.7 + The `~connection.notifies` attribute is writable: it is possible to + replace it with any object exposing an `!append()` method. An useful + example would be to use a `~collections.deque` object. + + +.. index:: + double: Asynchronous; Connection + +.. _async-support: + +Asynchronous support +-------------------- + +.. versionadded:: 2.2 + +Psycopg can issue asynchronous queries to a PostgreSQL database. An asynchronous +communication style is established passing the parameter *async*\=1 to the +`~psycopg2.connect()` function: the returned connection will work in +*asynchronous mode*. 
+ +In asynchronous mode, a Psycopg connection will rely on the caller to poll the +socket file descriptor, checking if it is ready to accept data or if a query +result has been transferred and is ready to be read on the client. The caller +can use the method `~connection.fileno()` to get the connection file +descriptor and `~connection.poll()` to make communication proceed according to +the current connection state. + +The following is an example loop using methods `!fileno()` and `!poll()` +together with the Python :py:func:`~select.select` function in order to carry on +asynchronous operations with Psycopg:: + + def wait(conn): + while True: + state = conn.poll() + if state == psycopg2.extensions.POLL_OK: + break + elif state == psycopg2.extensions.POLL_WRITE: + select.select([], [conn.fileno()], []) + elif state == psycopg2.extensions.POLL_READ: + select.select([conn.fileno()], [], []) + else: + raise psycopg2.OperationalError("poll() returned %s" % state) + +The above loop of course would block an entire application: in a real +asynchronous framework, `!select()` would be called on many file descriptors +waiting for any of them to be ready. Nonetheless the function can be used to +connect to a PostgreSQL server only using nonblocking commands and the +connection obtained can be used to perform further nonblocking queries. After +`!poll()` has returned `~psycopg2.extensions.POLL_OK`, and thus `!wait()` has +returned, the connection can be safely used: + + >>> aconn = psycopg2.connect(database='test', async=1) + >>> wait(aconn) + >>> acurs = aconn.cursor() + +Note that there are a few other requirements to be met in order to have a +completely non-blocking connection attempt: see the libpq documentation for +|PQconnectStart|_. + +.. |PQconnectStart| replace:: `!PQconnectStart()` +.. 
_PQconnectStart: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNECTSTARTPARAMS + +The same loop should be also used to perform nonblocking queries: after +sending a query via `~cursor.execute()` or `~cursor.callproc()`, call +`!poll()` on the connection available from `cursor.connection` until it +returns `!POLL_OK`, at which point the query has been completely sent to the +server and, if it produced data, the results have been transferred to the +client and available using the regular cursor methods: + + >>> acurs.execute("SELECT pg_sleep(5); SELECT 42;") + >>> wait(acurs.connection) + >>> acurs.fetchone()[0] + 42 + +When an asynchronous query is being executed, `connection.isexecuting()` returns +`!True`. Two cursors can't execute concurrent queries on the same asynchronous +connection. + +There are several limitations in using asynchronous connections: the +connection is always in `~connection.autocommit` mode and it is not +possible to change it. So a +transaction is not implicitly started at the first query and is not possible +to use methods `~connection.commit()` and `~connection.rollback()`: you can +manually control transactions using `~cursor.execute()` to send database +commands such as :sql:`BEGIN`, :sql:`COMMIT` and :sql:`ROLLBACK`. Similarly +`~connection.set_session()` can't be used but it is still possible to invoke the +:sql:`SET` command with the proper :sql:`default_transaction_...` parameter. + +With asynchronous connections it is also not possible to use +`~connection.set_client_encoding()`, `~cursor.executemany()`, :ref:`large +objects `, :ref:`named cursors `. + +:ref:`COPY commands ` are not supported either in asynchronous mode, but +this will be probably implemented in a future release. + + + + +.. index:: + single: Greenlet + single: Coroutine + single: Eventlet + single: gevent + single: Wait callback + +.. _green-support: + +Support for coroutine libraries +------------------------------- + +.. 
versionadded:: 2.2 + +Psycopg can be used together with coroutine_\-based libraries and participate +in cooperative multithreading. + +Coroutine-based libraries (such as Eventlet_ or gevent_) can usually patch the +Python standard library in order to enable a coroutine switch in the presence of +blocking I/O: the process is usually referred as making the system *green*, in +reference to the `green threads`_. + +Because Psycopg is a C extension module, it is not possible for coroutine +libraries to patch it: Psycopg instead enables cooperative multithreading by +allowing the registration of a *wait callback* using the +`psycopg2.extensions.set_wait_callback()` function. When a wait callback is +registered, Psycopg will use `libpq non-blocking calls`__ instead of the regular +blocking ones, and will delegate to the callback the responsibility to wait +for the socket to become readable or writable. + +Working this way, the caller does not have the complete freedom to schedule the +socket check whenever they want as with an :ref:`asynchronous connection +`, but has the advantage of maintaining a complete |DBAPI| +semantics: from the point of view of the end user, all Psycopg functions and +objects will work transparently in the coroutine environment (blocking the +calling green thread and giving other green threads the possibility to be +scheduled), allowing non modified code and third party libraries (such as +SQLAlchemy_) to be used in coroutine-based programs. + +.. warning:: + Psycopg connections are not *green thread safe* and can't be used + concurrently by different green threads. Trying to execute more than one + command at time using one cursor per thread will result in an error (or a + deadlock on versions before 2.4.2). + + Therefore, programmers are advised to either avoid sharing connections + between coroutines or to use a library-friendly lock to synchronize shared + connections, e.g. for pooling. 
+ +Coroutine libraries authors should provide a callback implementation (and +possibly a method to register it) to make Psycopg as green as they want. An +example callback (using `!select()` to block) is provided as +`psycopg2.extras.wait_select()`: it boils down to something similar to:: + + def wait_select(conn): + while True: + state = conn.poll() + if state == extensions.POLL_OK: + break + elif state == extensions.POLL_READ: + select.select([conn.fileno()], [], []) + elif state == extensions.POLL_WRITE: + select.select([], [conn.fileno()], []) + else: + raise OperationalError("bad state from poll: %s" % state) + +Providing callback functions for the single coroutine libraries is out of +psycopg2 scope, as the callback can be tied to the libraries' implementation +details. You can check the `psycogreen`_ project for further informations and +resources about the topic. + +.. _coroutine: https://en.wikipedia.org/wiki/Coroutine +.. _greenlet: https://pypi.org/project/greenlet/ +.. _green threads: https://en.wikipedia.org/wiki/Green_threads +.. _Eventlet: https://eventlet.net/ +.. _gevent: http://www.gevent.org/ +.. _SQLAlchemy: https://www.sqlalchemy.org/ +.. _psycogreen: https://github.com/psycopg/psycogreen/ +.. __: https://www.postgresql.org/docs/current/static/libpq-async.html + +.. warning:: + + :ref:`COPY commands ` are currently not supported when a wait callback + is registered, but they will be probably implemented in a future release. + + :ref:`Large objects ` are not supported either: they are + not compatible with asynchronous connections. + + +.. testcode:: + :hide: + + aconn.close() + conn.rollback() + cur.execute("DROP TABLE atable") + conn.commit() + cur.close() + conn.close() + + + +.. index:: + single: Replication + +.. _replication-support: + +Replication protocol support +---------------------------- + +.. versionadded:: 2.7 + +Modern PostgreSQL servers (version 9.0 and above) support replication. 
The +replication protocol is built on top of the client-server protocol and can be +operated using ``libpq``, as such it can be also operated by ``psycopg2``. +The replication protocol can be operated on both synchronous and +:ref:`asynchronous ` connections. + +Server version 9.4 adds a new feature called *Logical Replication*. + +.. seealso:: + + - PostgreSQL `Streaming Replication Protocol`__ + + .. __: https://www.postgresql.org/docs/current/static/protocol-replication.html + + +Logical replication Quick-Start +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You must be using PostgreSQL server version 9.4 or above to run this quick +start. + +Make sure that replication connections are permitted for user ``postgres`` in +``pg_hba.conf`` and reload the server configuration. You also need to set +``wal_level=logical`` and ``max_wal_senders``, ``max_replication_slots`` to +value greater than zero in ``postgresql.conf`` (these changes require a server +restart). Create a database ``psycopg2_test``. + +Then run the following code to quickly try the replication support out. 
This +is not production code -- it's only intended as a simple demo of logical +replication:: + + from __future__ import print_function + import sys + import psycopg2 + import psycopg2.extras + + conn = psycopg2.connect('dbname=psycopg2_test user=postgres', + connection_factory=psycopg2.extras.LogicalReplicationConnection) + cur = conn.cursor() + try: + # test_decoding produces textual output + cur.start_replication(slot_name='pytest', decode=True) + except psycopg2.ProgrammingError: + cur.create_replication_slot('pytest', output_plugin='test_decoding') + cur.start_replication(slot_name='pytest', decode=True) + + class DemoConsumer(object): + def __call__(self, msg): + print(msg.payload) + msg.cursor.send_feedback(flush_lsn=msg.data_start) + + democonsumer = DemoConsumer() + + print("Starting streaming, press Control-C to end...", file=sys.stderr) + try: + cur.consume_stream(democonsumer) + except KeyboardInterrupt: + cur.close() + conn.close() + print("The slot 'pytest' still exists. Drop it with " + "SELECT pg_drop_replication_slot('pytest'); if no longer needed.", + file=sys.stderr) + print("WARNING: Transaction logs will accumulate in pg_xlog " + "until the slot is dropped.", file=sys.stderr) + + +You can now make changes to the ``psycopg2_test`` database using a normal +psycopg2 session, ``psql``, etc. and see the logical decoding stream printed +by this demo client. + +This will continue running until terminated with ``Control-C``. + +For the details see :ref:`replication-objects`. diff --git a/doc/src/conf.py b/doc/src/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..c40c49307630e6a7839cab77661023319437b49c --- /dev/null +++ b/doc/src/conf.py @@ -0,0 +1,288 @@ +# +# Psycopg documentation build configuration file, created by +# sphinx-quickstart on Sun Feb 7 13:48:41 2010. +# +# This file is execfile()d with the current directory set to its containing dir. 
+# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +import sys +from better import better_theme_path + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.append(os.path.abspath('tools/lib')) + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.ifconfig', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', +] + +# Specific extensions for Psycopg documentation. +extensions += ['dbapi_extension', 'sql_role', 'ticket_role'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'Psycopg' +copyright = ( + '2001-2021, Federico Di Gregorio, Daniele Varrazzo, The Psycopg Team' +) + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '2.0' + +# The full version, including alpha/beta/rc tags. 
+try: + import psycopg2 +except ImportError: + print("WARNING: couldn't import psycopg to read version.") + release = version +else: + release = psycopg2.__version__.split()[0] + version = '.'.join(release.split('.')[:2]) + +intersphinx_mapping = {'py': ('https://docs.python.org/3', None)} + +# Pattern to generate links to the bug tracker +ticket_url = 'https://github.com/psycopg/psycopg2/issues/%s' +ticket_remap_until = 25 +ticket_remap_offset = 230 + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +# unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = ['_build', 'html'] + +# The reST default role (used for this markup: `text`) to use for all documents. +default_role = 'obj' + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# Using 'python' instead of the default gives warnings if parsing an example +# fails, instead of defaulting to none +highlight_language = 'python' + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# Include TODO items in the documentation +todo_include_todos = False + +rst_epilog = """ +.. |DBAPI| replace:: DB API 2.0 + +.. 
_DBAPI: https://www.python.org/dev/peps/pep-0249/ + +.. _transaction isolation level: + https://www.postgresql.org/docs/current/static/transaction-iso.html + +.. |MVCC| replace:: :abbr:`MVCC (Multiversion concurrency control)` +""" + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme = 'better' + +# The stylesheet to use with HTML output: this will include the original one +# adding a few classes. +# html_style = 'psycopg.css' + +# Hide the sphinx footer +html_show_sphinx = False + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + 'linktotheme': False, + 'cssfiles': ['_static/psycopg.css'], +} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = [better_theme_path] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +html_short_title = 'Home' + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. 
+# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# no need for the prev/next topic link using better theme: they are on top +html_sidebars = { + '**': ['localtoc.html', 'searchbox.html'], +} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_use_modindex = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'psycopgdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +# latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +# latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ( + 'index', + 'psycopg.tex', + 'Psycopg Documentation', + 'Federico Di Gregorio', + 'manual', + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. 
+# latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +# latex_preamble = '' + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_use_modindex = True + + +doctest_global_setup = """ + +import os +import psycopg2 + +def test_connect(): + try: + dsn = os.environ['PSYCOPG2_DSN'] + except KeyError: + assert False, "You need to set the environment variable PSYCOPG2_DSN" \ + " in order to test the documentation!" + return psycopg2.connect(dsn) + +conn = test_connect() +cur = conn.cursor() + +def drop_test_table(name): + cur.execute("SAVEPOINT drop_test_table;") + try: + cur.execute("DROP TABLE %s;" % name) + except: + cur.execute("ROLLBACK TO SAVEPOINT drop_test_table;") + conn.commit() + +def create_test_table(): + drop_test_table('test') + cur.execute("CREATE TABLE test (id SERIAL PRIMARY KEY, num INT, data TEXT)") + conn.commit() + +""" diff --git a/doc/src/connection.rst b/doc/src/connection.rst new file mode 100644 index 0000000000000000000000000000000000000000..05ad1404421d9167b426960e3e94ac85eb190e14 --- /dev/null +++ b/doc/src/connection.rst @@ -0,0 +1,916 @@ +The ``connection`` class +======================== + +.. sectionauthor:: Daniele Varrazzo + +.. testsetup:: + + from pprint import pprint + import psycopg2.extensions + + drop_test_table('foo') + +.. class:: connection + + Handles the connection to a PostgreSQL database instance. It encapsulates + a database session. + + Connections are created using the factory function + `~psycopg2.connect()`. + + Connections are thread safe and can be shared among many threads. See + :ref:`thread-safety` for details. + + Connections can be used as context managers. Note that a context wraps a + transaction: if the context exits with success the transaction is + committed, if it exits with an exception the transaction is rolled back. 
+ Note that the connection is not closed by the context and it can be used + for several contexts. + + .. code:: python + + conn = psycopg2.connect(DSN) + + with conn: + with conn.cursor() as curs: + curs.execute(SQL1) + + with conn: + with conn.cursor() as curs: + curs.execute(SQL2) + + # leaving contexts doesn't close the connection + conn.close() + + + .. method:: cursor(name=None, cursor_factory=None, scrollable=None, withhold=False) + + Return a new `cursor` object using the connection. + + If *name* is specified, the returned cursor will be a :ref:`server + side cursor ` (also known as *named cursor*). + Otherwise it will be a regular *client side* cursor. By default a + named cursor is declared without :sql:`SCROLL` option and + :sql:`WITHOUT HOLD`: set the argument or property `~cursor.scrollable` + to `!True`/`!False` and or `~cursor.withhold` to `!True` to change the + declaration. + + The name can be a string not valid as a PostgreSQL identifier: for + example it may start with a digit and contain non-alphanumeric + characters and quotes. + + .. versionchanged:: 2.4 + previously only valid PostgreSQL identifiers were accepted as + cursor name. + + The *cursor_factory* argument can be used to create non-standard + cursors. The class returned must be a subclass of + `psycopg2.extensions.cursor`. See :ref:`subclassing-cursor` for + details. A default factory for the connection can also be specified + using the `~connection.cursor_factory` attribute. + + .. versionchanged:: 2.4.3 added the *withhold* argument. + .. versionchanged:: 2.5 added the *scrollable* argument. + + .. extension:: + + All the function arguments are Psycopg extensions to the |DBAPI|. + + + .. index:: + pair: Transaction; Commit + + .. method:: commit() + + Commit any pending transaction to the database. + + By default, Psycopg opens a transaction before executing the first + command: if `!commit()` is not called, the effect of any data + manipulation will be lost. 
+ + The connection can be also set in "autocommit" mode: no transaction is + automatically open, commands have immediate effect. See + :ref:`transactions-control` for details. + + .. versionchanged:: 2.5 if the connection is used in a ``with`` + statement, the method is automatically called if no exception is + raised in the ``with`` block. + + + .. index:: + pair: Transaction; Rollback + + .. method:: rollback() + + Roll back to the start of any pending transaction. Closing a + connection without committing the changes first will cause an implicit + rollback to be performed. + + .. versionchanged:: 2.5 if the connection is used in a ``with`` + statement, the method is automatically called if an exception is + raised in the ``with`` block. + + + .. method:: close() + + Close the connection now (rather than whenever `del` is executed). + The connection will be unusable from this point forward; an + `~psycopg2.InterfaceError` will be raised if any operation is + attempted with the connection. The same applies to all cursor objects + trying to use the connection. Note that closing a connection without + committing the changes first will cause any pending change to be + discarded as if a :sql:`ROLLBACK` was performed (unless a different + isolation level has been selected: see + `~connection.set_isolation_level()`). + + .. index:: + single: PgBouncer; unclean server + + .. versionchanged:: 2.2 + previously an explicit :sql:`ROLLBACK` was issued by Psycopg on + `!close()`. The command could have been sent to the backend at an + inappropriate time, so Psycopg currently relies on the backend to + implicitly discard uncommitted changes. Some middleware are known + to behave incorrectly though when the connection is closed during + a transaction (when `~connection.status` is + `~psycopg2.extensions.STATUS_IN_TRANSACTION`), e.g. PgBouncer_ + reports an ``unclean server`` and discards the connection. 
To + avoid this problem you can ensure to terminate the transaction + with a `~connection.commit()`/`~connection.rollback()` before + closing. + + .. _PgBouncer: http://www.pgbouncer.org/ + + + .. index:: + single: Exceptions; In the connection class + + .. rubric:: Exceptions as connection class attributes + + The `!connection` also exposes as attributes the same exceptions + available in the `psycopg2` module. See :ref:`dbapi-exceptions`. + + + + .. index:: + single: Two-phase commit; methods + + .. rubric:: Two-phase commit support methods + + .. versionadded:: 2.3 + + .. seealso:: :ref:`tpc` for an introductory explanation of these methods. + + Note that PostgreSQL supports two-phase commit since release 8.1: these + methods raise `~psycopg2.NotSupportedError` if used with an older version + server. + + + .. _tpc_methods: + + .. method:: xid(format_id, gtrid, bqual) + + Returns a `~psycopg2.extensions.Xid` instance to be passed to the + `!tpc_*()` methods of this connection. The argument types and + constraints are explained in :ref:`tpc`. + + The values passed to the method will be available on the returned + object as the members `~psycopg2.extensions.Xid.format_id`, + `~psycopg2.extensions.Xid.gtrid`, `~psycopg2.extensions.Xid.bqual`. + The object also allows accessing to these members and unpacking as a + 3-items tuple. + + + .. method:: tpc_begin(xid) + + Begins a TPC transaction with the given transaction ID *xid*. + + This method should be called outside of a transaction (i.e. nothing + may have executed since the last `~connection.commit()` or + `~connection.rollback()` and `connection.status` is + `~psycopg2.extensions.STATUS_READY`). + + Furthermore, it is an error to call `!commit()` or `!rollback()` + within the TPC transaction: in this case a `~psycopg2.ProgrammingError` + is raised. 
+ + The *xid* may be either an object returned by the `~connection.xid()` + method or a plain string: the latter allows to create a transaction + using the provided string as PostgreSQL transaction id. See also + `~connection.tpc_recover()`. + + + .. index:: + pair: Transaction; Prepare + + .. method:: tpc_prepare() + + Performs the first phase of a transaction started with + `~connection.tpc_begin()`. A `~psycopg2.ProgrammingError` is raised if + this method is used outside of a TPC transaction. + + After calling `!tpc_prepare()`, no statements can be executed until + `~connection.tpc_commit()` or `~connection.tpc_rollback()` will be + called. The `~connection.reset()` method can be used to restore the + status of the connection to `~psycopg2.extensions.STATUS_READY`: the + transaction will remain prepared in the database and will be + possible to finish it with `!tpc_commit(xid)` and + `!tpc_rollback(xid)`. + + .. seealso:: the |PREPARE TRANSACTION|_ PostgreSQL command. + + .. |PREPARE TRANSACTION| replace:: :sql:`PREPARE TRANSACTION` + .. _PREPARE TRANSACTION: https://www.postgresql.org/docs/current/static/sql-prepare-transaction.html + + + .. index:: + pair: Commit; Prepared + + .. method:: tpc_commit([xid]) + + When called with no arguments, `!tpc_commit()` commits a TPC + transaction previously prepared with `~connection.tpc_prepare()`. + + If `!tpc_commit()` is called prior to `!tpc_prepare()`, a single phase + commit is performed. A transaction manager may choose to do this if + only a single resource is participating in the global transaction. + + When called with a transaction ID *xid*, the database commits + the given transaction. If an invalid transaction ID is + provided, a `~psycopg2.ProgrammingError` will be raised. This form + should be called outside of a transaction, and is intended for use in + recovery. + + On return, the TPC transaction is ended. + + .. seealso:: the |COMMIT PREPARED|_ PostgreSQL command. + + .. 
|COMMIT PREPARED| replace:: :sql:`COMMIT PREPARED` + .. _COMMIT PREPARED: https://www.postgresql.org/docs/current/static/sql-commit-prepared.html + + + .. index:: + pair: Rollback; Prepared + + .. method:: tpc_rollback([xid]) + + When called with no arguments, `!tpc_rollback()` rolls back a TPC + transaction. It may be called before or after + `~connection.tpc_prepare()`. + + When called with a transaction ID *xid*, it rolls back the given + transaction. If an invalid transaction ID is provided, a + `~psycopg2.ProgrammingError` is raised. This form should be called + outside of a transaction, and is intended for use in recovery. + + On return, the TPC transaction is ended. + + .. seealso:: the |ROLLBACK PREPARED|_ PostgreSQL command. + + .. |ROLLBACK PREPARED| replace:: :sql:`ROLLBACK PREPARED` + .. _ROLLBACK PREPARED: https://www.postgresql.org/docs/current/static/sql-rollback-prepared.html + + + .. index:: + pair: Transaction; Recover + + .. method:: tpc_recover() + + Returns a list of `~psycopg2.extensions.Xid` representing pending + transactions, suitable for use with `tpc_commit()` or + `tpc_rollback()`. + + If a transaction was not initiated by Psycopg, the returned Xids will + have attributes `~psycopg2.extensions.Xid.format_id` and + `~psycopg2.extensions.Xid.bqual` set to `!None` and the + `~psycopg2.extensions.Xid.gtrid` set to the PostgreSQL transaction ID: such Xids are still + usable for recovery. Psycopg uses the same algorithm of the + `PostgreSQL JDBC driver`__ to encode a XA triple in a string, so + transactions initiated by a program using such driver should be + unpacked correctly. + + .. __: https://jdbc.postgresql.org/ + + Xids returned by `!tpc_recover()` also have extra attributes + `~psycopg2.extensions.Xid.prepared`, `~psycopg2.extensions.Xid.owner`, + `~psycopg2.extensions.Xid.database` populated with the values read + from the server. + + .. seealso:: the |pg_prepared_xacts|_ system view. + + .. 
|pg_prepared_xacts| replace:: `pg_prepared_xacts` + .. _pg_prepared_xacts: https://www.postgresql.org/docs/current/static/view-pg-prepared-xacts.html + + + + .. extension:: + + The above methods are the only ones defined by the |DBAPI| protocol. + The Psycopg connection object exports the following additional + methods and attributes. + + + .. attribute:: closed + + Read-only integer attribute: 0 if the connection is open, nonzero if + it is closed or broken. + + + .. method:: cancel + + Cancel the current database operation. + + The method interrupts the processing of the current operation. If no + query is being executed, it does nothing. You can call this function + from a different thread than the one currently executing a database + operation, for instance if you want to cancel a long running query if a + button is pushed in the UI. Interrupting query execution will cause the + cancelled method to raise a + `~psycopg2.extensions.QueryCanceledError`. Note that the termination + of the query is not guaranteed to succeed: see the documentation for + |PQcancel|_. + + .. |PQcancel| replace:: `!PQcancel()` + .. _PQcancel: https://www.postgresql.org/docs/current/static/libpq-cancel.html#LIBPQ-PQCANCEL + + .. versionadded:: 2.3 + + + .. method:: reset + + Reset the connection to the default. + + The method rolls back an eventual pending transaction and executes the + PostgreSQL |RESET|_ and |SET SESSION AUTHORIZATION|__ to revert the + session to the default values. A two-phase commit transaction prepared + using `~connection.tpc_prepare()` will remain in the database + available for recovery. + + .. |RESET| replace:: :sql:`RESET` + .. _RESET: https://www.postgresql.org/docs/current/static/sql-reset.html + + .. |SET SESSION AUTHORIZATION| replace:: :sql:`SET SESSION AUTHORIZATION` + .. __: https://www.postgresql.org/docs/current/static/sql-set-session-authorization.html + + .. versionadded:: 2.0.12 + + + .. 
attribute:: dsn + + Read-only string containing the connection string used by the + connection. + + If a password was specified in the connection string it will be + obscured. + + + + .. rubric:: Transaction control methods and attributes. + + .. index:: + pair: Transaction; Autocommit + pair: Transaction; Isolation level + + .. method:: set_session(isolation_level=None, readonly=None, deferrable=None, autocommit=None) + + Set one or more parameters for the next transactions or statements in + the current session. + + :param isolation_level: set the `isolation level`_ for the next + transactions/statements. The value can be one of the literal + values ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE + READ``, ``SERIALIZABLE`` or the equivalent :ref:`constant + ` defined in the `~psycopg2.extensions` + module. + :param readonly: if `!True`, set the connection to read only; + read/write if `!False`. + :param deferrable: if `!True`, set the connection to deferrable; + non deferrable if `!False`. Only available from PostgreSQL 9.1. + :param autocommit: switch the connection to autocommit mode: not a + PostgreSQL session setting but an alias for setting the + `autocommit` attribute. + + .. _isolation level: + https://www.postgresql.org/docs/current/static/transaction-iso.html + + Arguments set to `!None` (the default for all) will not be changed. + The parameters *isolation_level*, *readonly* and *deferrable* also + accept the string ``DEFAULT`` as a value: the effect is to reset the + parameter to the server default. Defaults are defined by the server + configuration: see values for |default_transaction_isolation|__, + |default_transaction_read_only|__, |default_transaction_deferrable|__. + + .. |default_transaction_isolation| replace:: :sql:`default_transaction_isolation` + .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-DEFAULT-TRANSACTION-ISOLATION + .. 
|default_transaction_read_only| replace:: :sql:`default_transaction_read_only` + .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-DEFAULT-TRANSACTION-READ-ONLY + .. |default_transaction_deferrable| replace:: :sql:`default_transaction_deferrable` + .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-DEFAULT-TRANSACTION-DEFERRABLE + + The function must be invoked with no transaction in progress. + + .. seealso:: |SET TRANSACTION|_ for further details about the behaviour + of the transaction parameters in the server. + + .. |SET TRANSACTION| replace:: :sql:`SET TRANSACTION` + .. _SET TRANSACTION: https://www.postgresql.org/docs/current/static/sql-set-transaction.html + + .. versionadded:: 2.4.2 + + .. versionchanged:: 2.7 + Before this version, the function would have set + :sql:`default_transaction_*` attribute in the current session; + this implementation has the problem of not playing well with + external connection pooling working at transaction level and not + resetting the state of the session: changing the default + transaction would pollute the connections in the pool and create + problems to other applications using the same pool. + + Starting from 2.7, if the connection is not autocommit, the + transaction characteristics are issued together with :sql:`BEGIN` + and will leave the :sql:`default_transaction_*` settings untouched. + For example:: + + conn.set_session(readonly=True) + + will not change :sql:`default_transaction_read_only`, but + following transaction will start with a :sql:`BEGIN READ ONLY`. + Conversely, using:: + + conn.set_session(readonly=True, autocommit=True) + + will set :sql:`default_transaction_read_only` to :sql:`on` and + rely on the server to apply the read only state to whatever + transaction, implicit or explicit, is executed in the connection. + + + .. 
attribute:: autocommit + + Read/write attribute: if `!True`, no transaction is handled by the + driver and every statement sent to the backend has immediate effect; + if `!False` a new transaction is started at the first command + execution: the methods `commit()` or `rollback()` must be manually + invoked to terminate the transaction. + + The autocommit mode is useful to execute commands requiring to be run + outside a transaction, such as :sql:`CREATE DATABASE` or + :sql:`VACUUM`. + + The default is `!False` (manual commit) as per DBAPI specification. + + .. warning:: + + By default, any query execution, including a simple :sql:`SELECT` + will start a transaction: for long-running programs, if no further + action is taken, the session will remain "idle in transaction", an + undesirable condition for several reasons (locks are held by + the session, tables bloat...). For long lived scripts, either + ensure to terminate a transaction as soon as possible or use an + autocommit connection. + + .. versionadded:: 2.4.2 + + + .. attribute:: isolation_level + + Return or set the `transaction isolation level`_ for the current + session. The value is one of the :ref:`isolation-level-constants` + defined in the `psycopg2.extensions` module. On set it is also + possible to use one of the literal values ``READ UNCOMMITTED``, ``READ + COMMITTED``, ``REPEATABLE READ``, ``SERIALIZABLE``, ``DEFAULT``. + + .. versionchanged:: 2.7 + + the property is writable. + + .. versionchanged:: 2.7 + + the default value for `!isolation_level` is + `~psycopg2.extensions.ISOLATION_LEVEL_DEFAULT`; previously the + property would have queried the server and returned the real value + applied. To know this value you can run a query such as :sql:`show + transaction_isolation`. Usually the default value is `READ + COMMITTED`, but this may be changed in the server configuration. 
+ + This value is now entirely separate from the `autocommit` + property: in previous version, if `!autocommit` was set to `!True` + this property would have returned + `~psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT`; it will now + return the server isolation level. + + + .. attribute:: readonly + + Return or set the read-only status for the current session. Available + values are `!True` (new transactions will be in read-only mode), + `!False` (new transactions will be writable), `!None` (use the default + configured for the server by :sql:`default_transaction_read_only`). + + .. versionadded:: 2.7 + + + .. attribute:: deferrable + + Return or set the `deferrable status`__ for the current session. + Available values are `!True` (new transactions will be in deferrable + mode), `!False` (new transactions will be in non deferrable mode), + `!None` (use the default configured for the server by + :sql:`default_transaction_deferrable`). + + .. __: `SET TRANSACTION`_ + + .. versionadded:: 2.7 + + + .. method:: set_isolation_level(level) + + .. note:: + + This is a legacy method mixing `~conn.isolation_level` and + `~conn.autocommit`. Using the respective properties is a better + option. + + Set the `transaction isolation level`_ for the current session. + The level defines the different phenomena that can happen in the + database between concurrent transactions. + + The value set is an integer: symbolic constants are defined in + the module `psycopg2.extensions`: see + :ref:`isolation-level-constants` for the available values. + + The default level is `~psycopg2.extensions.ISOLATION_LEVEL_DEFAULT`: + at this level a transaction is automatically started the first time a + database command is executed. If you want an *autocommit* mode, + switch to `~psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT` before + executing any command:: + + >>> conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + + See also :ref:`transactions-control`. + + + .. 
index:: + pair: Client; Encoding + + .. attribute:: encoding + .. method:: set_client_encoding(enc) + + Read or set the client encoding for the current session. The default + is the encoding defined by the database. It should be one of the + `character sets supported by PostgreSQL`__ + + .. __: https://www.postgresql.org/docs/current/static/multibyte.html + + + .. index:: + pair: Client; Logging + + .. attribute:: notices + + A list containing all the database messages sent to the client during + the session. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> cur.execute("CREATE TABLE foo (id serial PRIMARY KEY);") + >>> pprint(conn.notices) + ['NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "foo_pkey" for table "foo"\n', + 'NOTICE: CREATE TABLE will create implicit sequence "foo_id_seq" for serial column "foo.id"\n'] + + .. versionchanged:: 2.7 + The `!notices` attribute is writable: the user may replace it + with any Python object exposing an `!append()` method. If + appending raises an exception the notice is silently + dropped. + + To avoid a leak in case excessive notices are generated, only the last + 50 messages are kept. This check is only in place if the `!notices` + attribute is a list: if any other object is used it will be up to the + user to guard from leakage. + + You can configure what messages to receive using `PostgreSQL logging + configuration parameters`__ such as ``log_statement``, + ``client_min_messages``, ``log_min_duration_statement`` etc. + + .. __: https://www.postgresql.org/docs/current/static/runtime-config-logging.html + + + .. attribute:: notifies + + List of `~psycopg2.extensions.Notify` objects containing asynchronous + notifications received by the session. + + For other details see :ref:`async-notify`. + + .. versionchanged:: 2.3 + Notifications are instances of the `!Notify` object. Previously the + list was composed by 2 items tuples :samp:`({pid},{channel})` and + the payload was not accessible. 
To keep backward compatibility, + `!Notify` objects can still be accessed as 2 items tuples. + + .. versionchanged:: 2.7 + The `!notifies` attribute is writable: the user may replace it + with any Python object exposing an `!append()` method. If + appending raises an exception the notification is silently + dropped. + + + .. attribute:: cursor_factory + + The default cursor factory used by `~connection.cursor()` if the + parameter is not specified. + + .. versionadded:: 2.5 + + + .. index:: + pair: Connection; Info + + .. attribute:: info + + A `~psycopg2.extensions.ConnectionInfo` object exposing information + about the native libpq connection. + + .. versionadded:: 2.8 + + + .. index:: + pair: Connection; Status + + .. attribute:: status + + A read-only integer representing the status of the connection. + Symbolic constants for the values are defined in the module + `psycopg2.extensions`: see :ref:`connection-status-constants` + for the available values. + + The status is undefined for `closed` connections. + + + .. method:: lobject([oid [, mode [, new_oid [, new_file [, lobject_factory]]]]]) + + Return a new database large object as a `~psycopg2.extensions.lobject` + instance. + + See :ref:`large-objects` for an overview. + + :param oid: The OID of the object to read or write. 0 to create + a new large object and have its OID assigned automatically. + :param mode: Access mode to the object, see below. + :param new_oid: Create a new object using the specified OID. The + function raises `~psycopg2.OperationalError` if the OID is already + in use. Default is 0, meaning assign a new one automatically. + :param new_file: The name of a file to be imported in the database + (using the |lo_import|_ function) + :param lobject_factory: Subclass of + `~psycopg2.extensions.lobject` to be instantiated. + + .. |lo_import| replace:: `!lo_import()` + .. 
_lo_import: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-IMPORT + + Available values for *mode* are: + + ======= ========= + *mode* meaning + ======= ========= + ``r`` Open for read only + ``w`` Open for write only + ``rw`` Open for read/write + ``n`` Don't open the file + ``b`` Don't decode read data (return data as `!str` in Python 2 or `!bytes` in Python 3) + ``t`` Decode read data according to `connection.encoding` (return data as `!unicode` in Python 2 or `!str` in Python 3) + ======= ========= + + ``b`` and ``t`` can be specified together with a read/write mode. If + neither ``b`` nor ``t`` is specified, the default is ``b`` in Python 2 + and ``t`` in Python 3. + + .. versionadded:: 2.0.8 + + .. versionchanged:: 2.4 added ``b`` and ``t`` mode and unicode + support. + + + .. rubric:: Methods related to asynchronous support + + .. versionadded:: 2.2 + + .. seealso:: :ref:`async-support` and :ref:`green-support`. + + + .. attribute:: async + async_ + + Read only attribute: 1 if the connection is asynchronous, 0 otherwise. + + .. versionchanged:: 2.7 added the `!async_` alias for Python versions + where `!async` is a keyword. + + + .. method:: poll() + + Used during an asynchronous connection attempt, or when a cursor is + executing a query on an asynchronous connection, make communication + proceed if it wouldn't block. + + Return one of the constants defined in :ref:`poll-constants`. If it + returns `~psycopg2.extensions.POLL_OK` then the connection has been + established or the query results are available on the client. + Otherwise wait until the file descriptor returned by `fileno()` is + ready to read or to write, as explained in :ref:`async-support`. + `poll()` should be also used by the function installed by + `~psycopg2.extensions.set_wait_callback()` as explained in + :ref:`green-support`. + + `poll()` is also used to receive asynchronous notifications from the + database: see :ref:`async-notify` from further details. + + + .. 
method:: fileno() + + Return the file descriptor underlying the connection: useful to read + its status during asynchronous communication. + + + .. method:: isexecuting() + + Return `!True` if the connection is executing an asynchronous operation. + + + .. rubric:: Interoperation with other C API modules + + .. attribute:: pgconn_ptr + + Return the internal `!PGconn*` as integer. Useful to pass the libpq + raw connection structure to C functions, e.g. via `ctypes`:: + + >>> import ctypes + >>> import ctypes.util + >>> libpq = ctypes.pydll.LoadLibrary(ctypes.util.find_library('pq')) + >>> libpq.PQserverVersion.argtypes = [ctypes.c_void_p] + >>> libpq.PQserverVersion.restype = ctypes.c_int + >>> libpq.PQserverVersion(conn.pgconn_ptr) + 90611 + + .. versionadded:: 2.8 + + + .. method:: get_native_connection() + + Return the internal `!PGconn*` wrapped in a PyCapsule object. This is + only useful for passing the `libpq` raw connection associated to this + connection object to other C-level modules that may have a use for it. + + .. seealso:: Python C API `Capsules`__ docs. + + .. __: https://docs.python.org/3.1/c-api/capsule.html + + .. versionadded:: 2.8 + + + + .. rubric:: informative methods of the native connection + + .. note:: + + These methods are better accessed using the `~connection.info` + attributes and may be dropped in future versions. + + + .. index:: + pair: Transaction; Status + + .. method:: get_transaction_status() + + Also available as `~connection.info`\ `!.`\ + `~psycopg2.extensions.ConnectionInfo.transaction_status`. + + Return the current session transaction status as an integer. Symbolic + constants for the values are defined in the module + `psycopg2.extensions`: see :ref:`transaction-status-constants` + for the available values. + + .. seealso:: libpq docs for `PQtransactionStatus()`__ for details. + + .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQTRANSACTIONSTATUS + + + .. 
index:: + pair: Protocol; Version + + .. attribute:: protocol_version + + Also available as `~connection.info`\ `!.`\ + `~psycopg2.extensions.ConnectionInfo.protocol_version`. + + A read-only integer representing frontend/backend protocol being used. + Currently Psycopg supports only protocol 3, which allows connection + to PostgreSQL server from version 7.4. Psycopg versions previous than + 2.3 support both protocols 2 and 3. + + .. seealso:: libpq docs for `PQprotocolVersion()`__ for details. + + .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQPROTOCOLVERSION + + .. versionadded:: 2.0.12 + + + .. index:: + pair: Server; Version + + .. attribute:: server_version + + Also available as `~connection.info`\ `!.`\ + `~psycopg2.extensions.ConnectionInfo.server_version`. + + A read-only integer representing the backend version. + + The number is formed by converting the major, minor, and revision + numbers into two-decimal-digit numbers and appending them together. + For example, version 8.1.5 will be returned as ``80105``. + + .. seealso:: libpq docs for `PQserverVersion()`__ for details. + + .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQSERVERVERSION + + .. versionadded:: 2.0.12 + + + .. index:: + pair: Backend; PID + + .. method:: get_backend_pid() + + Also available as `~connection.info`\ `!.`\ + `~psycopg2.extensions.ConnectionInfo.backend_pid`. + + Returns the process ID (PID) of the backend server process *you + connected to*. Note that if you use a connection pool service such as + PgBouncer_ this value will not be updated if your connection is + switched to a different backend. + + Note that the PID belongs to a process executing on the database + server host, not the local host! + + .. seealso:: libpq docs for `PQbackendPID()`__ for details. + + .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQBACKENDPID + + .. versionadded:: 2.0.8 + + + .. 
index:: + pair: Server; Parameters + + .. method:: get_parameter_status(parameter) + + Also available as `~connection.info`\ `!.`\ + `~psycopg2.extensions.ConnectionInfo.parameter_status()`. + + Look up a current parameter setting of the server. + + Potential values for ``parameter`` are: ``server_version``, + ``server_encoding``, ``client_encoding``, ``is_superuser``, + ``session_authorization``, ``DateStyle``, ``TimeZone``, + ``integer_datetimes``, and ``standard_conforming_strings``. + + If server did not report requested parameter, return `!None`. + + .. seealso:: libpq docs for `PQparameterStatus()`__ for details. + + .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQPARAMETERSTATUS + + .. versionadded:: 2.0.12 + + + .. index:: + pair: Connection; Parameters + + .. method:: get_dsn_parameters() + + Also available as `~connection.info`\ `!.`\ + `~psycopg2.extensions.ConnectionInfo.dsn_parameters`. + + Get the effective dsn parameters for the connection as a dictionary. + + The *password* parameter is removed from the result. + + Example:: + + >>> conn.get_dsn_parameters() + {'dbname': 'test', 'user': 'postgres', 'port': '5432', 'sslmode': 'prefer'} + + Requires libpq >= 9.3. + + .. seealso:: libpq docs for `PQconninfo()`__ for details. + + .. __: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNINFO + + .. versionadded:: 2.7 + + +.. testcode:: + :hide: + + conn.rollback() diff --git a/doc/src/cursor.rst b/doc/src/cursor.rst new file mode 100644 index 0000000000000000000000000000000000000000..2123b178edfa0684a474603ff28b324c59ab6e2f --- /dev/null +++ b/doc/src/cursor.rst @@ -0,0 +1,668 @@ +The ``cursor`` class +==================== + +.. sectionauthor:: Daniele Varrazzo + +.. 
testsetup:: * + + from StringIO import StringIO + import sys + + create_test_table() + + # initial data + cur.executemany("INSERT INTO test (num, data) VALUES (%s, %s)", + [(100, "abc'def"), (None, 'dada'), (42, 'bar')]) + conn.commit() + + +.. class:: cursor + + Allows Python code to execute PostgreSQL commands in a database session. + Cursors are created by the `connection.cursor()` method: they are + bound to the connection for the entire lifetime and all the commands are + executed in the context of the database session wrapped by the connection. + + Cursors created from the same connection are not isolated, i.e., any + changes done to the database by a cursor are immediately visible by the + other cursors. Cursors created from different connections can or can not + be isolated, depending on the connections' :ref:`isolation level + `. See also `~connection.rollback()` and + `~connection.commit()` methods. + + Cursors are *not* thread safe: a multithread application can create + many cursors from the same connection and should use each cursor from + a single thread. See :ref:`thread-safety` for details. + + Cursors can be used as context managers: leaving the context will close + the cursor. + + .. code:: python + + with conn.cursor() as curs: + curs.execute(SQL) + + # the cursor is now closed + + + .. attribute:: description + + Read-only attribute describing the result of a query. It is a + sequence of `~psycopg2.extensions.Column` instances, each one + describing one result column in order. The attribute is `!None` for + operations that do not return rows or if the cursor has not had an + operation invoked via the |execute*|_ methods yet. + + For compatibility with the DB-API, every object can be unpacked as a + 7-items sequence: the attributes returned this way are the following. + For further details and other attributes available check the + `~psycopg2.extensions.Column` documentation. + + 0. 
`~psycopg2.extensions.Column.name`: the name of the column returned. + + 1. `~psycopg2.extensions.Column.type_code`: the PostgreSQL OID of the + column. + + 2. `~psycopg2.extensions.Column.display_size`: the actual length of + the column in bytes. + + 3. `~psycopg2.extensions.Column.internal_size`: the size in bytes of + the column associated to this column on the server. + + 4. `~psycopg2.extensions.Column.precision`: total number of + significant digits in columns of type |NUMERIC|. `!None` + for other types. + + 5. `~psycopg2.extensions.Column.scale`: count of decimal digits in + the fractional part in columns of type |NUMERIC|. `!None` + for other types. + + 6. `~psycopg2.extensions.Column.null_ok`: always `!None` as not easy + to retrieve from the libpq. + + .. versionchanged:: 2.4 + if possible, columns descriptions are named tuple instead of + regular tuples. + + .. versionchanged:: 2.8 + columns descriptions are instances of `!Column`, exposing extra + attributes. + + .. |NUMERIC| replace:: :sql:`NUMERIC` + + .. method:: close() + + Close the cursor now (rather than whenever `del` is executed). + The cursor will be unusable from this point forward; an + `~psycopg2.InterfaceError` will be raised if any operation is + attempted with the cursor. + + .. versionchanged:: 2.5 if the cursor is used in a ``with`` statement, + the method is automatically called at the end of the ``with`` + block. + + + .. attribute:: closed + + Read-only boolean attribute: specifies if the cursor is closed + (`!True`) or not (`!False`). + + .. extension:: + + The `closed` attribute is a Psycopg extension to the + |DBAPI|. + + .. versionadded:: 2.0.7 + + + .. attribute:: connection + + Read-only attribute returning a reference to the `connection` + object on which the cursor was created. + + + .. attribute:: name + + Read-only attribute containing the name of the cursor if it was + created as named cursor by `connection.cursor()`, or `!None` if + it is a client side cursor. 
See :ref:`server-side-cursors`. + + .. extension:: + + The `name` attribute is a Psycopg extension to the |DBAPI|. + + + .. attribute:: scrollable + + Read/write attribute: specifies if a named cursor is declared + :sql:`SCROLL`, hence is capable to scroll backwards (using + `~cursor.scroll()`). If `!True`, the cursor can be scrolled backwards, + if `!False` it is never scrollable. If `!None` (default) the cursor + scroll option is not specified, usually but not always meaning no + backward scroll (see the |declare-notes|__). + + .. |declare-notes| replace:: :sql:`DECLARE` notes + .. __: https://www.postgresql.org/docs/current/static/sql-declare.html#SQL-DECLARE-NOTES + + .. note:: + + set the value before calling `~cursor.execute()` or use the + `connection.cursor()` *scrollable* parameter, otherwise the value + will have no effect. + + .. versionadded:: 2.5 + + .. extension:: + + The `scrollable` attribute is a Psycopg extension to the |DBAPI|. + + + .. attribute:: withhold + + Read/write attribute: specifies if a named cursor lifetime should + extend outside of the current transaction, i.e., it is possible to + fetch from the cursor even after a `connection.commit()` (but not after + a `connection.rollback()`). See :ref:`server-side-cursors` + + .. note:: + + set the value before calling `~cursor.execute()` or use the + `connection.cursor()` *withhold* parameter, otherwise the value + will have no effect. + + .. versionadded:: 2.4.3 + + .. extension:: + + The `withhold` attribute is a Psycopg extension to the |DBAPI|. + + + .. |execute*| replace:: `execute*()` + + .. _execute*: + + .. rubric:: Commands execution methods + + + .. method:: execute(query, vars=None) + + Execute a database operation (query or command). + + Parameters may be provided as sequence or mapping and will be bound to + variables in the operation. Variables are specified either with + positional (``%s``) or named (:samp:`%({name})s`) placeholders. See + :ref:`query-parameters`. 
+ + The method returns `!None`. If a query was executed, the returned + values can be retrieved using |fetch*|_ methods. + + + .. method:: executemany(query, vars_list) + + Execute a database operation (query or command) against all parameter + tuples or mappings found in the sequence *vars_list*. + + The function is mostly useful for commands that update the database: + any result set returned by the query is discarded. + + Parameters are bounded to the query using the same rules described in + the `~cursor.execute()` method. + + .. warning:: + In its current implementation this method is not faster than + executing `~cursor.execute()` in a loop. For better performance + you can use the functions described in :ref:`fast-exec`. + + + .. method:: callproc(procname [, parameters]) + + Call a stored database procedure with the given name. The sequence of + parameters must contain one entry for each argument that the procedure + expects. Overloaded procedures are supported. Named parameters can be + used by supplying the parameters as a dictionary. + + This function is, at present, not DBAPI-compliant. The return value is + supposed to consist of the sequence of parameters with modified output + and input/output parameters. In future versions, the DBAPI-compliant + return value may be implemented, but for now the function returns None. + + The procedure may provide a result set as output. This is then made + available through the standard |fetch*|_ methods. + + .. versionchanged:: 2.7 + added support for named arguments. + + .. method:: mogrify(operation [, parameters]) + + Return a query string after arguments binding. The string returned is + exactly the one that would be sent to the database running the + `~cursor.execute()` method or similar. + + The returned string is always a bytes string. + + >>> cur.mogrify("INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar')) + "INSERT INTO test (num, data) VALUES (42, E'bar')" + + .. 
extension:: + + The `mogrify()` method is a Psycopg extension to the |DBAPI|. + + .. method:: setinputsizes(sizes) + + This method is exposed in compliance with the |DBAPI|. It currently + does nothing but it is safe to call it. + + + + .. |fetch*| replace:: `!fetch*()` + + .. _fetch*: + + .. rubric:: Results retrieval methods + + + The following methods are used to read data from the database after an + `~cursor.execute()` call. + + .. _cursor-iterable: + + .. note:: + + `cursor` objects are iterable, so, instead of calling + explicitly `~cursor.fetchone()` in a loop, the object itself can + be used: + + >>> cur.execute("SELECT * FROM test;") + >>> for record in cur: + ... print record + ... + (1, 100, "abc'def") + (2, None, 'dada') + (3, 42, 'bar') + + .. versionchanged:: 2.4 + iterating over a :ref:`named cursor ` + fetches `~cursor.itersize` records at time from the backend. + Previously only one record was fetched per roundtrip, resulting + in a large overhead. + + .. method:: fetchone() + + Fetch the next row of a query result set, returning a single tuple, + or `!None` when no more data is available: + + >>> cur.execute("SELECT * FROM test WHERE id = %s", (3,)) + >>> cur.fetchone() + (3, 42, 'bar') + + A `~psycopg2.ProgrammingError` is raised if the previous call + to |execute*|_ did not produce any result set or no call was issued + yet. + + + .. method:: fetchmany([size=cursor.arraysize]) + + Fetch the next set of rows of a query result, returning a list of + tuples. An empty list is returned when no more rows are available. + + The number of rows to fetch per call is specified by the parameter. + If it is not given, the cursor's `~cursor.arraysize` determines + the number of rows to be fetched. The method should try to fetch as + many rows as indicated by the size parameter. 
If this is not possible + due to the specified number of rows not being available, fewer rows + may be returned: + + >>> cur.execute("SELECT * FROM test;") + >>> cur.fetchmany(2) + [(1, 100, "abc'def"), (2, None, 'dada')] + >>> cur.fetchmany(2) + [(3, 42, 'bar')] + >>> cur.fetchmany(2) + [] + + A `~psycopg2.ProgrammingError` is raised if the previous call to + |execute*|_ did not produce any result set or no call was issued yet. + + Note there are performance considerations involved with the size + parameter. For optimal performance, it is usually best to use the + `~cursor.arraysize` attribute. If the size parameter is used, + then it is best for it to retain the same value from one + `fetchmany()` call to the next. + + + .. method:: fetchall() + + Fetch all (remaining) rows of a query result, returning them as a list + of tuples. An empty list is returned if there is no more record to + fetch. + + >>> cur.execute("SELECT * FROM test;") + >>> cur.fetchall() + [(1, 100, "abc'def"), (2, None, 'dada'), (3, 42, 'bar')] + + A `~psycopg2.ProgrammingError` is raised if the previous call to + |execute*|_ did not produce any result set or no call was issued yet. + + + .. method:: scroll(value [, mode='relative']) + + Scroll the cursor in the result set to a new position according + to mode. + + If `mode` is ``relative`` (default), value is taken as offset to + the current position in the result set, if set to ``absolute``, + value states an absolute target position. + + If the scroll operation would leave the result set, a + `~psycopg2.ProgrammingError` is raised and the cursor position is + not changed. + + .. note:: + + According to the |DBAPI|_, the exception raised for a cursor out + of bound should have been `!IndexError`. 
The best option is + probably to catch both exceptions in your code:: + + try: + cur.scroll(1000 * 1000) + except (ProgrammingError, IndexError), exc: + deal_with_it(exc) + + The method can be used both for client-side cursors and + :ref:`server-side cursors `. Server-side cursors + can usually scroll backwards only if declared `~cursor.scrollable`. + Moving out-of-bound in a server-side cursor doesn't result in an + exception, if the backend doesn't raise any (Postgres doesn't tell us + in a reliable way if we went out of bound). + + + .. attribute:: arraysize + + This read/write attribute specifies the number of rows to fetch at a + time with `~cursor.fetchmany()`. It defaults to 1 meaning to fetch + a single row at a time. + + + .. attribute:: itersize + + Read/write attribute specifying the number of rows to fetch from the + backend at each network roundtrip during :ref:`iteration + ` on a :ref:`named cursor `. The + default is 2000. + + .. versionadded:: 2.4 + + .. extension:: + + The `itersize` attribute is a Psycopg extension to the |DBAPI|. + + + .. attribute:: rowcount + + This read-only attribute specifies the number of rows that the last + |execute*|_ produced (for :abbr:`DQL (Data Query Language)` statements + like :sql:`SELECT`) or affected (for + :abbr:`DML (Data Manipulation Language)` statements like :sql:`UPDATE` + or :sql:`INSERT`). + + The attribute is -1 in case no |execute*| has been performed on + the cursor or the row count of the last operation if it can't be + determined by the interface. + + .. note:: + The |DBAPI|_ interface reserves to redefine the latter case to + have the object return `!None` instead of -1 in future versions + of the specification. + + + .. attribute:: rownumber + + This read-only attribute provides the current 0-based index of the + cursor in the result set or `!None` if the index cannot be + determined. + + The index can be seen as index of the cursor in a sequence (the result + set). 
The next fetch operation will fetch the row indexed by + `rownumber` in that sequence. + + + .. index:: oid + + .. attribute:: lastrowid + + This read-only attribute provides the OID of the last row inserted + by the cursor. If the table wasn't created with OID support or the + last operation is not a single record insert, the attribute is set to + `!None`. + + .. note:: + + PostgreSQL currently advices to not create OIDs on the tables and + the default for |CREATE-TABLE|__ is to not support them. The + |INSERT-RETURNING|__ syntax available from PostgreSQL 8.3 allows + more flexibility. + + .. |CREATE-TABLE| replace:: :sql:`CREATE TABLE` + .. __: https://www.postgresql.org/docs/current/static/sql-createtable.html + + .. |INSERT-RETURNING| replace:: :sql:`INSERT ... RETURNING` + .. __: https://www.postgresql.org/docs/current/static/sql-insert.html + + + .. attribute:: query + + Read-only attribute containing the body of the last query sent to the + backend (including bound arguments) as bytes string. `!None` if no + query has been executed yet: + + >>> cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar')) + >>> cur.query + "INSERT INTO test (num, data) VALUES (42, E'bar')" + + .. extension:: + + The `query` attribute is a Psycopg extension to the |DBAPI|. + + + .. attribute:: statusmessage + + Read-only attribute containing the message returned by the last + command: + + >>> cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar')) + >>> cur.statusmessage + 'INSERT 0 1' + + .. extension:: + + The `statusmessage` attribute is a Psycopg extension to the + |DBAPI|. + + + .. method:: cast(oid, s) + + Convert a value from the PostgreSQL string representation to a Python + object. + + Use the most specific of the typecasters registered by + `~psycopg2.extensions.register_type()`. + + .. versionadded:: 2.4 + + .. extension:: + + The `cast()` method is a Psycopg extension to the |DBAPI|. + + + .. 
attribute:: tzinfo_factory + + The time zone factory used to handle data types such as + :sql:`TIMESTAMP WITH TIME ZONE`. It should be a `~datetime.tzinfo` + object. Default is `datetime.timezone`. + + .. versionchanged:: 2.9 + previosly the default factory was `psycopg2.tz.FixedOffsetTimezone`. + + + .. method:: nextset() + + This method is not supported (PostgreSQL does not have multiple data + sets) and will raise a `~psycopg2.NotSupportedError` exception. + + + .. method:: setoutputsize(size [, column]) + + This method is exposed in compliance with the |DBAPI|. It currently + does nothing but it is safe to call it. + + + + .. rubric:: COPY-related methods + + Efficiently copy data from file-like objects to the database and back. See + :ref:`copy` for an overview. + + .. extension:: + + The :sql:`COPY` command is a PostgreSQL extension to the SQL standard. + As such, its support is a Psycopg extension to the |DBAPI|. + + .. method:: copy_from(file, table, sep='\\t', null='\\\\N', size=8192, columns=None) + + Read data *from* the file-like object *file* appending them to + the table named *table*. + + :param file: file-like object to read data from. It must have both + `!read()` and `!readline()` methods. + :param table: name of the table to copy data into. + :param sep: columns separator expected in the file. Defaults to a tab. + :param null: textual representation of :sql:`NULL` in the file. + The default is the two characters string ``\N``. + :param size: size of the buffer used to read from the file. + :param columns: iterable with name of the columns to import. + The length and types should match the content of the file to read. + If not specified, it is assumed that the entire table matches the + file structure. + + Example:: + + >>> f = StringIO("42\tfoo\n74\tbar\n") + >>> cur.copy_from(f, 'test', columns=('num', 'data')) + >>> cur.execute("select * from test where id > 5;") + >>> cur.fetchall() + [(6, 42, 'foo'), (7, 74, 'bar')] + + .. 
note:: the name of the table is not quoted: if the table name + contains uppercase letters or special characters it must be quoted + with double quotes:: + + cur.copy_from(f, '"TABLE"') + + + .. versionchanged:: 2.0.6 + added the *columns* parameter. + + .. versionchanged:: 2.4 + data read from files implementing the `io.TextIOBase` interface + are encoded in the connection `~connection.encoding` when sent to + the backend. + + .. method:: copy_to(file, table, sep='\\t', null='\\\\N', columns=None) + + Write the content of the table named *table* *to* the file-like + object *file*. See :ref:`copy` for an overview. + + :param file: file-like object to write data into. It must have a + `!write()` method. + :param table: name of the table to copy data from. + :param sep: columns separator expected in the file. Defaults to a tab. + :param null: textual representation of :sql:`NULL` in the file. + The default is the two characters string ``\N``. + :param columns: iterable with name of the columns to export. + If not specified, export all the columns. + + Example:: + + >>> cur.copy_to(sys.stdout, 'test', sep="|") + 1|100|abc'def + 2|\N|dada + ... + + .. note:: the name of the table is not quoted: if the table name + contains uppercase letters or special characters it must be quoted + with double quotes:: + + cur.copy_to(f, '"TABLE"') + + .. versionchanged:: 2.0.6 + added the *columns* parameter. + + .. versionchanged:: 2.4 + data sent to files implementing the `io.TextIOBase` interface + are decoded in the connection `~connection.encoding` when read + from the backend. + + + .. method:: copy_expert(sql, file, size=8192) + + Submit a user-composed :sql:`COPY` statement. The method is useful to + handle all the parameters that PostgreSQL makes available (see + |COPY|__ command documentation). + + :param sql: the :sql:`COPY` statement to execute. + :param file: a file-like object to read or write (according to *sql*). 
+ :param size: size of the read buffer to be used in :sql:`COPY FROM`. + + The *sql* statement should be in the form :samp:`COPY {table} TO + STDOUT` to export :samp:`{table}` to the *file* object passed as + argument or :samp:`COPY {table} FROM STDIN` to import the content of + the *file* object into :samp:`{table}`. If you need to compose a + :sql:`COPY` statement dynamically (because table, fields, or query + parameters are in Python variables) you may use the objects provided + by the `psycopg2.sql` module. + + *file* must be a readable file-like object (as required by + `~cursor.copy_from()`) for *sql* statement :sql:`COPY ... FROM STDIN` + or a writable one (as required by `~cursor.copy_to()`) for :sql:`COPY + ... TO STDOUT`. + + Example: + + >>> cur.copy_expert("COPY test TO STDOUT WITH CSV HEADER", sys.stdout) + id,num,data + 1,100,abc'def + 2,,dada + ... + + .. |COPY| replace:: :sql:`COPY` + .. __: https://www.postgresql.org/docs/current/static/sql-copy.html + + .. versionadded:: 2.0.6 + + .. versionchanged:: 2.4 + files implementing the `io.TextIOBase` interface are dealt with + using Unicode data instead of bytes. + + + .. rubric:: Interoperation with other C API modules + + .. attribute:: pgresult_ptr + + Return the cursor's internal `!PGresult*` as integer. Useful to pass + the libpq raw result structure to C functions, e.g. via `ctypes`:: + + >>> import ctypes + >>> libpq = ctypes.pydll.LoadLibrary(ctypes.util.find_library('pq')) + >>> libpq.PQcmdStatus.argtypes = [ctypes.c_void_p] + >>> libpq.PQcmdStatus.restype = ctypes.c_char_p + + >>> curs.execute("select 'x'") + >>> libpq.PQcmdStatus(curs.pgresult_ptr) + b'SELECT 1' + + .. versionadded:: 2.8 + +.. 
testcode:: + :hide: + + conn.rollback() diff --git a/doc/src/errorcodes.rst b/doc/src/errorcodes.rst new file mode 100644 index 0000000000000000000000000000000000000000..2966efa2f5ced519cc1ad9ceb2bed8c45fd40d28 --- /dev/null +++ b/doc/src/errorcodes.rst @@ -0,0 +1,76 @@ +`psycopg2.errorcodes` -- Error codes defined by PostgreSQL +=============================================================== + +.. sectionauthor:: Daniele Varrazzo + +.. index:: + single: Error; Codes + +.. module:: psycopg2.errorcodes + +.. testsetup:: * + + from psycopg2 import errorcodes + +.. versionadded:: 2.0.6 + +This module contains symbolic names for all PostgreSQL error codes and error +classes codes. Subclasses of `~psycopg2.Error` make the PostgreSQL error +code available in the `~psycopg2.Error.pgcode` attribute. + +From PostgreSQL documentation: + + All messages emitted by the PostgreSQL server are assigned five-character + error codes that follow the SQL standard's conventions for :sql:`SQLSTATE` + codes. Applications that need to know which error condition has occurred + should usually test the error code, rather than looking at the textual + error message. The error codes are less likely to change across + PostgreSQL releases, and also are not subject to change due to + localization of error messages. Note that some, but not all, of the error + codes produced by PostgreSQL are defined by the SQL standard; some + additional error codes for conditions not defined by the standard have + been invented or borrowed from other databases. + + According to the standard, the first two characters of an error code + denote a class of errors, while the last three characters indicate a + specific condition within that class. Thus, an application that does not + recognize the specific error code can still be able to infer what to do + from the error class. + +.. seealso:: `PostgreSQL Error Codes table`__ + + .. 
__: https://www.postgresql.org/docs/current/static/errcodes-appendix.html#ERRCODES-TABLE + + +An example of the available constants defined in the module: + + >>> errorcodes.CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION + '42' + >>> errorcodes.UNDEFINED_TABLE + '42P01' + +Constants representing all the error values defined by PostgreSQL versions +between 8.1 and 13 are included in the module. + + +.. autofunction:: lookup(code) + + .. doctest:: + + >>> try: + ... cur.execute("SELECT ouch FROM aargh;") + ... except Exception as e: + ... pass + ... + >>> errorcodes.lookup(e.pgcode[:2]) + 'CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION' + >>> errorcodes.lookup(e.pgcode) + 'UNDEFINED_TABLE' + + .. versionadded:: 2.0.14 + + +.. testcode:: + :hide: + + conn.rollback() diff --git a/doc/src/errors.rst b/doc/src/errors.rst new file mode 100644 index 0000000000000000000000000000000000000000..d1aed1366c81326d81029d7ce7616e47a75e7a38 --- /dev/null +++ b/doc/src/errors.rst @@ -0,0 +1,83 @@ +`psycopg2.errors` -- Exception classes mapping PostgreSQL errors +================================================================ + +.. sectionauthor:: Daniele Varrazzo + +.. index:: + single: Error; Class + +.. module:: psycopg2.errors + +.. versionadded:: 2.8 + +.. versionchanged:: 2.8.4 added errors introduced in PostgreSQL 12 + +.. versionchanged:: 2.8.6 added errors introduced in PostgreSQL 13 + +This module exposes the classes psycopg raises upon receiving an error from +the database with a :sql:`SQLSTATE` value attached (available in the +`~psycopg2.Error.pgcode` attribute). The content of the module is generated +from the PostgreSQL source code and includes classes for every error defined +by PostgreSQL in versions between 9.1 and 13. + +Every class in the module is named after what referred as "condition name" `in +the documentation`__, converted to CamelCase: e.g. the error 22012, +``division_by_zero`` is exposed by this module as the class `!DivisionByZero`. + +.. 
__: https://www.postgresql.org/docs/current/static/errcodes-appendix.html#ERRCODES-TABLE
+
+Every exception class is a subclass of one of the :ref:`standard DB-API
+exception ` and exposes the `~psycopg2.Error` interface.
+Each class' superclass is what used to be raised by psycopg in versions before
+the introduction of this module, so everything should be compatible with
+previously written code catching one of the DB-API classes: if your code used
+to catch `!IntegrityError` to detect a duplicate entry, it will keep on
+working even if a more specialised subclass such as `UniqueViolation` is
+raised.
+
+The new classes allow a more idiomatic way to check and process a specific
+error among the many the database may return. For instance, in order to check
+that a table is locked, the following code could have been used previously:
+
+.. code-block:: python
+
+    try:
+        cur.execute("LOCK TABLE mytable IN ACCESS EXCLUSIVE MODE NOWAIT")
+    except psycopg2.OperationalError as e:
+        if e.pgcode == psycopg2.errorcodes.LOCK_NOT_AVAILABLE:
+            locked = True
+        else:
+            raise
+
+While this method is still available, the specialised class allows for a more
+idiomatic error handler:
+
+.. code-block:: python
+
+    try:
+        cur.execute("LOCK TABLE mytable IN ACCESS EXCLUSIVE MODE NOWAIT")
+    except psycopg2.errors.LockNotAvailable:
+        locked = True
+
+
+.. autofunction:: lookup
+
+    .. code-block:: python
+
+        try:
+            cur.execute("LOCK TABLE mytable IN ACCESS EXCLUSIVE MODE NOWAIT")
+        except psycopg2.errors.lookup("55P03"):
+            locked = True
+
+
+SQLSTATE exception classes
+--------------------------
+
+The following table contains the list of all the SQLSTATE classes exposed by
+the module.
+
+Note that, for completeness, the module also exposes all the
+:ref:`DB-API-defined exceptions ` and :ref:`a few
+psycopg-specific ones ` exposed by the `!extensions`
+module, which are not listed here.
+
+..
include:: sqlstate_errors.rst diff --git a/doc/src/extensions.rst b/doc/src/extensions.rst new file mode 100644 index 0000000000000000000000000000000000000000..763910dc35b40315e6214bc1a7d6970c9a2e2f26 --- /dev/null +++ b/doc/src/extensions.rst @@ -0,0 +1,1010 @@ +`psycopg2.extensions` -- Extensions to the DB API +====================================================== + +.. sectionauthor:: Daniele Varrazzo + +.. module:: psycopg2.extensions + +.. testsetup:: * + + from psycopg2.extensions import AsIs, Binary, QuotedString, ISOLATION_LEVEL_AUTOCOMMIT + +The module contains a few objects and function extending the minimum set of +functionalities defined by the |DBAPI|_. + +Classes definitions +------------------- + +Instances of these classes are usually returned by factory functions or +attributes. Their definitions are exposed here to allow subclassing, +introspection etc. + +.. class:: connection(dsn, async=False) + + Is the class usually returned by the `~psycopg2.connect()` function. + It is exposed by the `extensions` module in order to allow + subclassing to extend its behaviour: the subclass should be passed to the + `!connect()` function using the `connection_factory` parameter. + See also :ref:`subclassing-connection`. + + For a complete description of the class, see `connection`. + + .. versionchanged:: 2.7 + *async_* can be used as alias for *async*. + +.. class:: cursor(conn, name=None) + + It is the class usually returned by the `connection.cursor()` + method. It is exposed by the `extensions` module in order to allow + subclassing to extend its behaviour: the subclass should be passed to the + `!cursor()` method using the `cursor_factory` parameter. See + also :ref:`subclassing-cursor`. + + For a complete description of the class, see `cursor`. + + +.. class:: lobject(conn [, oid [, mode [, new_oid [, new_file ]]]]) + + Wrapper for a PostgreSQL large object. See :ref:`large-objects` for an + overview. 
+ + The class can be subclassed: see the `connection.lobject()` to know + how to specify a `!lobject` subclass. + + .. versionadded:: 2.0.8 + + .. attribute:: oid + + Database OID of the object. + + + .. attribute:: mode + + The mode the database was open. See `connection.lobject()` for a + description of the available modes. + + + .. method:: read(bytes=-1) + + Read a chunk of data from the current file position. If -1 (default) + read all the remaining data. + + The result is an Unicode string (decoded according to + `connection.encoding`) if the file was open in ``t`` mode, a bytes + string for ``b`` mode. + + .. versionchanged:: 2.4 + added Unicode support. + + + .. method:: write(str) + + Write a string to the large object. Return the number of bytes + written. Unicode strings are encoded in the `connection.encoding` + before writing. + + .. versionchanged:: 2.4 + added Unicode support. + + + .. method:: export(file_name) + + Export the large object content to the file system. + + The method uses the efficient |lo_export|_ libpq function. + + .. |lo_export| replace:: `!lo_export()` + .. _lo_export: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-EXPORT + + + .. method:: seek(offset, whence=0) + + Set the lobject current position. + + .. versionchanged:: 2.6 + added support for *offset* > 2GB. + + + .. method:: tell() + + Return the lobject current position. + + .. versionadded:: 2.2 + + .. versionchanged:: 2.6 + added support for return value > 2GB. + + + .. method:: truncate(len=0) + + Truncate the lobject to the given size. + + The method will only be available if Psycopg has been built against + libpq from PostgreSQL 8.3 or later and can only be used with + PostgreSQL servers running these versions. It uses the |lo_truncate|_ + libpq function. + + .. |lo_truncate| replace:: `!lo_truncate()` + .. _lo_truncate: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-TRUNCATE + + .. versionadded:: 2.2 + + .. 
versionchanged:: 2.6 + added support for *len* > 2GB. + + .. warning:: + + If Psycopg is built with |lo_truncate| support or with the 64 bits API + support (resp. from PostgreSQL versions 8.3 and 9.3) but at runtime an + older version of the dynamic library is found, the ``psycopg2`` module + will fail to import. See :ref:`the lo_truncate FAQ ` + about the problem. + + + .. method:: close() + + Close the object. + + .. attribute:: closed + + Boolean attribute specifying if the object is closed. + + .. method:: unlink() + + Close the object and remove it from the database. + + + +.. autoclass:: ConnectionInfo(connection) + + .. versionadded:: 2.8 + + .. autoattribute:: dbname + .. autoattribute:: user + .. autoattribute:: password + .. autoattribute:: host + .. autoattribute:: port + .. autoattribute:: options + .. autoattribute:: dsn_parameters + + Example:: + + >>> conn.info.dsn_parameters + {'dbname': 'test', 'user': 'postgres', 'port': '5432', 'sslmode': 'prefer'} + + Requires libpq >= 9.3. + + .. autoattribute:: status + .. autoattribute:: transaction_status + .. automethod:: parameter_status(name) + + .. autoattribute:: protocol_version + + Currently Psycopg supports only protocol 3, which allows connection + to PostgreSQL server from version 7.4. Psycopg versions previous than + 2.3 support both protocols 2 and 3. + + .. autoattribute:: server_version + + The number is formed by converting the major, minor, and revision + numbers into two-decimal-digit numbers and appending them together. + After PostgreSQL 10 the minor version was dropped, so the second group + of digits is always ``00``. For example, version 9.3.5 will be + returned as ``90305``, version 10.2 as ``100002``. + + .. autoattribute:: error_message + .. autoattribute:: socket + .. autoattribute:: backend_pid + .. autoattribute:: needs_password + .. autoattribute:: used_password + .. autoattribute:: ssl_in_use + .. automethod:: ssl_attribute(name) + .. autoattribute:: ssl_attribute_names + + +.. 
class:: Column(\*args, \*\*kwargs)
+
+    Description of one result column, exposed as items of the
+    `cursor.description` sequence.
+
+    .. versionadded:: 2.8
+
+        in previous versions the `!description` attribute was a sequence of
+        simple tuples or namedtuples.
+
+    .. attribute:: name
+
+        The name of the column returned.
+
+    .. attribute:: type_code
+
+        The PostgreSQL OID of the column. You can use the |pg_type|_ system
+        table to get more information about the type. This is the value used
+        by Psycopg to decide what Python type to use to represent the value.
+        See also :ref:`type-casting-from-sql-to-python`.
+
+    .. attribute:: display_size
+
+        Supposed to be the actual length of the column in bytes. Obtaining
+        this value is computationally intensive, so it is always `!None`.
+
+        .. versionchanged:: 2.8
+            It was previously possible to obtain this value using a compiler
+            flag at build time.
+
+    .. attribute:: internal_size
+
+        The size in bytes of the column associated to this column on the
+        server. Set to a negative value for variable-size types. See also
+        PQfsize_.
+
+    .. attribute:: precision
+
+        Total number of significant digits in columns of type |NUMERIC|_.
+        `!None` for other types.
+
+    .. attribute:: scale
+
+        Count of decimal digits in the fractional part in columns of type
+        |NUMERIC|. `!None` for other types.
+
+    .. attribute:: null_ok
+
+        Always `!None` as not easy to retrieve from the libpq.
+
+    .. attribute:: table_oid
+
+        The oid of the table from which the column was fetched (matching
+        :sql:`pg_class.oid`). `!None` if the column is not a simple reference
+        to a table column. See also PQftable_.
+
+        .. versionadded:: 2.8
+
+    .. attribute:: table_column
+
+        The number of the column (within its table) making up the result
+        (matching :sql:`pg_attribute.attnum`, so it will start from 1).
+        `!None` if the column is not a simple reference to a table column.
+        See also PQftablecol_.
+
+        .. versionadded:: 2.8
+
+    .. |pg_type| replace:: :sql:`pg_type`
+    ..
_pg_type: https://www.postgresql.org/docs/current/static/catalog-pg-type.html + .. _PQgetlength: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQGETLENGTH + .. _PQfsize: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQFSIZE + .. _PQftable: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQFTABLE + .. _PQftablecol: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQFTABLECOL + .. _NUMERIC: https://www.postgresql.org/docs/current/static/datatype-numeric.html#DATATYPE-NUMERIC-DECIMAL + .. |NUMERIC| replace:: :sql:`NUMERIC` + +.. autoclass:: Notify(pid, channel, payload='') + :members: pid, channel, payload + + .. versionadded:: 2.3 + + +.. autoclass:: Xid(format_id, gtrid, bqual) + :members: format_id, gtrid, bqual, prepared, owner, database + + .. versionadded:: 2.3 + + .. automethod:: from_string(s) + + +.. autoclass:: Diagnostics(exception) + + .. versionadded:: 2.5 + + The attributes currently available are: + + .. attribute:: + column_name + constraint_name + context + datatype_name + internal_position + internal_query + message_detail + message_hint + message_primary + schema_name + severity + severity_nonlocalized + source_file + source_function + source_line + sqlstate + statement_position + table_name + + A string with the error field if available; `!None` if not available. + The attribute value is available only if the error sent by the server: + not all the fields are available for all the errors and for all the + server versions. + + .. versionadded:: 2.8 + The `!severity_nonlocalized` attribute. + + + +.. _sql-adaptation-objects: + +SQL adaptation protocol objects +------------------------------- + +Psycopg provides a flexible system to adapt Python objects to the SQL syntax +(inspired to the :pep:`246`), allowing serialization in PostgreSQL. See +:ref:`adapting-new-types` for a detailed description. The following objects +deal with Python objects adaptation: + +.. 
function:: adapt(obj) + + Return the SQL representation of *obj* as an `ISQLQuote`. Raise a + `~psycopg2.ProgrammingError` if how to adapt the object is unknown. + In order to allow new objects to be adapted, register a new adapter for it + using the `register_adapter()` function. + + The function is the entry point of the adaptation mechanism: it can be + used to write adapters for complex objects by recursively calling + `!adapt()` on its components. + +.. function:: register_adapter(class, adapter) + + Register a new adapter for the objects of class *class*. + + *adapter* should be a function taking a single argument (the object + to adapt) and returning an object conforming to the `ISQLQuote` + protocol (e.g. exposing a `!getquoted()` method). The `AsIs` is + often useful for this task. + + Once an object is registered, it can be safely used in SQL queries and by + the `adapt()` function. + +.. class:: ISQLQuote(wrapped_object) + + Represents the SQL adaptation protocol. Objects conforming this protocol + should implement a `getquoted()` and optionally a `prepare()` method. + + Adapters may subclass `!ISQLQuote`, but is not necessary: it is + enough to expose a `!getquoted()` method to be conforming. + + .. attribute:: _wrapped + + The wrapped object passes to the constructor + + .. method:: getquoted() + + Subclasses or other conforming objects should return a valid SQL + string representing the wrapped object. In Python 3 the SQL must be + returned in a `!bytes` object. The `!ISQLQuote` implementation does + nothing. + + .. method:: prepare(conn) + + Prepare the adapter for a connection. The method is optional: if + implemented, it will be invoked before `!getquoted()` with the + connection to adapt for as argument. + + A conform object can implement this method if the SQL + representation depends on any server parameter, such as the server + version or the :envvar:`standard_conforming_string` setting. 
Container + objects may store the connection and use it to recursively prepare + contained objects: see the implementation for + `psycopg2.extensions.SQL_IN` for a simple example. + + +.. class:: AsIs(object) + + Adapter conform to the `ISQLQuote` protocol useful for objects + whose string representation is already valid as SQL representation. + + .. method:: getquoted() + + Return the `str()` conversion of the wrapped object. + + >>> AsIs(42).getquoted() + '42' + +.. class:: QuotedString(str) + + Adapter conform to the `ISQLQuote` protocol for string-like + objects. + + .. method:: getquoted() + + Return the string enclosed in single quotes. Any single quote appearing + in the string is escaped by doubling it according to SQL string + constants syntax. Backslashes are escaped too. + + >>> QuotedString(r"O'Reilly").getquoted() + "'O''Reilly'" + +.. class:: Binary(str) + + Adapter conform to the `ISQLQuote` protocol for binary objects. + + .. method:: getquoted() + + Return the string enclosed in single quotes. It performs the same + escaping of the `QuotedString` adapter, plus it knows how to + escape non-printable chars. + + >>> Binary("\x00\x08\x0F").getquoted() + "'\\\\000\\\\010\\\\017'" + + .. versionchanged:: 2.0.14 + previously the adapter was not exposed by the `extensions` + module. In older versions it can be imported from the implementation + module `!psycopg2._psycopg`. + + + +.. class:: Boolean + Float + SQL_IN + + Specialized adapters for builtin objects. + +.. class:: DateFromPy + TimeFromPy + TimestampFromPy + IntervalFromPy + + Specialized adapters for Python datetime objects. + +.. data:: adapters + + Dictionary of the currently registered object adapters. Use + `register_adapter()` to add an adapter for a new type. + + + +Database types casting functions +-------------------------------- + +These functions are used to manipulate type casters to convert from PostgreSQL +types to Python objects. 
See :ref:`type-casting-from-sql-to-python` for +details. + +.. function:: new_type(oids, name, adapter) + + Create a new type caster to convert from a PostgreSQL type to a Python + object. The object created must be registered using + `register_type()` to be used. + + :param oids: tuple of OIDs of the PostgreSQL type to convert. + :param name: the name of the new type adapter. + :param adapter: the adaptation function. + + The object OID can be read from the `cursor.description` attribute + or by querying from the PostgreSQL catalog. + + *adapter* should have signature :samp:`fun({value}, {cur})` where + *value* is the string representation returned by PostgreSQL and + *cur* is the cursor from which data are read. In case of + :sql:`NULL`, *value* will be `!None`. The adapter should return the + converted object. + + See :ref:`type-casting-from-sql-to-python` for an usage example. + + +.. function:: new_array_type(oids, name, base_caster) + + Create a new type caster to convert from a PostgreSQL array type to a list + of Python object. The object created must be registered using + `register_type()` to be used. + + :param oids: tuple of OIDs of the PostgreSQL type to convert. It should + probably contain the oid of the array type (e.g. the ``typarray`` + field in the ``pg_type`` table). + :param name: the name of the new type adapter. + :param base_caster: a Psycopg typecaster, e.g. created using the + `new_type()` function. The caster should be able to parse a single + item of the desired type. + + .. versionadded:: 2.4.3 + + .. _cast-array-unknown: + + .. note:: + + The function can be used to create a generic array typecaster, + returning a list of strings: just use `psycopg2.STRING` as base + typecaster. 
For instance, if you want to receive an array of + :sql:`macaddr` from the database, each address represented by string, + you can use:: + + # select typarray from pg_type where typname = 'macaddr' -> 1040 + psycopg2.extensions.register_type( + psycopg2.extensions.new_array_type( + (1040,), 'MACADDR[]', psycopg2.STRING)) + + +.. function:: register_type(obj [, scope]) + + Register a type caster created using `new_type()`. + + If *scope* is specified, it should be a `connection` or a + `cursor`: the type caster will be effective only limited to the + specified object. Otherwise it will be globally registered. + + +.. data:: string_types + + The global register of type casters. + + +.. index:: + single: Encoding; Mapping + +.. data:: encodings + + Mapping from `PostgreSQL encoding`__ to `Python encoding`__ names. + Used by Psycopg when adapting or casting unicode strings. See + :ref:`unicode-handling`. + + .. __: https://www.postgresql.org/docs/current/static/multibyte.html + .. __: https://docs.python.org/library/codecs.html#standard-encodings + + + +.. index:: + single: Exceptions; Additional + +.. _extension-exceptions: + +Additional exceptions +--------------------- + +The module exports a few exceptions in addition to the :ref:`standard ones +` defined by the |DBAPI|_. + +.. note:: + From psycopg 2.8 these error classes are also exposed by the + `psycopg2.errors` module. + + +.. exception:: QueryCanceledError + + (subclasses `~psycopg2.OperationalError`) + + Error related to SQL query cancellation. It can be trapped specifically to + detect a timeout. + + .. versionadded:: 2.0.7 + + +.. exception:: TransactionRollbackError + + (subclasses `~psycopg2.OperationalError`) + + Error causing transaction rollback (deadlocks, serialization failures, + etc). It can be trapped specifically to detect a deadlock. + + .. versionadded:: 2.0.7 + + + +.. 
_coroutines-functions: + +Coroutines support functions +---------------------------- + +These functions are used to set and retrieve the callback function for +:ref:`cooperation with coroutine libraries `. + +.. versionadded:: 2.2 + +.. autofunction:: set_wait_callback(f) + +.. autofunction:: get_wait_callback() + + + +Other functions +--------------- + +.. function:: libpq_version() + + Return the version number of the ``libpq`` dynamic library loaded as an + integer, in the same format of `~connection.server_version`. + + Raise `~psycopg2.NotSupportedError` if the ``psycopg2`` module was + compiled with a ``libpq`` version lesser than 9.1 (which can be detected + by the `~psycopg2.__libpq_version__` constant). + + .. versionadded:: 2.7 + + .. seealso:: libpq docs for `PQlibVersion()`__. + + .. __: https://www.postgresql.org/docs/current/static/libpq-misc.html#LIBPQ-PQLIBVERSION + + +.. function:: make_dsn(dsn=None, \*\*kwargs) + + Create a valid connection string from arguments. + + Put together the arguments in *kwargs* into a connection string. If *dsn* + is specified too, merge the arguments coming from both the sources. If the + same argument name is specified in both the sources, the *kwargs* value + overrides the *dsn* value. + + The input arguments are validated: the output should always be a valid + connection string (as far as `parse_dsn()` is concerned). If not raise + `~psycopg2.ProgrammingError`. + + Example:: + + >>> from psycopg2.extensions import make_dsn + >>> make_dsn('dbname=foo host=example.com', password="s3cr3t") + 'host=example.com password=s3cr3t dbname=foo' + + .. versionadded:: 2.7 + + +.. function:: parse_dsn(dsn) + + Parse connection string into a dictionary of keywords and values. + + Parsing is delegated to the libpq: different versions of the client + library may support different formats or parameters (for example, + `connection URIs`__ are only supported from libpq 9.2). 
Raise + `~psycopg2.ProgrammingError` if the *dsn* is not valid. + + .. __: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING + + Example:: + + >>> from psycopg2.extensions import parse_dsn + >>> parse_dsn('dbname=test user=postgres password=secret') + {'password': 'secret', 'user': 'postgres', 'dbname': 'test'} + >>> parse_dsn("postgresql://someone@example.com/somedb?connect_timeout=10") + {'host': 'example.com', 'user': 'someone', 'dbname': 'somedb', 'connect_timeout': '10'} + + .. versionadded:: 2.7 + + .. seealso:: libpq docs for `PQconninfoParse()`__. + + .. __: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNINFOPARSE + + +.. function:: quote_ident(str, scope) + + Return quoted identifier according to PostgreSQL quoting rules. + + The *scope* must be a `connection` or a `cursor`, the underlying + connection encoding is used for any necessary character conversion. + + .. versionadded:: 2.7 + + .. seealso:: libpq docs for `PQescapeIdentifier()`__ + + .. __: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQESCAPEIDENTIFIER + + +.. method:: encrypt_password(password, user, scope=None, algorithm=None) + + Return the encrypted form of a PostgreSQL password. + + :param password: the cleartext password to encrypt + :param user: the name of the user to use the password for + :param scope: the scope to encrypt the password into; if *algorithm* is + ``md5`` it can be `!None` + :type scope: `connection` or `cursor` + :param algorithm: the password encryption algorithm to use + + The *algorithm* ``md5`` is always supported. Other algorithms are only + supported if the client libpq version is at least 10 and may require a + compatible server version: check the `PostgreSQL encryption + documentation`__ to know the algorithms supported by your server. + + .. 
__: https://www.postgresql.org/docs/current/static/encryption-options.html + + Using `!None` as *algorithm* will result in querying the server to know the + current server password encryption setting, which is a blocking operation: + query the server separately and specify a value for *algorithm* if you + want to maintain a non-blocking behaviour. + + .. versionadded:: 2.8 + + .. seealso:: PostgreSQL docs for the `password_encryption`__ setting, libpq `PQencryptPasswordConn()`__, `PQencryptPassword()`__ functions. + + .. __: https://www.postgresql.org/docs/current/static/runtime-config-connection.html#GUC-PASSWORD-ENCRYPTION + .. __: https://www.postgresql.org/docs/current/static/libpq-misc.html#LIBPQ-PQENCRYPTPASSWORDCONN + .. __: https://www.postgresql.org/docs/current/static/libpq-misc.html#LIBPQ-PQENCRYPTPASSWORD + + + +.. index:: + pair: Isolation level; Constants + +.. _isolation-level-constants: + +Isolation level constants +------------------------- + +Psycopg2 `connection` objects hold informations about the PostgreSQL +`transaction isolation level`_. By default Psycopg doesn't change the default +configuration of the server (`ISOLATION_LEVEL_DEFAULT`); the default for +PostgreSQL servers is typically :sql:`READ COMMITTED`, but this may be changed +in the server configuration files. A different isolation level can be set +through the `~connection.set_isolation_level()` or `~connection.set_session()` +methods. The level can be set to one of the following constants: + +.. data:: ISOLATION_LEVEL_AUTOCOMMIT + + No transaction is started when commands are executed and no + `~connection.commit()` or `~connection.rollback()` is required. + Some PostgreSQL command such as :sql:`CREATE DATABASE` or :sql:`VACUUM` + can't run into a transaction: to run such command use:: + + >>> conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + + See also :ref:`transactions-control`. + +.. 
data:: ISOLATION_LEVEL_READ_UNCOMMITTED + + The :sql:`READ UNCOMMITTED` isolation level is defined in the SQL standard + but not available in the |MVCC| model of PostgreSQL: it is replaced by the + stricter :sql:`READ COMMITTED`. + +.. data:: ISOLATION_LEVEL_READ_COMMITTED + + This is usually the default PostgreSQL value, but a different default may + be set in the database configuration. + + A new transaction is started at the first `~cursor.execute()` command on a + cursor and at each new `!execute()` after a `~connection.commit()` or a + `~connection.rollback()`. The transaction runs in the PostgreSQL + :sql:`READ COMMITTED` isolation level: a :sql:`SELECT` query sees only + data committed before the query began; it never sees either uncommitted + data or changes committed during query execution by concurrent + transactions. + + .. seealso:: `Read Committed Isolation Level`__ in PostgreSQL + documentation. + + .. __: https://www.postgresql.org/docs/current/static/transaction-iso.html#XACT-READ-COMMITTED + +.. data:: ISOLATION_LEVEL_REPEATABLE_READ + + As in `!ISOLATION_LEVEL_READ_COMMITTED`, a new transaction is started at + the first `~cursor.execute()` command. Transactions run at a + :sql:`REPEATABLE READ` isolation level: all the queries in a transaction + see a snapshot as of the start of the transaction, not as of the start of + the current query within the transaction. However applications using this + level must be prepared to retry transactions due to serialization + failures. + + While this level provides a guarantee that each transaction sees a + completely stable view of the database, this view will not necessarily + always be consistent with some serial (one at a time) execution of + concurrent transactions of the same level. + + .. versionchanged:: 2.4.2 + The value was an alias for `!ISOLATION_LEVEL_SERIALIZABLE` before. The + two levels are distinct since PostgreSQL 9.1 + + .. 
seealso:: `Repeatable Read Isolation Level`__ in PostgreSQL + documentation. + + .. __: https://www.postgresql.org/docs/current/static/transaction-iso.html#XACT-REPEATABLE-READ + +.. data:: ISOLATION_LEVEL_SERIALIZABLE + + As in `!ISOLATION_LEVEL_READ_COMMITTED`, a new transaction is started at + the first `~cursor.execute()` command. Transactions run at a + :sql:`SERIALIZABLE` isolation level. This is the strictest transactions + isolation level, equivalent to having the transactions executed serially + rather than concurrently. However applications using this level must be + prepared to retry transactions due to serialization failures. + + Starting from PostgreSQL 9.1, this mode monitors for conditions which + could make execution of a concurrent set of serializable transactions + behave in a manner inconsistent with all possible serial (one at a time) + executions of those transaction. In previous version the behaviour was the + same of the :sql:`REPEATABLE READ` isolation level. + + .. seealso:: `Serializable Isolation Level`__ in PostgreSQL documentation. + + .. __: https://www.postgresql.org/docs/current/static/transaction-iso.html#XACT-SERIALIZABLE + +.. data:: ISOLATION_LEVEL_DEFAULT + + A new transaction is started at the first `~cursor.execute()` command, but + the isolation level is not explicitly selected by Psycopg: the server will + use whatever level is defined in its configuration or by statements + executed within the session outside Pyscopg control. If you want to know + what the value is you can use a query such as :sql:`show + transaction_isolation`. + + .. versionadded:: 2.7 + + +.. index:: + pair: Transaction status; Constants + +.. _transaction-status-constants: + +Transaction status constants +---------------------------- + +These values represent the possible status of a transaction: the current value +can be read using the `connection.info.transaction_status` property. + +.. 
data:: TRANSACTION_STATUS_IDLE + + The session is idle and there is no current transaction. + +.. data:: TRANSACTION_STATUS_ACTIVE + + A command is currently in progress. + +.. data:: TRANSACTION_STATUS_INTRANS + + The session is idle in a valid transaction block. + +.. data:: TRANSACTION_STATUS_INERROR + + The session is idle in a failed transaction block. + +.. data:: TRANSACTION_STATUS_UNKNOWN + + Reported if the connection with the server is bad. + + + +.. index:: + pair: Connection status; Constants + +.. _connection-status-constants: + +Connection status constants +--------------------------- + +These values represent the possible status of a connection: the current value +can be read from the `~connection.status` attribute. + +It is possible to find the connection in other status than the one shown below. +Those are the only states in which a working connection is expected to be found +during the execution of regular Python client code: other states are for +internal usage and Python code should not rely on them. + +.. data:: STATUS_READY + + Connection established. No transaction in progress. + +.. data:: STATUS_BEGIN + + Connection established. A transaction is currently in progress. + +.. data:: STATUS_IN_TRANSACTION + + An alias for `STATUS_BEGIN` + +.. data:: STATUS_PREPARED + + The connection has been prepared for the second phase in a :ref:`two-phase + commit ` transaction. The connection can't be used to send commands + to the database until the transaction is finished with + `~connection.tpc_commit()` or `~connection.tpc_rollback()`. + + .. versionadded:: 2.3 + + + +.. index:: + pair: Poll status; Constants + +.. _poll-constants: + +Poll constants +-------------- + +.. versionadded:: 2.2 + +These values can be returned by `connection.poll()` during asynchronous +connection and communication. They match the values in the libpq enum +`!PostgresPollingStatusType`. See :ref:`async-support` and +:ref:`green-support`. + +.. 
data:: POLL_OK + + The data being read is available, or the file descriptor is ready for + writing: reading or writing will not block. + +.. data:: POLL_READ + + Some data is being read from the backend, but it is not available yet on + the client and reading would block. Upon receiving this value, the client + should wait for the connection file descriptor to be ready *for reading*. + For example:: + + select.select([conn.fileno()], [], []) + +.. data:: POLL_WRITE + + Some data is being sent to the backend but the connection file descriptor + can't currently accept new data. Upon receiving this value, the client + should wait for the connection file descriptor to be ready *for writing*. + For example:: + + select.select([], [conn.fileno()], []) + +.. data:: POLL_ERROR + + There was a problem during connection polling. This value should actually + never be returned: in case of poll error usually an exception containing + the relevant details is raised. + + + +Additional database types +------------------------- + +The `!extensions` module includes typecasters for many standard +PostgreSQL types. These objects allow the conversion of returned data into +Python objects. All the typecasters are automatically registered, except +`UNICODE` and `UNICODEARRAY`: you can register them using +`register_type()` in order to receive Unicode objects instead of strings +from the database. See :ref:`unicode-handling` for details. + +.. data:: BOOLEAN + BYTES + DATE + DECIMAL + FLOAT + INTEGER + INTERVAL + LONGINTEGER + TIME + UNICODE + + Typecasters for basic types. Note that a few other ones (`~psycopg2.BINARY`, + `~psycopg2.DATETIME`, `~psycopg2.NUMBER`, `~psycopg2.ROWID`, + `~psycopg2.STRING`) are exposed by the `psycopg2` module for |DBAPI|_ + compliance. + +.. 
data:: BINARYARRAY + BOOLEANARRAY + BYTESARRAY + DATEARRAY + DATETIMEARRAY + DECIMALARRAY + FLOATARRAY + INTEGERARRAY + INTERVALARRAY + LONGINTEGERARRAY + ROWIDARRAY + STRINGARRAY + TIMEARRAY + UNICODEARRAY + + Typecasters to convert arrays of sql types into Python lists. + +.. data:: PYDATE + PYDATETIME + PYDATETIMETZ + PYINTERVAL + PYTIME + PYDATEARRAY + PYDATETIMEARRAY + PYDATETIMETZARRAY + PYINTERVALARRAY + PYTIMEARRAY + + Typecasters to convert time-related data types to Python `!datetime` + objects. + +.. versionchanged:: 2.2 + previously the `DECIMAL` typecaster and the specific time-related + typecasters (`!PY*` and `!MX*`) were not exposed by the `extensions` + module. In older versions they can be imported from the implementation + module `!psycopg2._psycopg`. + +.. versionadded:: 2.7.2 + the `!*DATETIMETZ*` objects. + +.. versionadded:: 2.8 + the `!BYTES` and `BYTESARRAY` objects. diff --git a/doc/src/extras.rst b/doc/src/extras.rst new file mode 100644 index 0000000000000000000000000000000000000000..96f801ba64a16ed2d113e6396703c841bde2dadb --- /dev/null +++ b/doc/src/extras.rst @@ -0,0 +1,1085 @@ +`psycopg2.extras` -- Miscellaneous goodies for Psycopg 2 +============================================================= + +.. sectionauthor:: Daniele Varrazzo + +.. module:: psycopg2.extras + +.. testsetup:: + + import psycopg2.extras + from psycopg2.extras import Inet + + create_test_table() + +This module is a generic place used to hold little helper functions and +classes until a better place in the distribution is found. + + +.. _cursor-subclasses: + +Connection and cursor subclasses +-------------------------------- + +A few objects that change the way the results are returned by the cursor or +modify the object behavior in some other way. Typically `!cursor` subclasses +are passed as *cursor_factory* argument to `~psycopg2.connect()` so that the +connection's `~connection.cursor()` method will generate objects of this +class. 
Alternatively a `!cursor` subclass can be used one-off by passing it +as the *cursor_factory* argument to the `!cursor()` method. + +If you want to use a `!connection` subclass you can pass it as the +*connection_factory* argument of the `!connect()` function. + + +.. index:: + pair: Cursor; Dictionary + +.. _dict-cursor: + + +Dictionary-like cursor +^^^^^^^^^^^^^^^^^^^^^^ + +The dict cursors allow to access to the attributes of retrieved records +using an interface similar to the Python dictionaries instead of the tuples. + + >>> dict_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) + >>> dict_cur.execute("INSERT INTO test (num, data) VALUES(%s, %s)", + ... (100, "abc'def")) + >>> dict_cur.execute("SELECT * FROM test") + >>> rec = dict_cur.fetchone() + >>> rec['id'] + 1 + >>> rec['num'] + 100 + >>> rec['data'] + "abc'def" + +The records still support indexing as the original tuple: + + >>> rec[2] + "abc'def" + + +.. autoclass:: DictCursor + +.. autoclass:: DictConnection + + .. note:: + + Not very useful since Psycopg 2.5: you can use `psycopg2.connect`\ + ``(dsn, cursor_factory=DictCursor)`` instead of `!DictConnection`. + +.. autoclass:: DictRow + + +Real dictionary cursor +^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: RealDictCursor + +.. autoclass:: RealDictConnection + + .. note:: + + Not very useful since Psycopg 2.5: you can use `psycopg2.connect`\ + ``(dsn, cursor_factory=RealDictCursor)`` instead of + `!RealDictConnection`. + +.. autoclass:: RealDictRow + + + +.. index:: + pair: Cursor; namedtuple + +`namedtuple` cursor +^^^^^^^^^^^^^^^^^^^^ + +.. versionadded:: 2.3 + +.. autoclass:: NamedTupleCursor + +.. autoclass:: NamedTupleConnection + + .. note:: + + Not very useful since Psycopg 2.5: you can use `psycopg2.connect`\ + ``(dsn, cursor_factory=NamedTupleCursor)`` instead of + `!NamedTupleConnection`. + + +.. index:: + pair: Cursor; Logging + +Logging cursor +^^^^^^^^^^^^^^ + +.. autoclass:: LoggingConnection + :members: initialize,filter + +.. 
autoclass:: LoggingCursor + + +.. note:: + + Queries that are executed with `cursor.executemany()` are not logged. + + +.. autoclass:: MinTimeLoggingConnection + :members: initialize,filter + +.. autoclass:: MinTimeLoggingCursor + + + +.. _replication-objects: + +Replication support objects +--------------------------- + +See :ref:`replication-support` for an introduction to the topic. + + +The following replication types are defined: + +.. data:: REPLICATION_LOGICAL +.. data:: REPLICATION_PHYSICAL + + +.. index:: + pair: Connection; replication + +.. autoclass:: LogicalReplicationConnection + + This connection factory class can be used to open a special type of + connection that is used for logical replication. + + Example:: + + from psycopg2.extras import LogicalReplicationConnection + log_conn = psycopg2.connect(dsn, connection_factory=LogicalReplicationConnection) + log_cur = log_conn.cursor() + + +.. autoclass:: PhysicalReplicationConnection + + This connection factory class can be used to open a special type of + connection that is used for physical replication. + + Example:: + + from psycopg2.extras import PhysicalReplicationConnection + phys_conn = psycopg2.connect(dsn, connection_factory=PhysicalReplicationConnection) + phys_cur = phys_conn.cursor() + + Both `LogicalReplicationConnection` and `PhysicalReplicationConnection` use + `ReplicationCursor` for actual communication with the server. + + +.. index:: + pair: Message; replication + +The individual messages in the replication stream are represented by +`ReplicationMessage` objects (both logical and physical type): + +.. autoclass:: ReplicationMessage + + .. attribute:: payload + + The actual data received from the server. + + An instance of either `bytes()` or `unicode()`, depending on the value + of `decode` option passed to `~ReplicationCursor.start_replication()` + on the connection. See `~ReplicationCursor.read_message()` for + details. + + .. 
attribute:: data_size + + The raw size of the message payload (before possible unicode + conversion). + + .. attribute:: data_start + + LSN position of the start of the message. + + .. attribute:: wal_end + + LSN position of the current end of WAL on the server. + + .. attribute:: send_time + + A `~datetime` object representing the server timestamp at the moment + when the message was sent. + + .. attribute:: cursor + + A reference to the corresponding `ReplicationCursor` object. + + +.. index:: + pair: Cursor; replication + +.. autoclass:: ReplicationCursor + + .. method:: create_replication_slot(slot_name, slot_type=None, output_plugin=None) + + Create streaming replication slot. + + :param slot_name: name of the replication slot to be created + :param slot_type: type of replication: should be either + `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL` + :param output_plugin: name of the logical decoding output plugin to be + used by the slot; required for logical + replication connections, disallowed for physical + + Example:: + + log_cur.create_replication_slot("logical1", "test_decoding") + phys_cur.create_replication_slot("physical1") + + # either logical or physical replication connection + cur.create_replication_slot("slot1", slot_type=REPLICATION_LOGICAL) + + When creating a slot on a logical replication connection, a logical + replication slot is created by default. Logical replication requires + name of the logical decoding output plugin to be specified. + + When creating a slot on a physical replication connection, a physical + replication slot is created by default. No output plugin parameter is + required or allowed when creating a physical replication slot. + + In either case the type of slot being created can be specified + explicitly using *slot_type* parameter. + + Replication slots are a feature of PostgreSQL server starting with + version 9.4. + + .. method:: drop_replication_slot(slot_name) + + Drop streaming replication slot. 
+ + :param slot_name: name of the replication slot to drop + + Example:: + + # either logical or physical replication connection + cur.drop_replication_slot("slot1") + + Replication slots are a feature of PostgreSQL server starting with + version 9.4. + + .. method:: start_replication(slot_name=None, slot_type=None, start_lsn=0, timeline=0, options=None, decode=False, status_interval=10) + + Start replication on the connection. + + :param slot_name: name of the replication slot to use; required for + logical replication, physical replication can work + with or without a slot + :param slot_type: type of replication: should be either + `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL` + :param start_lsn: the optional LSN position to start replicating from, + can be an integer or a string of hexadecimal digits + in the form ``XXX/XXX`` + :param timeline: WAL history timeline to start streaming from (optional, + can only be used with physical replication) + :param options: a dictionary of options to pass to logical replication + slot (not allowed with physical replication) + :param decode: a flag indicating that unicode conversion should be + performed on messages received from the server + :param status_interval: time between feedback packets sent to the server + + If a *slot_name* is specified, the slot must exist on the server and + its type must match the replication type used. + + If not specified using *slot_type* parameter, the type of replication + is defined by the type of replication connection. Logical replication + is only allowed on logical replication connection, but physical + replication can be used with both types of connection. + + On the other hand, physical replication doesn't require a named + replication slot to be used, only logical replication does. In any + case logical replication and replication slots are a feature of + PostgreSQL server starting with version 9.4. Physical replication can + be used starting with 9.0. 
+ + If *start_lsn* is specified, the requested stream will start from that + LSN. The default is `!None` which passes the LSN ``0/0`` causing + replay to begin at the last point for which the server got flush + confirmation from the client, or the oldest available point for a new + slot. + + The server might produce an error if a WAL file for the given LSN has + already been recycled or it may silently start streaming from a later + position: the client can verify the actual position using information + provided by the `ReplicationMessage` attributes. The exact server + behavior depends on the type of replication and use of slots. + + The *timeline* parameter can only be specified with physical + replication and only starting with server version 9.3. + + A dictionary of *options* may be passed to the logical decoding plugin + on a logical replication slot. The set of supported options depends + on the output plugin that was used to create the slot. Must be + `!None` for physical replication. + + If *decode* is set to `!True` the messages received from the server + would be converted according to the connection `~connection.encoding`. + *This parameter should not be set with physical replication or with + logical replication plugins that produce binary output.* + + Replication stream should periodically send feedback to the database + to prevent disconnect via timeout. Feedback is automatically sent when + `read_message()` is called or during run of the `consume_stream()`. + To specify the feedback interval use *status_interval* parameter. + The value of this parameter must be set to at least 1 second, but + it can have a fractional part. + + + This function constructs a |START_REPLICATION|_ command and calls + `start_replication_expert()` internally. + + After starting the replication, to actually consume the incoming + server messages use `consume_stream()` or implement a loop around + `read_message()` in case of :ref:`asynchronous connection + `. + + .. 
versionchanged:: 2.8.3 + added the *status_interval* parameter. + + .. |START_REPLICATION| replace:: :sql:`START_REPLICATION` + .. _START_REPLICATION: https://www.postgresql.org/docs/current/static/protocol-replication.html + + .. method:: start_replication_expert(command, decode=False, status_interval=10) + + Start replication on the connection using provided + |START_REPLICATION|_ command. + + :param command: The full replication command. It can be a string or a + `~psycopg2.sql.Composable` instance for dynamic generation. + :param decode: a flag indicating that unicode conversion should be + performed on messages received from the server. + :param status_interval: time between feedback packets sent to the server + + .. versionchanged:: 2.8.3 + added the *status_interval* parameter. + + + .. method:: consume_stream(consume, keepalive_interval=None) + + :param consume: a callable object with signature :samp:`consume({msg})` + :param keepalive_interval: interval (in seconds) to send keepalive + messages to the server + + This method can only be used with synchronous connection. For + asynchronous connections see `read_message()`. + + Before using this method to consume the stream call + `start_replication()` first. + + This method enters an endless loop reading messages from the server + and passing them to ``consume()`` one at a time, then waiting for more + messages from the server. In order to make this method break out of + the loop and return, ``consume()`` can throw a `StopReplication` + exception. Any unhandled exception will make it break out of the loop + as well. + + The *msg* object passed to ``consume()`` is an instance of + `ReplicationMessage` class. See `read_message()` for details about + message decoding. + + This method also sends feedback messages to the server every + *keepalive_interval* (in seconds). The value of this parameter must + be set to at least 1 second, but it can have a fractional part. 
+ If the *keepalive_interval* is not specified, the value of + *status_interval* specified in the `start_replication()` or + `start_replication_expert()` will be used. + + The client must confirm every processed message by calling + `send_feedback()` method on the corresponding replication cursor. A + reference to the cursor is provided in the `ReplicationMessage` as an + attribute. + + The following example is a sketch implementation of ``consume()`` + callable for logical replication:: + + class LogicalStreamConsumer(object): + + # ... + + def __call__(self, msg): + self.process_message(msg.payload) + msg.cursor.send_feedback(flush_lsn=msg.data_start) + + consumer = LogicalStreamConsumer() + cur.consume_stream(consumer) + + .. warning:: + + When using replication with slots, failure to constantly consume + *and* report success to the server appropriately can eventually + lead to "disk full" condition on the server, because the server + retains all the WAL segments that might be needed to stream the + changes via all of the currently open replication slots. + + .. versionchanged:: 2.8.3 + changed the default value of the *keepalive_interval* parameter to `!None`. + + .. 
method:: send_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False, force=False) + + :param write_lsn: a LSN position up to which the client has written the data locally + :param flush_lsn: a LSN position up to which the client has processed the + data reliably (the server is allowed to discard all + and every data that predates this LSN) + :param apply_lsn: a LSN position up to which the warm standby server + has applied the changes (physical replication + master-slave protocol only) + :param reply: request the server to send back a keepalive message immediately + :param force: force sending a feedback message regardless of status_interval timeout + + Use this method to report to the server that all messages up to a + certain LSN position have been processed on the client and may be + discarded on the server. + + If the *reply* or *force* parameters are not set, this method will + just update internal structures without sending the feedback message + to the server. The library sends feedback message automatically + when *status_interval* timeout is reached. For this to work, you must + call `send_feedback()` on the same Cursor that you called `start_replication()` + on (the one in `message.cursor`) or your feedback will be lost. + + .. versionchanged:: 2.8.3 + added the *force* parameter. + + Low-level replication cursor methods for :ref:`asynchronous connection + ` operation. + + With the synchronous connection a call to `consume_stream()` handles all + the complexity of handling the incoming messages and sending keepalive + replies, but at times it might be beneficial to use low-level interface + for better control, in particular to `~select` on multiple sockets. The + following methods are provided for asynchronous operation: + + .. method:: read_message() + + Try to read the next message from the server without blocking and + return an instance of `ReplicationMessage` or `!None`, in case there + are no more data messages from the server at the moment. 
+ + This method should be used in a loop with asynchronous connections + (after calling `start_replication()` once). For synchronous + connections see `consume_stream()`. + + The returned message's `~ReplicationMessage.payload` is an instance of + `!unicode` decoded according to connection `~connection.encoding` + *iff* *decode* was set to `!True` in the initial call to + `start_replication()` on this connection, otherwise it is an instance + of `!bytes` with no decoding. + + It is expected that the calling code will call this method repeatedly + in order to consume all of the messages that might have been buffered + until `!None` is returned. After receiving `!None` from this method + the caller should use `~select.select()` or `~select.poll()` on the + corresponding connection to block the process until there is more data + from the server. + + Last, but not least, this method sends feedback messages when + *status_interval* timeout is reached or when keepalive message with + reply request arrived from the server. + + .. method:: fileno() + + Call the corresponding connection's `~connection.fileno()` method and + return the result. + + This is a convenience method which allows replication cursor to be + used directly in `~select.select()` or `~select.poll()` calls. + + .. attribute:: io_timestamp + + A `~datetime` object representing the timestamp at the moment of last + communication with the server (a data or keepalive message in either + direction). + + .. attribute:: feedback_timestamp + + A `~datetime` object representing the timestamp at the moment when + the last feedback message sent to the server. + + .. versionadded:: 2.8.3 + + .. attribute:: wal_end + + LSN position of the current end of WAL on the server at the + moment of last data or keepalive message received from the + server. + + .. 
versionadded:: 2.8 + + An actual example of asynchronous operation might look like this:: + + from select import select + from datetime import datetime + + def consume(msg): + # ... + msg.cursor.send_feedback(flush_lsn=msg.data_start) + + status_interval = 10.0 + while True: + msg = cur.read_message() + if msg: + consume(msg) + else: + now = datetime.now() + timeout = status_interval - (now - cur.feedback_timestamp).total_seconds() + try: + sel = select([cur], [], [], max(0, timeout)) + except InterruptedError: + pass # recalculate timeout and continue + +.. index:: + pair: Cursor; Replication + +.. autoclass:: StopReplication + + +.. index:: + single: Data types; Additional + +Additional data types +--------------------- + + +.. index:: + pair: JSON; Data types + pair: JSON; Adaptation + +.. _adapt-json: + +JSON_ adaptation +^^^^^^^^^^^^^^^^ + +.. versionadded:: 2.5 +.. versionchanged:: 2.5.4 + added |jsonb| support. In previous versions |jsonb| values are returned + as strings. See :ref:`the FAQ ` for a workaround. + +Psycopg can adapt Python objects to and from the PostgreSQL |jsons|_ +types. With PostgreSQL 9.2 and following versions adaptation is +available out-of-the-box. To use JSON data with previous database versions +(either with the `9.1 json extension`__, but even if you want to convert text +fields to JSON) you can use the `register_json()` function. + +.. __: http://people.planetpostgresql.org/andrew/index.php?/archives/255-JSON-for-PG-9.2-...-and-now-for-9.1!.html + +The Python :py:mod:`json` module is used by default to convert Python objects +to JSON and to parse data from the database. + +.. _JSON: https://www.json.org/ +.. |json| replace:: :sql:`json` +.. |jsonb| replace:: :sql:`jsonb` +.. |jsons| replace:: |json| and |jsonb| +.. 
_jsons: https://www.postgresql.org/docs/current/static/datatype-json.html + +In order to pass a Python object to the database as query argument you can use +the `Json` adapter:: + + curs.execute("insert into mytable (jsondata) values (%s)", + [Json({'a': 100})]) + +Reading from the database, |json| and |jsonb| values will be automatically +converted to Python objects. + +.. note:: + + If you are using the PostgreSQL :sql:`json` data type but you want to read + it as string in Python instead of having it parsed, you can either cast + the column to :sql:`text` in the query (it is an efficient operation, that + doesn't involve a copy):: + + cur.execute("select jsondata::text from mytable") + + or you can register a no-op `!loads()` function with + `register_default_json()`:: + + psycopg2.extras.register_default_json(loads=lambda x: x) + +.. note:: + + You can use `~psycopg2.extensions.register_adapter()` to adapt any Python + dictionary to JSON, either registering `Json` or any subclass or factory + creating a compatible adapter:: + + psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json) + + This setting is global though, so it is not compatible with similar + adapters such as the one registered by `register_hstore()`. Any other + object supported by JSON can be registered the same way, but this will + clobber the default adaptation rule, so be careful of unwanted side + effects. 
+ +If you want to customize the adaptation from Python to PostgreSQL you can +either provide a custom `!dumps()` function to `Json`:: + + curs.execute("insert into mytable (jsondata) values (%s)", + [Json({'a': 100}, dumps=simplejson.dumps)]) + +or you can subclass it overriding the `~Json.dumps()` method:: + + class MyJson(Json): + def dumps(self, obj): + return simplejson.dumps(obj) + + curs.execute("insert into mytable (jsondata) values (%s)", + [MyJson({'a': 100})]) + +Customizing the conversion from PostgreSQL to Python can be done passing a +custom `!loads()` function to `register_json()`. For the builtin data types +(|json| from PostgreSQL 9.2, |jsonb| from PostgreSQL 9.4) use +`register_default_json()` and `register_default_jsonb()`. For example, if you +want to convert the float values from :sql:`json` into +:py:class:`~decimal.Decimal` you can use:: + + loads = lambda x: json.loads(x, parse_float=Decimal) + psycopg2.extras.register_json(conn, loads=loads) + +Or, if you want to use an alternative JSON module implementation, such as the +faster UltraJSON_, you can use:: + + psycopg2.extras.register_default_json(loads=ujson.loads, globally=True) + psycopg2.extras.register_default_jsonb(loads=ujson.loads, globally=True) + +.. _UltraJSON: https://pypi.org/project/ujson/ + + +.. autoclass:: Json + + .. automethod:: dumps + +.. autofunction:: register_json + + .. versionchanged:: 2.5.4 + added the *name* parameter to enable :sql:`jsonb` support. + +.. autofunction:: register_default_json + +.. autofunction:: register_default_jsonb + + .. versionadded:: 2.5.4 + + + +.. index:: + pair: hstore; Data types + pair: dict; Adaptation + +.. _adapt-hstore: + +Hstore data type +^^^^^^^^^^^^^^^^ + +.. versionadded:: 2.3 + +The |hstore|_ data type is a key-value store embedded in PostgreSQL. It has +been available for several server versions but with the release 9.0 it has +been greatly improved in capacity and usefulness with the addition of many +functions. 
It supports GiST or GIN indexes allowing search by keys or +key/value pairs as well as regular BTree indexes for equality, uniqueness etc. + +Psycopg can convert Python `!dict` objects to and from |hstore| structures. +Only dictionaries with string/unicode keys and values are supported. `!None` +is also allowed as value but not as a key. Psycopg uses a more efficient |hstore| +representation when dealing with PostgreSQL 9.0 but previous server versions +are supported as well. By default the adapter/typecaster are disabled: they +can be enabled using the `register_hstore()` function. + +.. autofunction:: register_hstore + + .. versionchanged:: 2.4 + added the *oid* parameter. If not specified, the typecaster is + installed also if |hstore| is not installed in the :sql:`public` + schema. + + .. versionchanged:: 2.4.3 + added support for |hstore| array. + + +.. |hstore| replace:: :sql:`hstore` +.. _hstore: https://www.postgresql.org/docs/current/static/hstore.html + + + +.. index:: + pair: Composite types; Data types + pair: tuple; Adaptation + pair: namedtuple; Adaptation + +.. _adapt-composite: + +Composite types casting +^^^^^^^^^^^^^^^^^^^^^^^ + +.. versionadded:: 2.4 + +Using `register_composite()` it is possible to cast a PostgreSQL composite +type (either created with the |CREATE TYPE|_ command or implicitly defined +after a table row type) into a Python named tuple, or into a regular tuple if +:py:func:`collections.namedtuple` is not found. + +.. |CREATE TYPE| replace:: :sql:`CREATE TYPE` +.. _CREATE TYPE: https://www.postgresql.org/docs/current/static/sql-createtype.html + +.. doctest:: + + >>> cur.execute("CREATE TYPE card AS (value int, suit text);") + >>> psycopg2.extras.register_composite('card', cur) + + + >>> cur.execute("select (8, 'hearts')::card") + >>> cur.fetchone()[0] + card(value=8, suit='hearts') + +Nested composite types are handled as expected, provided that the type of the +composite components are registered as well. + +.. 
doctest:: + + >>> cur.execute("CREATE TYPE card_back AS (face card, back text);") + >>> psycopg2.extras.register_composite('card_back', cur) + + + >>> cur.execute("select ((8, 'hearts'), 'blue')::card_back") + >>> cur.fetchone()[0] + card_back(face=card(value=8, suit='hearts'), back='blue') + +Adaptation from Python tuples to composite types is automatic instead and +requires no adapter registration. + + +.. _custom-composite: + +.. Note:: + + If you want to convert PostgreSQL composite types into something different + than a `!namedtuple` you can subclass the `CompositeCaster` overriding + `~CompositeCaster.make()`. For example, if you want to convert your type + into a Python dictionary you can use:: + + >>> class DictComposite(psycopg2.extras.CompositeCaster): + ... def make(self, values): + ... return dict(zip(self.attnames, values)) + + >>> psycopg2.extras.register_composite('card', cur, + ... factory=DictComposite) + + >>> cur.execute("select (8, 'hearts')::card") + >>> cur.fetchone()[0] + {'suit': 'hearts', 'value': 8} + + +.. autofunction:: register_composite + + .. versionchanged:: 2.4.3 + added support for array of composite types + .. versionchanged:: 2.5 + added the *factory* parameter + + +.. autoclass:: CompositeCaster + + .. automethod:: make + + .. versionadded:: 2.5 + + Object attributes: + + .. attribute:: name + + The name of the PostgreSQL type. + + .. attribute:: schema + + The schema where the type is defined. + + .. versionadded:: 2.5 + + .. attribute:: oid + + The oid of the PostgreSQL type. + + .. attribute:: array_oid + + The oid of the PostgreSQL array type, if available. + + .. attribute:: type + + The type of the Python objects returned. If :py:func:`collections.namedtuple()` + is available, it is a named tuple with attributes equal to the type + components. Otherwise it is just the `!tuple` object. + + .. attribute:: attnames + + List of component names of the type to be casted. + + .. 
attribute:: atttypes + + List of component type oids of the type to be casted. + + +.. index:: + pair: range; Data types + +.. _adapt-range: + +Range data types +^^^^^^^^^^^^^^^^ + +.. versionadded:: 2.5 + +Psycopg offers a `Range` Python type and supports adaptation between them and +PostgreSQL |range|_ types. Builtin |range| types are supported out-of-the-box; +user-defined |range| types can be adapted using `register_range()`. + +.. |range| replace:: :sql:`range` +.. _range: https://www.postgresql.org/docs/current/static/rangetypes.html + +.. autoclass:: Range + + This Python type is only used to pass and retrieve range values to and + from PostgreSQL and doesn't attempt to replicate the PostgreSQL range + features: it doesn't perform normalization and doesn't implement all the + operators__ supported by the database. + + .. __: https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE + + `!Range` objects are immutable, hashable, and support the ``in`` operator + (checking if an element is within the range). They can be tested for + equivalence. Empty ranges evaluate to `!False` in boolean context, + nonempty evaluate to `!True`. + + .. versionchanged:: 2.5.3 + + `!Range` objects can be sorted although, as on the server-side, this + ordering is not particularly meaningful. It is only meant to be used + by programs assuming objects using `!Range` as primary key can be + sorted on them. In previous versions comparing `!Range`\s raises + `!TypeError`. + + Although it is possible to instantiate `!Range` objects, the class doesn't + have an adapter registered, so you cannot normally pass these instances as + query arguments. To use range objects as query arguments you can either + use one of the provided subclasses, such as `NumericRange` or create a + custom subclass using `register_range()`. + + Object attributes: + + .. autoattribute:: isempty + .. autoattribute:: lower + .. autoattribute:: upper + .. 
autoattribute:: lower_inc + .. autoattribute:: upper_inc + .. autoattribute:: lower_inf + .. autoattribute:: upper_inf + + +The following `Range` subclasses map builtin PostgreSQL |range| types to +Python objects: they have an adapter registered so their instances can be +passed as query arguments. |range| values read from database queries are +automatically casted into instances of these classes. + +.. autoclass:: NumericRange +.. autoclass:: DateRange +.. autoclass:: DateTimeRange +.. autoclass:: DateTimeTZRange + +.. note:: + + Python lacks a representation for :sql:`infinity` date so Psycopg converts + the value to `date.max` and such. When written into the database these + dates will assume their literal value (e.g. :sql:`9999-12-31` instead of + :sql:`infinity`). Check :ref:`infinite-dates-handling` for an example of + an alternative adapter to map `date.max` to :sql:`infinity`. An + alternative dates adapter will be used automatically by the `DateRange` + adapter and so on. + + +Custom |range| types (created with |CREATE TYPE|_ :sql:`... AS RANGE`) can be +adapted to a custom `Range` subclass: + +.. autofunction:: register_range + +.. autoclass:: RangeCaster + + Object attributes: + + .. attribute:: range + + The `!Range` subclass adapted. + + .. attribute:: adapter + + The `~psycopg2.extensions.ISQLQuote` responsible to adapt `!range`. + + .. attribute:: typecaster + + The object responsible for casting. + + .. attribute:: array_typecaster + + The object responsible to cast arrays, if available, else `!None`. + + + +.. index:: + pair: UUID; Data types + +.. _adapt-uuid: + +UUID data type +^^^^^^^^^^^^^^ + +.. versionadded:: 2.0.9 +.. versionchanged:: 2.0.13 added UUID array support. + +.. 
doctest:: + + >>> psycopg2.extras.register_uuid() + + + >>> # Python UUID can be used in SQL queries + >>> import uuid + >>> my_uuid = uuid.UUID('{12345678-1234-5678-1234-567812345678}') + >>> psycopg2.extensions.adapt(my_uuid).getquoted() + "'12345678-1234-5678-1234-567812345678'::uuid" + + >>> # PostgreSQL UUID are transformed into Python UUID objects. + >>> cur.execute("SELECT 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'::uuid") + >>> cur.fetchone()[0] + UUID('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11') + + +.. autofunction:: register_uuid + +.. autoclass:: UUID_adapter + + + +.. index:: + pair: INET; Data types + pair: CIDR; Data types + pair: MACADDR; Data types + +.. _adapt-network: + +Networking data types +^^^^^^^^^^^^^^^^^^^^^ + +By default Psycopg casts the PostgreSQL networking data types (:sql:`inet`, +:sql:`cidr`, :sql:`macaddr`) into ordinary strings; array of such types are +converted into lists of strings. + +.. versionchanged:: 2.7 + in previous version array of networking types were not treated as arrays. + +.. autofunction:: register_ipaddress + + +.. autofunction:: register_inet + + .. deprecated:: 2.7 + this function will not receive further development and may disappear in + future versions. + +.. doctest:: + + >>> psycopg2.extras.register_inet() + + + >>> cur.mogrify("SELECT %s", (Inet('127.0.0.1/32'),)) + "SELECT E'127.0.0.1/32'::inet" + + >>> cur.execute("SELECT '192.168.0.1/24'::inet") + >>> cur.fetchone()[0].addr + '192.168.0.1/24' + + +.. autoclass:: Inet + + .. deprecated:: 2.7 + this object will not receive further development and may disappear in + future versions. + + + +.. _fast-exec: + +Fast execution helpers +---------------------- + +The current implementation of `~cursor.executemany()` is (using an extremely +charitable understatement) not particularly performing. These functions can +be used to speed up the repeated execution of a statement against a set of +parameters. 
By reducing the number of server roundtrips the performance can be +`orders of magnitude better`__ than using `!executemany()`. + +.. __: https://github.com/psycopg/psycopg2/issues/491#issuecomment-276551038 + + +.. autofunction:: execute_batch + + .. versionadded:: 2.7 + +.. note:: + + `!execute_batch()` can be also used in conjunction with PostgreSQL + prepared statements using |PREPARE|_, |EXECUTE|_, |DEALLOCATE|_. + Instead of executing:: + + execute_batch(cur, + "big and complex SQL with %s %s params", + params_list) + + it is possible to execute something like:: + + cur.execute("PREPARE stmt AS big and complex SQL with $1 $2 params") + execute_batch(cur, "EXECUTE stmt (%s, %s)", params_list) + cur.execute("DEALLOCATE stmt") + + which may bring further performance benefits: if the operation to perform + is complex, every single execution will be faster as the query plan is + already cached; furthermore the amount of data to send on the server will + be lesser (one |EXECUTE| per param set instead of the whole, likely + longer, statement). + + .. |PREPARE| replace:: :sql:`PREPARE` + .. _PREPARE: https://www.postgresql.org/docs/current/static/sql-prepare.html + + .. |EXECUTE| replace:: :sql:`EXECUTE` + .. _EXECUTE: https://www.postgresql.org/docs/current/static/sql-execute.html + + .. |DEALLOCATE| replace:: :sql:`DEALLOCATE` + .. _DEALLOCATE: https://www.postgresql.org/docs/current/static/sql-deallocate.html + + +.. autofunction:: execute_values + + .. versionadded:: 2.7 + .. versionchanged:: 2.8 + added the *fetch* parameter. + + +.. index:: + pair: Example; Coroutine; + + + +Coroutine support +----------------- + +.. autofunction:: wait_select(conn) + + .. versionchanged:: 2.6.2 + allow to cancel a query using :kbd:`Ctrl-C`, see + :ref:`the FAQ ` for an example. 
diff --git a/doc/src/faq.rst b/doc/src/faq.rst new file mode 100644 index 0000000000000000000000000000000000000000..9d1dbeb34ab14ee35940f605f6fcccd466a4b0b5 --- /dev/null +++ b/doc/src/faq.rst @@ -0,0 +1,382 @@ +Frequently Asked Questions +========================== + +.. sectionauthor:: Daniele Varrazzo + +Here are a few gotchas you may encounter using `psycopg2`. Feel free to +suggest new entries! + + +Meta +---- + +.. _faq-question: +.. cssclass:: faq + +How do I ask a question? + - Have you first checked if your question is answered already in the + documentation? + + - If your question is about installing psycopg, have you checked the + :ref:`install FAQ ` and the :ref:`install docs + `? + + - Have you googled for your error message? + + - If you haven't found an answer yet, please write to the `Mailing List`_. + + - If you haven't found a bug, DO NOT write to the bug tracker to ask + questions. You will only get piro grumpy. + + .. _mailing list: https://www.postgresql.org/list/psycopg/ + + +.. _faq-transactions: + +Problems with transactions handling +----------------------------------- + +.. _faq-idle-in-transaction: +.. cssclass:: faq + +Why does `!psycopg2` leave database sessions "idle in transaction"? + Psycopg normally starts a new transaction the first time a query is + executed, e.g. calling `cursor.execute()`, even if the command is a + :sql:`SELECT`. The transaction is not closed until an explicit + `~connection.commit()` or `~connection.rollback()`. + + If you are writing a long-living program, you should probably make sure to + call one of the transaction closing methods before leaving the connection + unused for a long time (which may also be a few seconds, depending on the + concurrency level in your database). Alternatively you can use a + connection in `~connection.autocommit` mode to avoid a new transaction to + be started at the first command. + + +.. _faq-transaction-aborted: +.. 
cssclass:: faq + +I receive the error *current transaction is aborted, commands ignored until end of transaction block* and can't do anything else! + There was a problem *in the previous* command to the database, which + resulted in an error. The database will not recover automatically from + this condition: you must run a `~connection.rollback()` before sending + new commands to the session (if this seems too harsh, remember that + PostgreSQL supports nested transactions using the |SAVEPOINT|_ command). + + .. |SAVEPOINT| replace:: :sql:`SAVEPOINT` + .. _SAVEPOINT: https://www.postgresql.org/docs/current/static/sql-savepoint.html + + +.. _faq-transaction-aborted-multiprocess: +.. cssclass:: faq + +Why do I get the error *current transaction is aborted, commands ignored until end of transaction block* when I use `!multiprocessing` (or any other forking system) and not when I use `!threading`? + Psycopg's connections can't be shared across processes (but are thread + safe). If you are forking the Python process make sure to create a new + connection in each forked child. See :ref:`thread-safety` for further + information. + + +.. _faq-types: + +Problems with type conversions +------------------------------ + +.. _faq-cant-adapt: +.. cssclass:: faq + +Why does `!cursor.execute()` raise the exception *can't adapt*? + Psycopg converts Python objects into a SQL string representation by looking + at the object class. The exception is raised when you are trying to pass + as query parameter an object for which there is no adapter registered for + its class. See :ref:`adapting-new-types` for information. + + +.. _faq-number-required: +.. cssclass:: faq + +I can't pass an integer or a float parameter to my query: it says *a number is required*, but *it is* a number! + In your query string, you always have to use ``%s`` placeholders, + even when passing a number. 
All Python objects are converted by Psycopg + in their SQL representation, so they get passed to the query as strings. + See :ref:`query-parameters`. :: + + >>> cur.execute("INSERT INTO numbers VALUES (%d)", (42,)) # WRONG + >>> cur.execute("INSERT INTO numbers VALUES (%s)", (42,)) # correct + + +.. _faq-not-all-arguments-converted: +.. cssclass:: faq + +I try to execute a query but it fails with the error *not all arguments converted during string formatting* (or *object does not support indexing*). Why? + Psycopg always requires positional arguments to be passed as a sequence, even + when the query takes a single parameter. And remember that to make a + single item tuple in Python you need a comma! See :ref:`query-parameters`. + :: + + >>> cur.execute("INSERT INTO foo VALUES (%s)", "bar") # WRONG + >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar")) # WRONG + >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar",)) # correct + >>> cur.execute("INSERT INTO foo VALUES (%s)", ["bar"]) # correct + + +.. _faq-unicode: +.. cssclass:: faq + +My database is Unicode, but I receive all the strings as UTF-8 `!str`. Can I receive `!unicode` objects instead? + The following magic formula will do the trick:: + + psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) + psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY) + + See :ref:`unicode-handling` for the gory details. + + +.. _faq-bytes: +.. cssclass:: faq + +My database is in mixed encoding. My program was working on Python 2 but Python 3 fails decoding the strings. How do I avoid decoding? + From psycopg 2.8 you can use the following adapters to always return bytes + from strings:: + + psycopg2.extensions.register_type(psycopg2.extensions.BYTES) + psycopg2.extensions.register_type(psycopg2.extensions.BYTESARRAY) + + See :ref:`unicode-handling` for an example. + + +.. _faq-float: +.. 
cssclass:: faq + +Psycopg converts :sql:`decimal`\/\ :sql:`numeric` database types into Python `!Decimal` objects. Can I have `!float` instead? + You can register a customized adapter for PostgreSQL decimal type:: + + DEC2FLOAT = psycopg2.extensions.new_type( + psycopg2.extensions.DECIMAL.values, + 'DEC2FLOAT', + lambda value, curs: float(value) if value is not None else None) + psycopg2.extensions.register_type(DEC2FLOAT) + + See :ref:`type-casting-from-sql-to-python` to read the relevant + documentation. If you find `!psycopg2.extensions.DECIMAL` not available, use + `!psycopg2._psycopg.DECIMAL` instead. + + +.. _faq-json-adapt: +.. cssclass:: faq + +Psycopg automatically converts PostgreSQL :sql:`json` data into Python objects. How can I receive strings instead? + The easiest way to avoid JSON parsing is to register a no-op function with + `~psycopg2.extras.register_default_json()`:: + + psycopg2.extras.register_default_json(loads=lambda x: x) + + See :ref:`adapt-json` for further details. + + +.. _faq-jsonb-adapt: +.. cssclass:: faq + +Psycopg converts :sql:`json` values into Python objects but :sql:`jsonb` values are returned as strings. Can :sql:`jsonb` be converted automatically? + Automatic conversion of :sql:`jsonb` values is supported from Psycopg + release 2.5.4. For previous versions you can register the :sql:`json` + typecaster on the :sql:`jsonb` oids (which are known and not supposed to + change in future PostgreSQL versions):: + + psycopg2.extras.register_json(oid=3802, array_oid=3807, globally=True) + + See :ref:`adapt-json` for further details. + + +.. _faq-identifier: +.. cssclass:: faq + +How can I pass field/table names to a query? 
+ The arguments in the `~cursor.execute()` methods can only represent data + to pass to the query: they cannot represent a table or field name:: + + # This doesn't work + cur.execute("insert into %s values (%s)", ["my_table", 42]) + + If you want to build a query dynamically you can use the objects exposed + by the `psycopg2.sql` module:: + + cur.execute( + sql.SQL("insert into %s values (%%s)") % [sql.Identifier("my_table")], + [42]) + + +.. _faq-bytea-9.0: +.. cssclass:: faq + +Transferring binary data from PostgreSQL 9.0 doesn't work. + PostgreSQL 9.0 uses by default `the "hex" format`__ to transfer + :sql:`bytea` data: the format can't be parsed by the libpq 8.4 and + earlier. The problem is solved in Psycopg 2.4.1, that uses its own parser + for the :sql:`bytea` format. For previous Psycopg releases, three options + to solve the problem are: + + - set the bytea_output__ parameter to ``escape`` in the server; + - execute the database command ``SET bytea_output TO escape;`` in the + session before reading binary data; + - upgrade the libpq library on the client to at least 9.0. + + .. __: https://www.postgresql.org/docs/current/static/datatype-binary.html + .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-BYTEA-OUTPUT + + +.. _faq-array: +.. cssclass:: faq + +Arrays of *TYPE* are not casted to list. + Arrays are only casted to list when their oid is known, and an array + typecaster is registered for them. If there is no typecaster, the array is + returned unparsed from PostgreSQL (e.g. ``{a,b,c}``). It is easy to create + a generic arrays typecaster, returning a list of array: an example is + provided in the `~psycopg2.extensions.new_array_type()` documentation. + + +.. _faq-best-practices: + +Best practices +-------------- + +.. _faq-reuse-cursors: +.. cssclass:: faq + +When should I save and re-use a cursor as opposed to creating a new one as needed? 
+ Cursors are lightweight objects and creating lots of them should not pose + any kind of problem. But note that cursors used to fetch result sets will + cache the data and use memory in proportion to the result set size. Our + suggestion is to almost always create a new cursor and dispose old ones as + soon as the data is not required anymore (call `~cursor.close()` on + them.) The only exception is tight loops where one usually uses the same + cursor for a whole bunch of :sql:`INSERT`\s or :sql:`UPDATE`\s. + + +.. _faq-reuse-connections: +.. cssclass:: faq + +When should I save and re-use a connection as opposed to creating a new one as needed? + Creating a connection can be slow (think of SSL over TCP) so the best + practice is to create a single connection and keep it open as long as + required. It is also good practice to rollback or commit frequently (even + after a single :sql:`SELECT` statement) to make sure the backend is never + left "idle in transaction". See also `psycopg2.pool` for lightweight + connection pooling. + + +.. _faq-named-cursors: +.. cssclass:: faq + +What are the advantages or disadvantages of using named cursors? + The only disadvantage is that they use up resources on the server and + that there is a little overhead because at least two queries (one to + create the cursor and one to fetch the initial result set) are issued to + the backend. The advantage is that data is fetched one chunk at a time: + using small `~cursor.fetchmany()` values it is possible to use very + little memory on the client and to skip or discard parts of the result set. + + +.. _faq-interrupt-query: +.. cssclass:: faq + +How do I interrupt a long-running query in an interactive shell? + Normally the interactive shell becomes unresponsive to :kbd:`Ctrl-C` when + running a query. 
Using a connection in green mode allows Python to + receive and handle the interrupt, although it may leave the connection + broken, if the async callback doesn't handle the `!KeyboardInterrupt` + correctly. + + Starting from psycopg 2.6.2, the `~psycopg2.extras.wait_select` callback + can handle a :kbd:`Ctrl-C` correctly. For previous versions, you can use + `this implementation`__. + + .. __: https://www.psycopg.org/articles/2014/07/20/cancelling-postgresql-statements-python/ + + .. code-block:: pycon + + >>> psycopg2.extensions.set_wait_callback(psycopg2.extras.wait_select) + >>> cnn = psycopg2.connect('') + >>> cur = cnn.cursor() + >>> cur.execute("select pg_sleep(10)") + ^C + Traceback (most recent call last): + File "", line 1, in + QueryCanceledError: canceling statement due to user request + + >>> cnn.rollback() + >>> # You can use the connection and cursor again from here + + +.. _faq-compile: + +Problems compiling and installing psycopg2 +------------------------------------------ + +.. _faq-wheels: +.. cssclass:: faq + +Psycopg 2.8 fails to install, Psycopg 2.7 was working fine. + With Psycopg 2.7 you were installing binary packages, but they have proven + unreliable so now you have to install them explicitly using the + ``psycopg2-binary`` package. See :ref:`binary-packages` for all the + details. + +.. _faq-python-h: +.. cssclass:: faq + +I can't compile `!psycopg2`: the compiler says *error: Python.h: No such file or directory*. What am I missing? + You need to install a Python development package: it is usually called + ``python-dev`` or ``python3-dev`` according to your Python version. + + +.. _faq-libpq-fe-h: +.. cssclass:: faq + +I can't compile `!psycopg2`: the compiler says *error: libpq-fe.h: No such file or directory*. What am I missing? + You need to install the development version of the libpq: the package is + usually called ``libpq-dev``. + + +.. _faq-lo_truncate: +.. 
cssclass:: faq + +`!psycopg2` raises `!ImportError` with message *_psycopg.so: undefined symbol: lo_truncate* when imported. + This means that Psycopg was compiled with |lo_truncate|_ support (*i.e.* + the libpq used at compile time was version >= 8.3) but at runtime an older + libpq dynamic library is found. + + Fast-forward several years, if the message reports *undefined symbol: + lo_truncate64* it means that Psycopg was built with large objects 64 bits + API support (*i.e.* the libpq used at compile time was at least 9.3) but + at runtime an older libpq dynamic library is found. + + You can use: + + .. code-block:: shell + + $ ldd /path/to/packages/psycopg2/_psycopg.so | grep libpq + + to find what is the libpq dynamic library used at runtime. + + You can avoid the problem by using the same version of the + :program:`pg_config` at install time and the libpq at runtime. + + .. |lo_truncate| replace:: `!lo_truncate()` + .. _lo_truncate: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-TRUNCATE + + +.. _faq-import-mod_wsgi: +.. cssclass:: faq + +Psycopg raises *ImportError: cannot import name tz* on import in mod_wsgi / ASP, but it works fine otherwise. + If `!psycopg2` is installed in an egg_ (e.g. because installed by + :program:`easy_install`), the user running the program may be unable to + write in the `eggs cache`__. Set the env variable + :envvar:`PYTHON_EGG_CACHE` to a writable directory. With modwsgi you can + use the WSGIPythonEggs__ directive. + + .. _egg: http://peak.telecommunity.com/DevCenter/PythonEggs + .. __: https://stackoverflow.com/questions/2192323/what-is-the-python-egg-cache-python-egg-cache + .. 
__: https://modwsgi.readthedocs.io/en/develop/configuration-directives/WSGIPythonEggs.html diff --git a/doc/src/index.rst b/doc/src/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..580d44aa66f77c568aaec99f00d54c663e686d9d --- /dev/null +++ b/doc/src/index.rst @@ -0,0 +1,70 @@ +================================================= +Psycopg -- PostgreSQL database adapter for Python +================================================= + +.. sectionauthor:: Daniele Varrazzo + +Psycopg_ is the most popular PostgreSQL_ database adapter for the Python_ +programming language. Its main features are the complete implementation of +the Python |DBAPI|_ specification and the thread safety (several threads can +share the same connection). It was designed for heavily multi-threaded +applications that create and destroy lots of cursors and make a large number +of concurrent :sql:`INSERT`\s or :sql:`UPDATE`\s. + +Psycopg 2 is mostly implemented in C as a libpq_ wrapper, resulting in being +both efficient and secure. It features client-side and :ref:`server-side +` cursors, :ref:`asynchronous communication +` and :ref:`notifications `, :ref:`COPY ` +support. Many Python types are supported out-of-the-box and :ref:`adapted to +matching PostgreSQL data types `; adaptation can be +extended and customized thanks to a flexible :ref:`objects adaptation system +`. + +Psycopg 2 is both Unicode and Python 3 friendly. + + +.. _Psycopg: https://psycopg.org/ +.. _PostgreSQL: https://www.postgresql.org/ +.. _Python: https://www.python.org/ +.. _libpq: https://www.postgresql.org/docs/current/static/libpq.html + + +.. rubric:: Contents + +.. toctree:: + :maxdepth: 2 + + install + usage + module + connection + cursor + advanced + extensions + extras + errors + sql + tz + pool + errorcodes + faq + news + license + + +.. ifconfig:: builder != 'text' + + .. rubric:: Indices and tables + + * :ref:`genindex` + * :ref:`modindex` + * :ref:`search` + + +.. 
ifconfig:: todo_include_todos + + .. note:: + + **To Do items in the documentation** + + .. todolist:: diff --git a/doc/src/install.rst b/doc/src/install.rst new file mode 100644 index 0000000000000000000000000000000000000000..2eb66c8762203ba79caacd1da17f0730906c0db0 --- /dev/null +++ b/doc/src/install.rst @@ -0,0 +1,357 @@ +.. _installation: + +Installation +============ + +.. sectionauthor:: Daniele Varrazzo + +Psycopg is a PostgreSQL_ adapter for the Python_ programming language. It is a +wrapper for the libpq_, the official PostgreSQL client library. + +.. _PostgreSQL: https://www.postgresql.org/ +.. _Python: https://www.python.org/ + + +.. index:: + single: Install; from PyPI + single: Install; wheel + single: Wheel + +.. _binary-packages: + +Quick Install +------------- + +For most operating systems, the quickest way to install Psycopg is using the +wheel_ package available on PyPI_: + +.. code-block:: console + + $ pip install psycopg2-binary + +This will install a pre-compiled binary version of the module which does not +require the build or runtime prerequisites described below. Make sure to use +an up-date-date version of :program:`pip` (you can upgrade it using something +like ``pip install -U pip``). + +You may then import the ``psycopg2`` package, as usual: + +.. code-block:: python + + import psycopg2 + + # Connect to your postgres DB + conn = psycopg2.connect("dbname=test user=postgres") + + # Open a cursor to perform database operations + cur = conn.cursor() + + # Execute a query + cur.execute("SELECT * FROM my_data") + + # Retrieve query results + records = cur.fetchall() + +.. _PyPI: https://pypi.org/project/psycopg2-binary/ +.. _wheel: https://pythonwheels.com/ + + +psycopg vs psycopg-binary +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``psycopg2-binary`` package is meant for beginners to start playing +with Python and PostgreSQL without the need to meet the build +requirements. 
+ +If you are the maintainer of a published package depending on `!psycopg2` +you shouldn't use ``psycopg2-binary`` as a module dependency. **For +production use you are advised to use the source distribution.** + +The binary packages come with their own versions of a few C libraries, +among which ``libpq`` and ``libssl``, which will be used regardless of other +libraries available on the client: upgrading the system libraries will not +upgrade the libraries used by `!psycopg2`. Please build `!psycopg2` from +source if you want to maintain binary upgradeability. + +.. warning:: + + The `!psycopg2` wheel package comes packaged, among the others, with its + own ``libssl`` binary. This may create conflicts with other extension + modules binding with ``libssl`` as well, for instance with the Python + `ssl` module: in some cases, under concurrency, the interaction between + the two libraries may result in a segfault. In case of doubts you are + advised to use a package built from source. + + +.. index:: + single: Install; disable wheel + single: Wheel; disable + +.. _disable-wheel: + +Change in binary packages between Psycopg 2.7 and 2.8 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In version 2.7.x, :command:`pip install psycopg2` would have tried to install +automatically the binary package of Psycopg. Because of concurrency problems +binary packages have displayed, ``psycopg2-binary`` has become a separate +package, and from 2.8 it has become the only way to install the binary +package. + +If you are using Psycopg 2.7 and you want to disable the use of wheel binary +packages, relying on the system libraries available on your client, you +can use the :command:`pip` |--no-binary option|__, e.g.: + +.. code-block:: console + + $ pip install --no-binary :all: psycopg2 + +.. |--no-binary option| replace:: ``--no-binary`` option +.. 
__: https://pip.pypa.io/en/stable/reference/pip_install/#install-no-binary + +which can be specified in your :file:`requirements.txt` files too, e.g. use: + +.. code-block:: none + + psycopg2>=2.7,<2.8 --no-binary psycopg2 + +to use the last bugfix release of the `!psycopg2` 2.7 package, specifying to +always compile it from source. Of course in this case you will have to meet +the :ref:`build prerequisites `. + + +.. index:: + single: Prerequisites + +Prerequisites +------------- + +The current `!psycopg2` implementation supports: + +.. + NOTE: keep consistent with setup.py and the /features/ page. + +- Python versions from 3.6 to 3.9 +- PostgreSQL server versions from 7.4 to 13 +- PostgreSQL client library version from 9.1 + + + +.. _build-prerequisites: + +Build prerequisites +^^^^^^^^^^^^^^^^^^^ + +The build prerequisites are to be met in order to install Psycopg from source +code, from a source distribution package, GitHub_ or from PyPI. + +.. _GitHub: https://github.com/psycopg/psycopg2 + +Psycopg is a C wrapper around the libpq_ PostgreSQL client library. To install +it from sources you will need: + +- A C compiler. + +- The Python header files. They are usually installed in a package such as + **python-dev** or **python3-dev**. A message such as *error: Python.h: No + such file or directory* is an indication that the Python headers are + missing. + +- The libpq header files. They are usually installed in a package such as + **libpq-dev**. If you get an *error: libpq-fe.h: No such file or directory* + you are missing them. + +- The :program:`pg_config` program: it is usually installed by the + **libpq-dev** package but sometimes it is not in a :envvar:`PATH` directory. 
+ Having it in the :envvar:`PATH` greatly streamlines the installation, so try + running ``pg_config --version``: if it returns an error or an unexpected + version number then locate the directory containing the :program:`pg_config` + shipped with the right libpq version (usually + ``/usr/lib/postgresql/X.Y/bin/``) and add it to the :envvar:`PATH`: + + .. code-block:: console + + $ export PATH=/usr/lib/postgresql/X.Y/bin/:$PATH + + You only need :program:`pg_config` to compile `!psycopg2`, not for its + regular usage. + +Once everything is in place it's just a matter of running the standard: + +.. code-block:: console + + $ pip install psycopg2 + +or, from the directory containing the source code: + +.. code-block:: console + + $ python setup.py build + $ python setup.py install + + +Runtime requirements +^^^^^^^^^^^^^^^^^^^^ + +Unless you compile `!psycopg2` as a static library, or you install it from a +self-contained wheel package, it will need the libpq_ library at runtime +(usually distributed in a ``libpq.so`` or ``libpq.dll`` file). `!psycopg2` +relies on the host OS to find the library if the library is installed in a +standard location there is usually no problem; if the library is in a +non-standard location you will have to tell somehow Psycopg how to find it, +which is OS-dependent (for instance setting a suitable +:envvar:`LD_LIBRARY_PATH` on Linux). + +.. note:: + + The libpq header files used to compile `!psycopg2` should match the + version of the library linked at runtime. If you get errors about missing + or mismatching libraries when importing `!psycopg2` check (e.g. using + :program:`ldd`) if the module ``psycopg2/_psycopg.so`` is linked to the + right ``libpq.so``. + +.. 
note:: + + Whatever version of libpq `!psycopg2` is compiled with, it will be + possible to connect to PostgreSQL servers of any supported version: just + install the most recent libpq version or the most practical, without + trying to match it to the version of the PostgreSQL server you will have + to connect to. + + +.. index:: + single: setup.py + single: setup.cfg + +Non-standard builds +------------------- + +If you have less standard requirements such as: + +- creating a :ref:`debug build `, +- using :program:`pg_config` not in the :envvar:`PATH`, + +then take a look at the ``setup.cfg`` file. + +Some of the options available in ``setup.cfg`` are also available as command +line arguments of the ``build_ext`` sub-command. For instance you can specify +an alternate :program:`pg_config` location using: + +.. code-block:: console + + $ python setup.py build_ext --pg-config /path/to/pg_config build + +Use ``python setup.py build_ext --help`` to get a list of the options +supported. + + +.. index:: + single: debug + single: PSYCOPG_DEBUG + +.. _debug-build: + +Creating a debug build +^^^^^^^^^^^^^^^^^^^^^^ + +In case of problems, Psycopg can be configured to emit detailed debug +messages, which can be very useful for diagnostics and to report a bug. In +order to create a debug package: + +- `Download`__ and unpack the Psycopg *source package* (the ``.tar.gz`` + package). + +- Edit the ``setup.cfg`` file adding the ``PSYCOPG_DEBUG`` flag to the + ``define`` option. + +- :ref:`Compile and install ` the package. + +- Set the :envvar:`PSYCOPG_DEBUG` environment variable: + +.. code-block:: console + + $ export PSYCOPG_DEBUG=1 + +- Run your program (making sure that the `!psycopg2` package imported is the + one you just compiled and not e.g. the system one): you will have a copious + stream of informations printed on stderr. + +.. 
__: https://pypi.org/project/psycopg2/#files + + +Non-standard Python Implementation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The `psycopg2` package is the current mature implementation of the adapter: it +is a C extension and as such it is only compatible with CPython_. If you want +to use Psycopg on a different Python implementation (PyPy, Jython, IronPython) +there is a couple of alternative: + +- a `Ctypes port`__, but it is not as mature as the C implementation yet + and it is not as feature-complete; + +- a `CFFI port`__ which is currently more used and reported more efficient on + PyPy, but please be careful of its version numbers because they are not + aligned to the official psycopg2 ones and some features may differ. + +.. _PostgreSQL: https://www.postgresql.org/ +.. _Python: https://www.python.org/ +.. _libpq: https://www.postgresql.org/docs/current/static/libpq.html +.. _CPython: https://en.wikipedia.org/wiki/CPython +.. _Ctypes: https://docs.python.org/library/ctypes.html +.. __: https://github.com/mvantellingen/psycopg2-ctypes +.. __: https://github.com/chtd/psycopg2cffi + + +.. index:: + single: tests + +.. _test-suite: + +Running the test suite +---------------------- + +Once `!psycopg2` is installed you can run the test suite to verify it is +working correctly. From the source directory, you can run: + +.. code-block:: console + + $ python -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')" --verbose + +The tests run against a database called ``psycopg2_test`` on UNIX socket and +the standard port. You can configure a different database to run the test by +setting the environment variables: + +- :envvar:`PSYCOPG2_TESTDB` +- :envvar:`PSYCOPG2_TESTDB_HOST` +- :envvar:`PSYCOPG2_TESTDB_PORT` +- :envvar:`PSYCOPG2_TESTDB_USER` + +The database should already exist before running the tests. + + +.. _other-problems: + +If you still have problems +-------------------------- + +Try the following. 
*In order:* + +- Read again the :ref:`build-prerequisites`. + +- Read the :ref:`FAQ `. + +- Google for `!psycopg2` *your error message*. Especially useful the week + after the release of a new OS X version. + +- Write to the `Mailing List`_. + +- If you think that you have discovered a bug, test failure or missing feature + please raise a ticket in the `bug tracker`_. + +- Complain on your blog or on Twitter that `!psycopg2` is the worst package + ever and about the quality time you have wasted figuring out the correct + :envvar:`ARCHFLAGS`. Especially useful from the Starbucks near you. + +.. _mailing list: https://www.postgresql.org/list/psycopg/ +.. _bug tracker: https://github.com/psycopg/psycopg2/issues diff --git a/doc/src/license.rst b/doc/src/license.rst new file mode 100644 index 0000000000000000000000000000000000000000..53a4e7248c0d7410b732548dc84b7528ae25b568 --- /dev/null +++ b/doc/src/license.rst @@ -0,0 +1,7 @@ +.. index:: + single: License + +License +======= + +.. include:: ../../LICENSE diff --git a/doc/src/module.rst b/doc/src/module.rst new file mode 100644 index 0000000000000000000000000000000000000000..f17f3ae46497970a0629ff8390cf07a06cb60c42 --- /dev/null +++ b/doc/src/module.rst @@ -0,0 +1,388 @@ +The `psycopg2` module content +================================== + +.. sectionauthor:: Daniele Varrazzo + +.. module:: psycopg2 + +The module interface respects the standard defined in the |DBAPI|_. + +.. index:: + single: Connection string + double: Connection; Parameters + single: Username; Connection + single: Password; Connection + single: Host; Connection + single: Port; Connection + single: DSN (Database Source Name) + +.. function:: + connect(dsn=None, connection_factory=None, cursor_factory=None, async=False, \*\*kwargs) + + Create a new database session and return a new `connection` object. 
+ + The connection parameters can be specified as a `libpq connection + string`__ using the *dsn* parameter:: + + conn = psycopg2.connect("dbname=test user=postgres password=secret") + + or using a set of keyword arguments:: + + conn = psycopg2.connect(dbname="test", user="postgres", password="secret") + + or using a mix of both: if the same parameter name is specified in both + sources, the *kwargs* value will have precedence over the *dsn* value. + Note that either the *dsn* or at least one connection-related keyword + argument is required. + + The basic connection parameters are: + + - `!dbname` -- the database name (`!database` is a deprecated alias) + - `!user` -- user name used to authenticate + - `!password` -- password used to authenticate + - `!host` -- database host address (defaults to UNIX socket if not provided) + - `!port` -- connection port number (defaults to 5432 if not provided) + + Any other connection parameter supported by the client library/server can + be passed either in the connection string or as a keyword. The PostgreSQL + documentation contains the complete list of the `supported parameters`__. + Also note that the same parameters can be passed to the client library + using `environment variables`__. + + .. __: + .. _connstring: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING + .. __: + .. _connparams: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS + .. __: + .. _connenvvars: https://www.postgresql.org/docs/current/static/libpq-envars.html + + Using the *connection_factory* parameter a different class or + connections factory can be specified. It should be a callable object + taking a *dsn* string argument. See :ref:`subclassing-connection` for + details. If a *cursor_factory* is specified, the connection's + `~connection.cursor_factory` is set to it. If you only need customized + cursors you can use this parameter instead of subclassing a connection. 
+ + Using *async*\=\ `!True` an asynchronous connection will be created: see + :ref:`async-support` to know about advantages and limitations. *async_* is + a valid alias for the Python version where ``async`` is a keyword. + + .. versionchanged:: 2.4.3 + any keyword argument is passed to the connection. Previously only the + basic parameters (plus `!sslmode`) were supported as keywords. + + .. versionchanged:: 2.5 + added the *cursor_factory* parameter. + + .. versionchanged:: 2.7 + both *dsn* and keyword arguments can be specified. + + .. versionchanged:: 2.7 + added *async_* alias. + + .. seealso:: + + - `~psycopg2.extensions.parse_dsn` + - libpq `connection string syntax`__ + - libpq supported `connection parameters`__ + - libpq supported `environment variables`__ + + .. __: connstring_ + .. __: connparams_ + .. __: connenvvars_ + + .. extension:: + + The non-connection-related keyword parameters are Psycopg extensions + to the |DBAPI|_. + +.. data:: apilevel + + String constant stating the supported DB API level. For `psycopg2` is + ``2.0``. + +.. data:: threadsafety + + Integer constant stating the level of thread safety the interface + supports. For `psycopg2` is ``2``, i.e. threads can share the module + and the connection. See :ref:`thread-safety` for details. + +.. data:: paramstyle + + String constant stating the type of parameter marker formatting expected + by the interface. For `psycopg2` is ``pyformat``. See also + :ref:`query-parameters`. + +.. data:: __libpq_version__ + + Integer constant reporting the version of the ``libpq`` library this + ``psycopg2`` module was compiled with (in the same format of + `~psycopg2.extensions.ConnectionInfo.server_version`). If this value is + greater or equal than ``90100`` then you may query the version of the + actually loaded library using the `~psycopg2.extensions.libpq_version()` + function. + + +.. index:: + single: Exceptions; DB API + +.. 
_dbapi-exceptions: + +Exceptions +---------- + +In compliance with the |DBAPI|_, the module makes information about errors +available through the following exceptions: + +.. exception:: Warning + + Exception raised for important warnings like data truncations while + inserting, etc. It is a subclass of the Python `StandardError` + (`Exception` on Python 3). + +.. exception:: Error + + Exception that is the base class of all other error exceptions. You can + use this to catch all errors with one single `!except` statement. Warnings + are not considered errors and thus do not use this class as base. It + is a subclass of the Python `StandardError` (`Exception` on Python 3). + + .. attribute:: pgerror + + String representing the error message returned by the backend, + `!None` if not available. + + .. attribute:: pgcode + + String representing the error code returned by the backend, `!None` + if not available. The `~psycopg2.errorcodes` module contains + symbolic constants representing PostgreSQL error codes. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> try: + ... cur.execute("SELECT * FROM barf") + ... except psycopg2.Error as e: + ... pass + + >>> e.pgcode + '42P01' + >>> print(e.pgerror) + ERROR: relation "barf" does not exist + LINE 1: SELECT * FROM barf + ^ + + .. attribute:: cursor + + The cursor the exception was raised from; `None` if not applicable. + + .. attribute:: diag + + A `~psycopg2.extensions.Diagnostics` object containing further + information about the error. :: + + >>> try: + ... cur.execute("SELECT * FROM barf") + ... except psycopg2.Error as e: + ... pass + + >>> e.diag.severity + 'ERROR' + >>> e.diag.message_primary + 'relation "barf" does not exist' + + .. versionadded:: 2.5 + + .. extension:: + + The `~Error.pgerror`, `~Error.pgcode`, `~Error.cursor`, and + `~Error.diag` attributes are Psycopg extensions. + + +.. 
exception:: InterfaceError + + Exception raised for errors that are related to the database interface + rather than the database itself. It is a subclass of `Error`. + +.. exception:: DatabaseError + + Exception raised for errors that are related to the database. It is a + subclass of `Error`. + +.. exception:: DataError + + Exception raised for errors that are due to problems with the processed + data like division by zero, numeric value out of range, etc. It is a + subclass of `DatabaseError`. + +.. exception:: OperationalError + + Exception raised for errors that are related to the database's operation + and not necessarily under the control of the programmer, e.g. an + unexpected disconnect occurs, the data source name is not found, a + transaction could not be processed, a memory allocation error occurred + during processing, etc. It is a subclass of `DatabaseError`. + +.. exception:: IntegrityError + + Exception raised when the relational integrity of the database is + affected, e.g. a foreign key check fails. It is a subclass of + `DatabaseError`. + +.. exception:: InternalError + + Exception raised when the database encounters an internal error, e.g. the + cursor is not valid anymore, the transaction is out of sync, etc. It is a + subclass of `DatabaseError`. + +.. exception:: ProgrammingError + + Exception raised for programming errors, e.g. table not found or already + exists, syntax error in the SQL statement, wrong number of parameters + specified, etc. It is a subclass of `DatabaseError`. + +.. exception:: NotSupportedError + + Exception raised in case a method or database API was used which is not + supported by the database, e.g. requesting a `!rollback()` on a + connection that does not support transaction or has transactions turned + off. It is a subclass of `DatabaseError`. + + +.. 
extension:: + + Psycopg actually raises a different exception for each :sql:`SQLSTATE` + error returned by the database: the classes are available in the + `psycopg2.errors` module. Every exception class is a subclass of one of + the exception classes defined here though, so they don't need to be + trapped specifically: trapping `!Error` or `!DatabaseError` is usually + what needed to write a generic error handler; trapping a specific error + such as `!NotNullViolation` can be useful to write specific exception + handlers. + + +This is the exception inheritance layout: + +.. parsed-literal:: + + `!StandardError` + \|__ `Warning` + \|__ `Error` + \|__ `InterfaceError` + \|__ `DatabaseError` + \|__ `DataError` + \|__ `OperationalError` + \|__ `IntegrityError` + \|__ `InternalError` + \|__ `ProgrammingError` + \|__ `NotSupportedError` + + + +.. _type-objects-and-constructors: + +Type Objects and Constructors +----------------------------- + +.. note:: + + This section is mostly copied verbatim from the |DBAPI|_ + specification. While these objects are exposed in compliance to the + DB API, Psycopg offers very accurate tools to convert data between Python + and PostgreSQL formats. See :ref:`adapting-new-types` and + :ref:`type-casting-from-sql-to-python` + +Many databases need to have the input in a particular format for +binding to an operation's input parameters. For example, if an +input is destined for a DATE column, then it must be bound to the +database in a particular string format. Similar problems exist +for "Row ID" columns or large binary items (e.g. blobs or RAW +columns). This presents problems for Python since the parameters +to the .execute*() method are untyped. When the database module +sees a Python string object, it doesn't know if it should be bound +as a simple CHAR column, as a raw BINARY item, or as a DATE. + +To overcome this problem, a module must provide the constructors +defined below to create objects that can hold special values. 
+When passed to the cursor methods, the module can then detect the +proper type of the input parameter and bind it accordingly. + +A Cursor Object's description attribute returns information about +each of the result columns of a query. The type_code must compare +equal to one of Type Objects defined below. Type Objects may be +equal to more than one type code (e.g. DATETIME could be equal to +the type codes for date, time and timestamp columns; see the +Implementation Hints below for details). + +The module exports the following constructors and singletons: + +.. function:: Date(year,month,day) + + This function constructs an object holding a date value. + +.. function:: Time(hour,minute,second) + + This function constructs an object holding a time value. + +.. function:: Timestamp(year,month,day,hour,minute,second) + + This function constructs an object holding a time stamp value. + +.. function:: DateFromTicks(ticks) + + This function constructs an object holding a date value from the given + ticks value (number of seconds since the epoch; see the documentation of + the standard Python time module for details). + +.. function:: TimeFromTicks(ticks) + + This function constructs an object holding a time value from the given + ticks value (number of seconds since the epoch; see the documentation of + the standard Python time module for details). + +.. function:: TimestampFromTicks(ticks) + + This function constructs an object holding a time stamp value from the + given ticks value (number of seconds since the epoch; see the + documentation of the standard Python time module for details). + +.. function:: Binary(string) + + This function constructs an object capable of holding a binary (long) + string value. + +.. note:: + + All the adapters returned by the module level factories (`!Binary`, + `!Date`, `!Time`, `!Timestamp` and the `!*FromTicks` variants) expose the + wrapped object (a regular Python object such as `!datetime`) in an + `!adapted` attribute. + +.. 
data:: STRING + + This type object is used to describe columns in a database that are + string-based (e.g. CHAR). + +.. data:: BINARY + + This type object is used to describe (long) binary columns in a database + (e.g. LONG, RAW, BLOBs). + +.. data:: NUMBER + + This type object is used to describe numeric columns in a database. + +.. data:: DATETIME + + This type object is used to describe date/time columns in a database. + +.. data:: ROWID + + This type object is used to describe the "Row ID" column in a database. + + +.. testcode:: + :hide: + + conn.rollback() diff --git a/doc/src/news.rst b/doc/src/news.rst new file mode 100644 index 0000000000000000000000000000000000000000..053d6464a4a31bbd3aee7bf92547a9735442e774 --- /dev/null +++ b/doc/src/news.rst @@ -0,0 +1,8 @@ +.. index:: + single: Release notes + single: News + +Release notes +============= + +.. include:: ../../NEWS diff --git a/doc/src/pool.rst b/doc/src/pool.rst new file mode 100644 index 0000000000000000000000000000000000000000..95f4e23231c34a31cad94d2b78a6e53865e0e214 --- /dev/null +++ b/doc/src/pool.rst @@ -0,0 +1,60 @@ +`psycopg2.pool` -- Connections pooling +====================================== + +.. sectionauthor:: Daniele Varrazzo + +.. index:: + pair: Connection; Pooling + +.. module:: psycopg2.pool + +Creating new PostgreSQL connections can be an expensive operation. This +module offers a few pure Python classes implementing simple connection pooling +directly in the client application. + +.. class:: AbstractConnectionPool(minconn, maxconn, \*args, \*\*kwargs) + + Base class implementing generic key-based pooling code. + + New *minconn* connections are created automatically. The pool will support + a maximum of about *maxconn* connections. *\*args* and *\*\*kwargs* are + passed to the `~psycopg2.connect()` function. + + The following methods are expected to be implemented by subclasses: + + .. method:: getconn(key=None) + + Get a free connection from the pool. 
+ + The *key* parameter is optional: if used, the connection will be + associated to the key and calling `!getconn()` with the same key again + will return the same connection. + + .. method:: putconn(conn, key=None, close=False) + + Put away a connection. + + If *close* is `!True`, discard the connection from the pool. + *key* should be used consistently with `getconn()`. + + .. method:: closeall + + Close all the connections handled by the pool. + + Note that all the connections are closed, including ones + eventually in use by the application. + + +The following classes are `AbstractConnectionPool` subclasses ready to +be used. + +.. autoclass:: SimpleConnectionPool + + .. note:: This pool class is useful only for single-threaded applications. + + +.. index:: Multithread; Connection pooling + +.. autoclass:: ThreadedConnectionPool + + .. note:: This pool class can be safely used in multi-threaded applications. diff --git a/doc/src/sql.rst b/doc/src/sql.rst new file mode 100644 index 0000000000000000000000000000000000000000..c6507e08a09251c2ed4588715c04f52662a37571 --- /dev/null +++ b/doc/src/sql.rst @@ -0,0 +1,147 @@ +`psycopg2.sql` -- SQL string composition +======================================== + +.. sectionauthor:: Daniele Varrazzo + +.. module:: psycopg2.sql + +.. versionadded:: 2.7 + +The module contains objects and functions useful to generate SQL dynamically, +in a convenient and safe way. SQL identifiers (e.g. 
names of tables and +fields) cannot be passed to the `~cursor.execute()` method like query +arguments:: + + # This will not work + table_name = 'my_table' + cur.execute("insert into %s values (%s, %s)", [table_name, 10, 20]) + +The SQL query should be composed before the arguments are merged, for +instance:: + + # This works, but it is not optimal + table_name = 'my_table' + cur.execute( + "insert into %s values (%%s, %%s)" % table_name, + [10, 20]) + +This sort of works, but it is an accident waiting to happen: the table name +may be an invalid SQL literal and need quoting; even more serious is the +security problem in case the table name comes from an untrusted source. The +name should be escaped using `~psycopg2.extensions.quote_ident()`:: + + # This works, but it is not optimal + table_name = 'my_table' + cur.execute( + "insert into %s values (%%s, %%s)" % ext.quote_ident(table_name), + [10, 20]) + +This is now safe, but it somewhat ad-hoc. In case, for some reason, it is +necessary to include a value in the query string (as opposite as in a value) +the merging rule is still different (`~psycopg2.extensions.adapt()` should be +used...). It is also still relatively dangerous: if `!quote_ident()` is +forgotten somewhere, the program will usually work, but will eventually crash +in the presence of a table or field name with containing characters to escape, +or will present a potentially exploitable weakness. 
+ +The objects exposed by the `!psycopg2.sql` module allow generating SQL +statements on the fly, separating clearly the variable parts of the statement +from the query parameters:: + + from psycopg2 import sql + + cur.execute( + sql.SQL("insert into {} values (%s, %s)") + .format(sql.Identifier('my_table')), + [10, 20]) + + +Module usage +------------ + +Usually you should express the template of your query as an `SQL` instance +with `{}`\-style placeholders and use `~SQL.format()` to merge the variable +parts into them, all of which must be `Composable` subclasses. You can still +have `%s`\ -style placeholders in your query and pass values to +`~cursor.execute()`: such value placeholders will be untouched by +`!format()`:: + + query = sql.SQL("select {field} from {table} where {pkey} = %s").format( + field=sql.Identifier('my_name'), + table=sql.Identifier('some_table'), + pkey=sql.Identifier('id')) + +The resulting object is meant to be passed directly to cursor methods such as +`~cursor.execute()`, `~cursor.executemany()`, `~cursor.copy_expert()`, but can +also be used to compose a query as a Python string, using the +`~Composable.as_string()` method:: + + cur.execute(query, (42,)) + +If part of your query is a variable sequence of arguments, such as a +comma-separated list of field names, you can use the `SQL.join()` method to +pass them to the query:: + + query = sql.SQL("select {fields} from {table}").format( + fields=sql.SQL(',').join([ + sql.Identifier('field1'), + sql.Identifier('field2'), + sql.Identifier('field3'), + ]), + table=sql.Identifier('some_table')) + + +`!sql` objects +-------------- + +The `!sql` objects are in the following inheritance hierarchy: + +| `Composable`: the base class exposing the common interface +| ``|__`` `SQL`: a literal snippet of an SQL query +| ``|__`` `Identifier`: a PostgreSQL identifier or dot-separated sequence of identifiers +| ``|__`` `Literal`: a value hardcoded into a query +| ``|__`` `Placeholder`: a `%s`\ -style 
placeholder whose value will be added later e.g. by `~cursor.execute()` +| ``|__`` `Composed`: a sequence of `!Composable` instances. + + +.. autoclass:: Composable + + .. automethod:: as_string + + +.. autoclass:: SQL + + .. autoattribute:: string + + .. automethod:: format + + .. automethod:: join + + +.. autoclass:: Identifier + + .. versionchanged:: 2.8 + added support for multiple strings. + + .. autoattribute:: strings + + .. versionadded:: 2.8 + previous verions only had a `!string` attribute. The attribute + still exists but is deprecate and will only work if the + `!Identifier` wraps a single string. + +.. autoclass:: Literal + + .. autoattribute:: wrapped + + +.. autoclass:: Placeholder + + .. autoattribute:: name + + +.. autoclass:: Composed + + .. autoattribute:: seq + + .. automethod:: join diff --git a/doc/src/tools/lib/dbapi_extension.py b/doc/src/tools/lib/dbapi_extension.py new file mode 100755 index 0000000000000000000000000000000000000000..7fc776a4bbb322f2f93e82eb04428793a7068b9c --- /dev/null +++ b/doc/src/tools/lib/dbapi_extension.py @@ -0,0 +1,50 @@ +""" + extension + ~~~~~~~~~ + + A directive to create a box warning that a certain bit of Psycopg is an + extension to the DBAPI 2.0. + + :copyright: Copyright 2010 by Daniele Varrazzo. +""" + +from docutils import nodes + +from sphinx.locale import _ +from docutils.parsers.rst import Directive + +class extension_node(nodes.Admonition, nodes.Element): pass + + +class Extension(Directive): + """ + An extension entry, displayed as an admonition. 
+ """ + + has_content = True + required_arguments = 0 + optional_arguments = 0 + final_argument_whitespace = False + option_spec = {} + + def run(self): + node = extension_node('\n'.join(self.content)) + node += nodes.title(_('DB API extension'), _('DB API extension')) + self.state.nested_parse(self.content, self.content_offset, node) + node['classes'].append('dbapi-extension') + return [node] + + +def visit_extension_node(self, node): + self.visit_admonition(node) + +def depart_extension_node(self, node): + self.depart_admonition(node) + +def setup(app): + app.add_node(extension_node, + html=(visit_extension_node, depart_extension_node), + latex=(visit_extension_node, depart_extension_node), + text=(visit_extension_node, depart_extension_node)) + + app.add_directive('extension', Extension) diff --git a/doc/src/tools/lib/sql_role.py b/doc/src/tools/lib/sql_role.py new file mode 100644 index 0000000000000000000000000000000000000000..1731546e5a7e7c1ec319deb050af2f1e87fa585a --- /dev/null +++ b/doc/src/tools/lib/sql_role.py @@ -0,0 +1,19 @@ +""" + sql role + ~~~~~~~~ + + An interpreted text role to style SQL syntax in Psycopg documentation. + + :copyright: Copyright 2010 by Daniele Varrazzo. +""" + +from docutils import nodes, utils +from docutils.parsers.rst import roles + +def sql_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + text = utils.unescape(text) + options['classes'] = ['sql'] + return [nodes.literal(rawtext, text, **options)], [] + +def setup(app): + roles.register_local_role('sql', sql_role) diff --git a/doc/src/tools/lib/ticket_role.py b/doc/src/tools/lib/ticket_role.py new file mode 100644 index 0000000000000000000000000000000000000000..8ba87cb657119f50775f3354c11ecce0fe8cf968 --- /dev/null +++ b/doc/src/tools/lib/ticket_role.py @@ -0,0 +1,57 @@ +""" + ticket role + ~~~~~~~~~~~ + + An interpreted text role to link docs to tickets issues. + + :copyright: Copyright 2013 by Daniele Varrazzo. 
+""" + +import re +from docutils import nodes, utils +from docutils.parsers.rst import roles + +def ticket_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + cfg = inliner.document.settings.env.app.config + if cfg.ticket_url is None: + msg = inliner.reporter.warning( + "ticket not configured: please configure ticket_url in conf.py") + prb = inliner.problematic(rawtext, rawtext, msg) + return [prb], [msg] + + rv = [nodes.Text(name + ' ')] + tokens = re.findall(r'(#?\d+)|([^\d#]+)', text) + for ticket, noise in tokens: + if ticket: + num = int(ticket.replace('#', '')) + + # Push numbers of the oldel tickets ahead. + # We moved the tickets from a different tracker to GitHub and the + # latter already had a few ticket numbers taken (as merge + # requests). + remap_until = cfg.ticket_remap_until + remap_offset = cfg.ticket_remap_offset + if remap_until and remap_offset: + if num <= remap_until: + num += remap_offset + + url = cfg.ticket_url % num + roles.set_classes(options) + node = nodes.reference(ticket, utils.unescape(ticket), + refuri=url, **options) + + rv.append(node) + + else: + assert noise + rv.append(nodes.Text(noise)) + + return rv, [] + + +def setup(app): + app.add_config_value('ticket_url', None, 'env') + app.add_config_value('ticket_remap_until', None, 'env') + app.add_config_value('ticket_remap_offset', None, 'env') + app.add_role('ticket', ticket_role) + app.add_role('tickets', ticket_role) diff --git a/doc/src/tools/make_sqlstate_docs.py b/doc/src/tools/make_sqlstate_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..16fd9c987af64b08a999e1f5f3a63a052c584557 --- /dev/null +++ b/doc/src/tools/make_sqlstate_docs.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +"""Create the docs table of the sqlstate errors. 
+""" + + +import re +import sys +from collections import namedtuple + +from psycopg2._psycopg import sqlstate_errors + + +def main(): + sqlclasses = {} + clsfile = sys.argv[1] + with open(clsfile) as f: + for l in f: + m = re.match(r'/\* Class (..) - (.+) \*/', l) + if m is not None: + sqlclasses[m.group(1)] = m.group(2) + + Line = namedtuple('Line', 'colstate colexc colbase sqlstate') + + lines = [Line('SQLSTATE', 'Exception', 'Base exception', None)] + for k in sorted(sqlstate_errors): + exc = sqlstate_errors[k] + lines.append(Line( + f"``{k}``", f"`!{exc.__name__}`", + f"`!{get_base_exception(exc).__name__}`", k)) + + widths = [max(len(l[c]) for l in lines) for c in range(3)] + h = Line(*(['=' * w for w in widths] + [None])) + lines.insert(0, h) + lines.insert(2, h) + lines.append(h) + + h1 = '-' * (sum(widths) + len(widths) - 1) + sqlclass = None + for l in lines: + cls = l.sqlstate[:2] if l.sqlstate else None + if cls and cls != sqlclass: + print(f"**Class {cls}**: {sqlclasses[cls]}") + print(h1) + sqlclass = cls + + print("%-*s %-*s %-*s" % ( + widths[0], l.colstate, widths[1], l.colexc, widths[2], l.colbase)) + + +def get_base_exception(exc): + for cls in exc.__mro__: + if cls.__module__ == 'psycopg2': + return cls + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/doc/src/tz.rst b/doc/src/tz.rst new file mode 100644 index 0000000000000000000000000000000000000000..c1bd5ff7e59083f5119495174d6fe20f22a9f9cc --- /dev/null +++ b/doc/src/tz.rst @@ -0,0 +1,19 @@ +`psycopg2.tz` -- ``tzinfo`` implementations for Psycopg 2 +=============================================================== + +.. sectionauthor:: Daniele Varrazzo + +.. module:: psycopg2.tz + +.. deprecated:: 2.9 + The module will be dropped in psycopg 2.10. Use `datetime.timezone` + instead. 
+ +This module holds two different tzinfo implementations that can be used as the +`tzinfo` argument to `~datetime.datetime` constructors, directly passed to +Psycopg functions or used to set the `cursor.tzinfo_factory` attribute in +cursors. + +.. autoclass:: psycopg2.tz.FixedOffsetTimezone + +.. autoclass:: psycopg2.tz.LocalTimezone diff --git a/doc/src/usage.rst b/doc/src/usage.rst new file mode 100644 index 0000000000000000000000000000000000000000..5bb69e940170629fa72a4d46c14125275cc806fb --- /dev/null +++ b/doc/src/usage.rst @@ -0,0 +1,1106 @@ +.. _usage: + +Basic module usage +================== + +.. sectionauthor:: Daniele Varrazzo + +.. index:: + pair: Example; Usage + +The basic Psycopg usage is common to all the database adapters implementing +the |DBAPI|_ protocol. Here is an interactive session showing some of the +basic commands:: + + >>> import psycopg2 + + # Connect to an existing database + >>> conn = psycopg2.connect("dbname=test user=postgres") + + # Open a cursor to perform database operations + >>> cur = conn.cursor() + + # Execute a command: this creates a new table + >>> cur.execute("CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);") + + # Pass data to fill a query placeholders and let Psycopg perform + # the correct conversion (no more SQL injections!) + >>> cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", + ... (100, "abc'def")) + + # Query the database and obtain data as Python objects + >>> cur.execute("SELECT * FROM test;") + >>> cur.fetchone() + (1, 100, "abc'def") + + # Make the changes to the database persistent + >>> conn.commit() + + # Close communication with the database + >>> cur.close() + >>> conn.close() + + +The main entry points of Psycopg are: + +- The function `~psycopg2.connect()` creates a new database session and + returns a new `connection` instance. + +- The class `connection` encapsulates a database session. 
It allows you to:
VALUES (%(int)s, %(date)s, %(date)s, %(str)s); + ... """, + ... {'int': 10, 'str': "O'Reilly", 'date': datetime.date(2005, 11, 18)}) + +Using characters ``%``, ``(``, ``)`` in the argument names is not supported. + +When parameters are used, in order to include a literal ``%`` in the query you +can use the ``%%`` string:: + + >>> cur.execute("SELECT (%s % 2) = 0 AS even", (10,)) # WRONG + >>> cur.execute("SELECT (%s %% 2) = 0 AS even", (10,)) # correct + +While the mechanism resembles regular Python strings manipulation, there are a +few subtle differences you should care about when passing parameters to a +query. + +- The Python string operator ``%`` *must not be used*: the `~cursor.execute()` + method accepts a tuple or dictionary of values as second parameter. + |sql-warn|__: + + .. |sql-warn| replace:: **Never** use ``%`` or ``+`` to merge values + into queries + + .. __: sql-injection_ + + >>> cur.execute("INSERT INTO numbers VALUES (%s, %s)" % (10, 20)) # WRONG + >>> cur.execute("INSERT INTO numbers VALUES (%s, %s)", (10, 20)) # correct + +- For positional variables binding, *the second argument must always be a + sequence*, even if it contains a single variable (remember that Python + requires a comma to create a single element tuple):: + + >>> cur.execute("INSERT INTO foo VALUES (%s)", "bar") # WRONG + >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar")) # WRONG + >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar",)) # correct + >>> cur.execute("INSERT INTO foo VALUES (%s)", ["bar"]) # correct + +- The placeholder *must not be quoted*. 
Psycopg will add quotes where needed:: + + >>> cur.execute("INSERT INTO numbers VALUES ('%s')", (10,)) # WRONG + >>> cur.execute("INSERT INTO numbers VALUES (%s)", (10,)) # correct + +- The variables placeholder *must always be a* ``%s``, even if a different + placeholder (such as a ``%d`` for integers or ``%f`` for floats) may look + more appropriate:: + + >>> cur.execute("INSERT INTO numbers VALUES (%d)", (10,)) # WRONG + >>> cur.execute("INSERT INTO numbers VALUES (%s)", (10,)) # correct + +- Only query values should be bound via this method: it shouldn't be used to + merge table or field names to the query (Psycopg will try quoting the table + name as a string value, generating invalid SQL). If you need to generate + dynamically SQL queries (for instance choosing dynamically a table name) + you can use the facilities provided by the `psycopg2.sql` module:: + + >>> cur.execute("INSERT INTO %s VALUES (%s)", ('numbers', 10)) # WRONG + >>> cur.execute( # correct + ... SQL("INSERT INTO {} VALUES (%s)").format(Identifier('numbers')), + ... (10,)) + + +.. index:: Security, SQL injection + +.. _sql-injection: + +The problem with the query parameters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The SQL representation of many data types is often different from their Python +string representation. The typical example is with single quotes in strings: +in SQL single quotes are used as string literal delimiters, so the ones +appearing inside the string itself must be escaped, whereas in Python single +quotes can be left unescaped if the string is delimited by double quotes. 
Because of the difference, sometimes subtle,
+ +The correct way to pass variables in a SQL command is using the second +argument of the `~cursor.execute()` method:: + + >>> SQL = "INSERT INTO authors (name) VALUES (%s);" # Note: no quotes + >>> data = ("O'Reilly", ) + >>> cur.execute(SQL, data) # Note: no % operator + + +Values containing backslashes and LIKE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Unlike in Python, the backslash (`\\`) is not used as an escape +character *except* in patterns used with `LIKE` and `ILIKE` where they +are needed to escape the `%` and `_` characters. + +This can lead to confusing situations:: + + >>> path = r'C:\Users\Bobby.Tables' + >>> cur.execute('INSERT INTO mytable(path) VALUES (%s)', (path,)) + >>> cur.execute('SELECT * FROM mytable WHERE path LIKE %s', (path,)) + >>> cur.fetchall() + [] + +The solution is to specify an `ESCAPE` character of `''` (empty string) +in your `LIKE` query:: + + >>> cur.execute("SELECT * FROM mytable WHERE path LIKE %s ESCAPE ''", (path,)) + + + +.. index:: + single: Adaptation + pair: Objects; Adaptation + single: Data types; Adaptation + +.. _python-types-adaptation: + +Adaptation of Python values to SQL types +---------------------------------------- + +Many standard Python types are adapted into SQL and returned as Python +objects when a query is executed. + +The following table shows the default mapping between Python and PostgreSQL +types: + +.. + TODO: The table is not rendered in text output + +.. only:: html + + .. 
table:: + :class: data-types + + +--------------------+-------------------------+--------------------------+ + | Python | PostgreSQL | See also | + +====================+=========================+==========================+ + | `!None` | :sql:`NULL` | :ref:`adapt-consts` | + +--------------------+-------------------------+ | + | `!bool` | :sql:`bool` | | + +--------------------+-------------------------+--------------------------+ + | `!float` | | :sql:`real` | :ref:`adapt-numbers` | + | | | :sql:`double` | | + +--------------------+-------------------------+ | + | | `!int` | | :sql:`smallint` | | + | | `!long` | | :sql:`integer` | | + | | | :sql:`bigint` | | + +--------------------+-------------------------+ | + | `~decimal.Decimal` | :sql:`numeric` | | + +--------------------+-------------------------+--------------------------+ + | | `!str` | | :sql:`varchar` | :ref:`adapt-string` | + | | `!unicode` | | :sql:`text` | | + +--------------------+-------------------------+--------------------------+ + | | `buffer` | :sql:`bytea` | :ref:`adapt-binary` | + | | `memoryview` | | | + | | `bytearray` | | | + | | `bytes` | | | + | | Buffer protocol | | | + +--------------------+-------------------------+--------------------------+ + | `!date` | :sql:`date` | :ref:`adapt-date` | + +--------------------+-------------------------+ | + | `!time` | | :sql:`time` | | + | | | :sql:`timetz` | | + +--------------------+-------------------------+ | + | `!datetime` | | :sql:`timestamp` | | + | | | :sql:`timestamptz` | | + +--------------------+-------------------------+ | + | `!timedelta` | :sql:`interval` | | + +--------------------+-------------------------+--------------------------+ + | `!list` | :sql:`ARRAY` | :ref:`adapt-list` | + +--------------------+-------------------------+--------------------------+ + | | `!tuple` | | Composite types | | :ref:`adapt-tuple` | + | | `!namedtuple` | | :sql:`IN` syntax | | :ref:`adapt-composite` | + 
+--------------------+-------------------------+--------------------------+ + | `!dict` | :sql:`hstore` | :ref:`adapt-hstore` | + +--------------------+-------------------------+--------------------------+ + | Psycopg's `!Range` | :sql:`range` | :ref:`adapt-range` | + +--------------------+-------------------------+--------------------------+ + | Anything\ |tm| | :sql:`json` | :ref:`adapt-json` | + +--------------------+-------------------------+--------------------------+ + | `~uuid.UUID` | :sql:`uuid` | :ref:`adapt-uuid` | + +--------------------+-------------------------+--------------------------+ + | `ipaddress` | | :sql:`inet` | :ref:`adapt-network` | + | objects | | :sql:`cidr` | | + +--------------------+-------------------------+--------------------------+ + +.. |tm| unicode:: U+2122 + +The mapping is fairly customizable: see :ref:`adapting-new-types` and +:ref:`type-casting-from-sql-to-python`. You can also find a few other +specialized adapters in the `psycopg2.extras` module. + + +.. index:: + pair: None; Adaptation + single: NULL; Adaptation + pair: Boolean; Adaptation + +.. _adapt-consts: + +Constants adaptation +^^^^^^^^^^^^^^^^^^^^ + +Python `None` and boolean values `True` and `False` are converted into the +proper SQL literals:: + + >>> cur.mogrify("SELECT %s, %s, %s;", (None, True, False)) + 'SELECT NULL, true, false;' + + +.. index:: + single: Adaptation; numbers + single: Integer; Adaptation + single: Float; Adaptation + single: Decimal; Adaptation + +.. _adapt-numbers: + +Numbers adaptation +^^^^^^^^^^^^^^^^^^ + +Python numeric objects `int`, `long`, `float`, `~decimal.Decimal` are +converted into a PostgreSQL numerical representation:: + + >>> cur.mogrify("SELECT %s, %s, %s, %s;", (10, 10L, 10.0, Decimal("10.00"))) + 'SELECT 10, 10, 10.0, 10.00;' + +Reading from the database, integer types are converted into `!int`, floating +point types are converted into `!float`, :sql:`numeric`\/\ :sql:`decimal` are +converted into `!Decimal`. + +.. 
note:: + + Sometimes you may prefer to receive :sql:`numeric` data as `!float` + instead, for performance reason or ease of manipulation: you can configure + an adapter to :ref:`cast PostgreSQL numeric to Python float `. + This of course may imply a loss of precision. + +.. seealso:: `PostgreSQL numeric types + `__ + + +.. index:: + pair: Strings; Adaptation + single: Unicode; Adaptation + +.. _adapt-string: + +Strings adaptation +^^^^^^^^^^^^^^^^^^ + +Python `str` and `unicode` are converted into the SQL string syntax. +`!unicode` objects (`!str` in Python 3) are encoded in the connection +`~connection.encoding` before sending to the backend: trying to send a +character not supported by the encoding will result in an error. Data is +usually received as `!str` (*i.e.* it is *decoded* on Python 3, left *encoded* +on Python 2). However it is possible to receive `!unicode` on Python 2 too: +see :ref:`unicode-handling`. + + +.. index:: + single: Unicode + +.. _unicode-handling: + +Unicode handling +'''''''''''''''' + +Psycopg can exchange Unicode data with a PostgreSQL database. Python +`!unicode` objects are automatically *encoded* in the client encoding +defined on the database connection (the `PostgreSQL encoding`__, available in +`connection.encoding`, is translated into a `Python encoding`__ using the +`~psycopg2.extensions.encodings` mapping):: + + >>> print u, type(u) + àÚÏĂČĂč€ + + >>> cur.execute("INSERT INTO test (num, data) VALUES (%s,%s);", (74, u)) + +.. __: https://www.postgresql.org/docs/current/static/multibyte.html +.. 
__: https://docs.python.org/library/codecs.html + +When reading data from the database, in Python 2 the strings returned are +usually 8 bit `!str` objects encoded in the database client encoding:: + + >>> print conn.encoding + UTF8 + + >>> cur.execute("SELECT data FROM test WHERE num = 74") + >>> x = cur.fetchone()[0] + >>> print x, type(x), repr(x) + àÚÏĂČĂč€ '\xc3\xa0\xc3\xa8\xc3\xac\xc3\xb2\xc3\xb9\xe2\x82\xac' + + >>> conn.set_client_encoding('LATIN9') + + >>> cur.execute("SELECT data FROM test WHERE num = 74") + >>> x = cur.fetchone()[0] + >>> print type(x), repr(x) + '\xe0\xe8\xec\xf2\xf9\xa4' + +In Python 3 instead the strings are automatically *decoded* in the connection +`~connection.encoding`, as the `!str` object can represent Unicode characters. +In Python 2 you must register a :ref:`typecaster +` in order to receive `!unicode` objects:: + + >>> psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, cur) + + >>> cur.execute("SELECT data FROM test WHERE num = 74") + >>> x = cur.fetchone()[0] + >>> print x, type(x), repr(x) + àÚÏĂČĂč€ u'\xe0\xe8\xec\xf2\xf9\u20ac' + +In the above example, the `~psycopg2.extensions.UNICODE` typecaster is +registered only on the cursor. It is also possible to register typecasters on +the connection or globally: see the function +`~psycopg2.extensions.register_type()` and +:ref:`type-casting-from-sql-to-python` for details. + +.. note:: + + In Python 2, if you want to uniformly receive all your database input in + Unicode, you can register the related typecasters globally as soon as + Psycopg is imported:: + + import psycopg2.extensions + psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) + psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY) + + and forget about this story. + +.. note:: + + In some cases, on Python 3, you may want to receive `!bytes` instead of + `!str`, without undergoing to any decoding. This is especially the case if + the data in the database is in mixed encoding. 
The + `~psycopg2.extensions.BYTES` caster is what you neeed:: + + import psycopg2.extensions + psycopg2.extensions.register_type(psycopg2.extensions.BYTES, conn) + psycopg2.extensions.register_type(psycopg2.extensions.BYTESARRAY, conn) + cur = conn.cursor() + cur.execute("select %s::text", (u"€",)) + cur.fetchone()[0] + b'\xe2\x82\xac' + + .. versionadded: 2.8 + + +.. index:: + single: Buffer; Adaptation + single: bytea; Adaptation + single: bytes; Adaptation + single: bytearray; Adaptation + single: memoryview; Adaptation + single: Binary string + +.. _adapt-binary: + +Binary adaptation +^^^^^^^^^^^^^^^^^ + +Python types representing binary objects are converted into PostgreSQL binary +string syntax, suitable for :sql:`bytea` fields. Such types are `buffer` +(only available in Python 2), `memoryview`, `bytearray`, and `bytes` (only in +Python 3: the name is available in Python 2 but it's only an alias for the +type `!str`). Any object implementing the `Revised Buffer Protocol`__ should +be usable as binary type. Received data is returned as `!buffer` (in Python 2) +or `!memoryview` (in Python 3). + +.. __: https://www.python.org/dev/peps/pep-3118/ + +.. versionchanged:: 2.4 + only strings were supported before. + +.. versionchanged:: 2.4.1 + can parse the 'hex' format from 9.0 servers without relying on the + version of the client library. + +.. note:: + + In Python 2, if you have binary data in a `!str` object, you can pass them + to a :sql:`bytea` field using the `psycopg2.Binary` wrapper:: + + mypic = open('picture.png', 'rb').read() + curs.execute("insert into blobs (file) values (%s)", + (psycopg2.Binary(mypic),)) + +.. warning:: + + Since version 9.0 PostgreSQL uses by default `a new "hex" format`__ to + emit :sql:`bytea` fields. Starting from Psycopg 2.4.1 the format is + correctly supported. 
If you use a previous version you will need some + extra care when receiving bytea from PostgreSQL: you must have at least + libpq 9.0 installed on the client or alternatively you can set the + `bytea_output`__ configuration parameter to ``escape``, either in the + server configuration file or in the client session (using a query such as + ``SET bytea_output TO escape;``) before receiving binary data. + + .. __: https://www.postgresql.org/docs/current/static/datatype-binary.html + .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-BYTEA-OUTPUT + + +.. index:: + single: Adaptation; Date/Time objects + single: Date objects; Adaptation + single: Time objects; Adaptation + single: Interval objects; Adaptation + +.. _adapt-date: + +Date/Time objects adaptation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Python builtin `~datetime.datetime`, `~datetime.date`, +`~datetime.time`, `~datetime.timedelta` are converted into PostgreSQL's +:sql:`timestamp[tz]`, :sql:`date`, :sql:`time[tz]`, :sql:`interval` data types. +Time zones are supported too. + + >>> dt = datetime.datetime.now() + >>> dt + datetime.datetime(2010, 2, 8, 1, 40, 27, 425337) + + >>> cur.mogrify("SELECT %s, %s, %s;", (dt, dt.date(), dt.time())) + "SELECT '2010-02-08T01:40:27.425337', '2010-02-08', '01:40:27.425337';" + + >>> cur.mogrify("SELECT %s;", (dt - datetime.datetime(2010,1,1),)) + "SELECT '38 days 6027.425337 seconds';" + +.. seealso:: `PostgreSQL date/time types + `__ + + +.. index:: + single: Time Zones + +.. _tz-handling: + +Time zones handling +''''''''''''''''''' + +The PostgreSQL type :sql:`timestamp with time zone` (a.k.a. +:sql:`timestamptz`) is converted into Python `~datetime.datetime` objects. + + >>> cur.execute("SET TIME ZONE 'Europe/Rome'") # UTC + 1 hour + >>> cur.execute("SELECT '2010-01-01 10:30:45'::timestamptz") + >>> cur.fetchone()[0] + datetime.datetime(2010, 1, 1, 10, 30, 45, + tzinfo=datetime.timezone(datetime.timedelta(seconds=3600))) + +.. 
note:: + + Before Python 3.7, the `datetime` module only supported timezones with an + integer number of minutes. A few historical time zones had seconds in the + UTC offset: these time zones will have the offset rounded to the nearest + minute, with an error of up to 30 seconds, on Python versions before 3.7. + + >>> cur.execute("SET TIME ZONE 'Asia/Calcutta'") # offset was +5:21:10 + >>> cur.execute("SELECT '1900-01-01 10:30:45'::timestamptz") + >>> cur.fetchone()[0].tzinfo + # On Python 3.6: 5h, 21m + datetime.timezone(datetime.timedelta(0, 19260)) + # On Python 3.7 and following: 5h, 21m, 10s + datetime.timezone(datetime.timedelta(seconds=19270)) + +.. versionchanged:: 2.2.2 + timezones with seconds are supported (with rounding). Previously such + timezones raised an error. + +.. versionchanged:: 2.9 + timezones with seconds are supported without rounding. + +.. versionchanged:: 2.9 + use `datetime.timezone` as default tzinfo object instead of + `~psycopg2.tz.FixedOffsetTimezone`. + +.. index:: + double: Date objects; Infinite + +.. _infinite-dates-handling: + +Infinite dates handling +''''''''''''''''''''''' + +PostgreSQL can store the representation of an "infinite" date, timestamp, or +interval. Infinite dates are not available to Python, so these objects are +mapped to `!date.max`, `!datetime.max`, `!interval.max`. Unfortunately the +mapping cannot be bidirectional so these dates will be stored back into the +database with their values, such as :sql:`9999-12-31`. 
+ +It is possible to create an alternative adapter for dates and other objects +to map `date.max` to :sql:`infinity`, for instance:: + + class InfDateAdapter: + def __init__(self, wrapped): + self.wrapped = wrapped + def getquoted(self): + if self.wrapped == datetime.date.max: + return b"'infinity'::date" + elif self.wrapped == datetime.date.min: + return b"'-infinity'::date" + else: + return psycopg2.extensions.DateFromPy(self.wrapped).getquoted() + + psycopg2.extensions.register_adapter(datetime.date, InfDateAdapter) + +Of course it will not be possible to write the value of `date.max` in the +database anymore: :sql:`infinity` will be stored instead. + + +.. _time-handling: + +Time handling +''''''''''''' + +The PostgreSQL :sql:`time` and Python `~datetime.time` types are not +fully bidirectional. + +Within PostgreSQL, the :sql:`time` type's maximum value of ``24:00:00`` is +treated as 24-hours later than the minimum value of ``00:00:00``. + + >>> cur.execute("SELECT '24:00:00'::time - '00:00:00'::time") + >>> cur.fetchone()[0] + datetime.timedelta(days=1) + +However, Python's `!time` only supports times until ``23:59:59``. +Retrieving a value of ``24:00:00`` results in a `!time` of ``00:00:00``. + + >>> cur.execute("SELECT '24:00:00'::time, '00:00:00'::time") + >>> cur.fetchone() + (datetime.time(0, 0), datetime.time(0, 0)) + + +.. _adapt-list: + +Lists adaptation +^^^^^^^^^^^^^^^^ + +.. index:: + single: Array; Adaptation + double: Lists; Adaptation + +Python lists are converted into PostgreSQL :sql:`ARRAY`\ s:: + + >>> cur.mogrify("SELECT %s;", ([10, 20, 30], )) + 'SELECT ARRAY[10,20,30];' + +.. note:: + + You can use a Python list as the argument of the :sql:`IN` operator using + `the PostgreSQL ANY operator`__. :: + + ids = [10, 20, 30] + cur.execute("SELECT * FROM data WHERE id = ANY(%s);", (ids,)) + + Furthermore :sql:`ANY` can also work with empty lists, whereas :sql:`IN ()` + is a SQL syntax error. + + .. 
__: https://www.postgresql.org/docs/current/static/functions-subquery.html#FUNCTIONS-SUBQUERY-ANY-SOME + +.. note:: + + Reading back from PostgreSQL, arrays are converted to lists of Python + objects as expected, but only if the items are of a known type. + Arrays of unknown types are returned as represented by the database (e.g. + ``{a,b,c}``). If you want to convert the items into Python objects you can + easily create a typecaster for :ref:`array of unknown types + `. + + +.. _adapt-tuple: + +Tuples adaptation +^^^^^^^^^^^^^^^^^^ + +.. index:: + double: Tuple; Adaptation + single: IN operator + +Python tuples are converted into a syntax suitable for the SQL :sql:`IN` +operator and to represent a composite type:: + + >>> cur.mogrify("SELECT %s IN %s;", (10, (10, 20, 30))) + 'SELECT 10 IN (10, 20, 30);' + +.. note:: + + SQL doesn't allow an empty list in the :sql:`IN` operator, so your code + should guard against empty tuples. Alternatively you can :ref:`use a + Python list `. + +If you want PostgreSQL composite types to be converted into a Python +tuple/namedtuple you can use the `~psycopg2.extras.register_composite()` +function. + +.. versionadded:: 2.0.6 + the tuple :sql:`IN` adaptation. + +.. versionchanged:: 2.0.14 + the tuple :sql:`IN` adapter is always active. In previous releases it + was necessary to import the `~psycopg2.extensions` module to have it + registered. + +.. versionchanged:: 2.3 + `~collections.namedtuple` instances are adapted like regular tuples and + can thus be used to represent composite types. + + +.. index:: Transaction, Begin, Commit, Rollback, Autocommit, Read only + +.. _transactions-control: + +Transactions control +-------------------- + +In Psycopg transactions are handled by the `connection` class. By +default, the first time a command is sent to the database (using one of the +`cursor`\ s created by the connection), a new transaction is created. 
+The following database commands will be executed in the context of the same +transaction -- not only the commands issued by the first cursor, but the ones +issued by all the cursors created by the same connection. Should any command +fail, the transaction will be aborted and no further command will be executed +until a call to the `~connection.rollback()` method. + +The connection is responsible for terminating its transaction, calling either +the `~connection.commit()` or `~connection.rollback()` method. Committed +changes are immediately made persistent in the database. If the connection +is closed (using the `~connection.close()` method) or destroyed (using `!del` +or by letting it fall out of scope) while a transaction is in progress, the +server will discard the transaction. However doing so is not advisable: +middleware such as PgBouncer_ may see the connection closed uncleanly and +dispose of it. + +.. _PgBouncer: http://www.pgbouncer.org/ + +It is possible to set the connection in *autocommit* mode: this way all the +commands executed will be immediately committed and no rollback is possible. A +few commands (e.g. :sql:`CREATE DATABASE`, :sql:`VACUUM`, :sql:`CALL` on +`stored procedures`__ using transaction control...) require to be run +outside any transaction: in order to be able to run these commands from +Psycopg, the connection must be in autocommit mode: you can use the +`~connection.autocommit` property. + +.. __: https://www.postgresql.org/docs/current/xproc.html + +.. warning:: + + By default even a simple :sql:`SELECT` will start a transaction: in + long-running programs, if no further action is taken, the session will + remain "idle in transaction", an undesirable condition for several + reasons (locks are held by the session, tables bloat...). For long lived + scripts, either make sure to terminate a transaction as soon as possible or + use an autocommit connection. 
+ +A few other transaction properties can be set session-wide by the +`!connection`: for instance it is possible to have read-only transactions or +change the isolation level. See the `~connection.set_session()` method for all +the details. + + +.. index:: + single: with statement + +``with`` statement +^^^^^^^^^^^^^^^^^^ + +Starting from version 2.5, psycopg2's connections and cursors are *context +managers* and can be used with the ``with`` statement:: + + with psycopg2.connect(DSN) as conn: + with conn.cursor() as curs: + curs.execute(SQL) + +When a connection exits the ``with`` block, if no exception has been raised by +the block, the transaction is committed. In case of exception the transaction +is rolled back. + +When a cursor exits the ``with`` block it is closed, releasing any resource +eventually associated with it. The state of the transaction is not affected. + +A connection can be used in more than a ``with`` statement +and each ``with`` block is effectively wrapped in a separate transaction:: + + conn = psycopg2.connect(DSN) + + with conn: + with conn.cursor() as curs: + curs.execute(SQL1) + + with conn: + with conn.cursor() as curs: + curs.execute(SQL2) + + conn.close() + +.. warning:: + + Unlike file objects or other resources, exiting the connection's + ``with`` block **doesn't close the connection**, but only the transaction + associated to it. If you want to make sure the connection is closed after + a certain point, you should still use a try-catch block:: + + conn = psycopg2.connect(DSN) + try: + # connection usage + finally: + conn.close() + +.. versionchanged:: 2.9 + ``with connection`` starts a transaction also on autocommit connections. + + +.. index:: + pair: Server side; Cursor + pair: Named; Cursor + pair: DECLARE; SQL command + pair: FETCH; SQL command + pair: MOVE; SQL command + +.. 
_server-side-cursors: + +Server side cursors +------------------- + +When a database query is executed, the Psycopg `cursor` usually fetches +all the records returned by the backend, transferring them to the client +process. If the query returned an huge amount of data, a proportionally large +amount of memory will be allocated by the client. + +If the dataset is too large to be practically handled on the client side, it is +possible to create a *server side* cursor. Using this kind of cursor it is +possible to transfer to the client only a controlled amount of data, so that a +large dataset can be examined without keeping it entirely in memory. + +Server side cursor are created in PostgreSQL using the |DECLARE|_ command and +subsequently handled using :sql:`MOVE`, :sql:`FETCH` and :sql:`CLOSE` commands. + +Psycopg wraps the database server side cursor in *named cursors*. A named +cursor is created using the `~connection.cursor()` method specifying the +*name* parameter. Such cursor will behave mostly like a regular cursor, +allowing the user to move in the dataset using the `~cursor.scroll()` +method and to read the data using `~cursor.fetchone()` and +`~cursor.fetchmany()` methods. Normally you can only scroll forward in a +cursor: if you need to scroll backwards you should declare your cursor +`~cursor.scrollable`. + +Named cursors are also :ref:`iterable ` like regular cursors. +Note however that before Psycopg 2.4 iteration was performed fetching one +record at time from the backend, resulting in a large overhead. The attribute +`~cursor.itersize` now controls how many records are fetched at time +during the iteration: the default value of 2000 allows to fetch about 100KB +per roundtrip assuming records of 10-20 columns of mixed number and strings; +you may decrease this value if you are dealing with huge records. + +Named cursors are usually created :sql:`WITHOUT HOLD`, meaning they live only +as long as the current transaction. 
Trying to fetch from a named cursor after +a `~connection.commit()` or to create a named cursor when the connection +is in `~connection.autocommit` mode will result in an exception. +It is possible to create a :sql:`WITH HOLD` cursor by specifying a `!True` +value for the `withhold` parameter to `~connection.cursor()` or by setting the +`~cursor.withhold` attribute to `!True` before calling `~cursor.execute()` on +the cursor. It is extremely important to always `~cursor.close()` such cursors, +otherwise they will continue to hold server-side resources until the connection +will be eventually closed. Also note that while :sql:`WITH HOLD` cursors +lifetime extends well after `~connection.commit()`, calling +`~connection.rollback()` will automatically close the cursor. + +.. note:: + + It is also possible to use a named cursor to consume a cursor created + in some other way than using the |DECLARE| executed by + `~cursor.execute()`. For example, you may have a PL/pgSQL function + returning a cursor: + + .. code-block:: postgres + + CREATE FUNCTION reffunc(refcursor) RETURNS refcursor AS $$ + BEGIN + OPEN $1 FOR SELECT col FROM test; + RETURN $1; + END; + $$ LANGUAGE plpgsql; + + You can read the cursor content by calling the function with a regular, + non-named, Psycopg cursor: + + .. code-block:: python + + cur1 = conn.cursor() + cur1.callproc('reffunc', ['curname']) + + and then use a named cursor in the same transaction to "steal the cursor": + + .. code-block:: python + + cur2 = conn.cursor('curname') + for record in cur2: # or cur2.fetchone, fetchmany... + # do something with record + pass + + +.. |DECLARE| replace:: :sql:`DECLARE` +.. _DECLARE: https://www.postgresql.org/docs/current/static/sql-declare.html + + + +.. index:: Thread safety, Multithread, Multiprocess + +.. 
_thread-safety: + +Thread and process safety +------------------------- + +The Psycopg module and the `connection` objects are *thread-safe*: many +threads can access the same database either using separate sessions and +creating a `!connection` per thread or using the same +connection and creating separate `cursor`\ s. In |DBAPI|_ parlance, Psycopg is +*level 2 thread safe*. + +The difference between the above two approaches is that, using different +connections, the commands will be executed in different sessions and will be +served by different server processes. On the other hand, using many cursors on +the same connection, all the commands will be executed in the same session +(and in the same transaction if the connection is not in :ref:`autocommit +` mode), but they will be serialized. + +The above observations are only valid for regular threads: they don't apply to +forked processes nor to green threads. `libpq` connections `shouldn't be used by a +forked processes`__, so when using a module such as `multiprocessing` or a +forking web deploy method such as FastCGI make sure to create the connections +*after* the fork. + +.. __: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNECT + +Connections shouldn't be shared either by different green threads: see +:ref:`green-support` for further details. + + + +.. index:: + pair: COPY; SQL command + +.. _copy: + +Using COPY TO and COPY FROM +--------------------------- + +Psycopg `cursor` objects provide an interface to the efficient +PostgreSQL |COPY|__ command to move data from files to tables and back. + +Currently no adaptation is provided between Python and PostgreSQL types on +|COPY|: the file can be any Python file-like object but its format must be in +the format accepted by `PostgreSQL COPY command`__ (data format, escaped +characters, etc). + +.. 
__: COPY_ + +The methods exposed are: + +`~cursor.copy_from()` + Reads data *from* a file-like object appending them to a database table + (:sql:`COPY table FROM file` syntax). The source file must provide both + `!read()` and `!readline()` method. + +`~cursor.copy_to()` + Writes the content of a table *to* a file-like object (:sql:`COPY table TO + file` syntax). The target file must have a `write()` method. + +`~cursor.copy_expert()` + Allows to handle more specific cases and to use all the :sql:`COPY` + features available in PostgreSQL. + +Please refer to the documentation of the single methods for details and +examples. + +.. |COPY| replace:: :sql:`COPY` +.. __: https://www.postgresql.org/docs/current/static/sql-copy.html + + + +.. index:: + single: Large objects + +.. _large-objects: + +Access to PostgreSQL large objects +---------------------------------- + +PostgreSQL offers support for `large objects`__, which provide stream-style +access to user data that is stored in a special large-object structure. They +are useful with data values too large to be manipulated conveniently as a +whole. + +.. __: https://www.postgresql.org/docs/current/static/largeobjects.html + +Psycopg allows access to the large object using the +`~psycopg2.extensions.lobject` class. Objects are generated using the +`connection.lobject()` factory method. Data can be retrieved either as bytes +or as Unicode strings. + +Psycopg large object support efficient import/export with file system files +using the |lo_import|_ and |lo_export|_ libpq functions. + +.. |lo_import| replace:: `!lo_import()` +.. _lo_import: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-IMPORT +.. |lo_export| replace:: `!lo_export()` +.. _lo_export: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-EXPORT + +.. versionchanged:: 2.6 + added support for large objects greater than 2GB. 
Note that the support is + enabled only if all the following conditions are verified: + + - the Python build is 64 bits; + - the extension was built against at least libpq 9.3; + - the server version is at least PostgreSQL 9.3 + (`~connection.server_version` must be >= ``90300``). + + If Psycopg was built with 64 bits large objects support (i.e. the first + two contidions above are verified), the `psycopg2.__version__` constant + will contain the ``lo64`` flag. If any of the contition is not met + several `!lobject` methods will fail if the arguments exceed 2GB. + + + +.. index:: + pair: Two-phase commit; Transaction + +.. _tpc: + +Two-Phase Commit protocol support +--------------------------------- + +.. versionadded:: 2.3 + +Psycopg exposes the two-phase commit features available since PostgreSQL 8.1 +implementing the *two-phase commit extensions* proposed by the |DBAPI|. + +The |DBAPI| model of two-phase commit is inspired by the `XA specification`__, +according to which transaction IDs are formed from three components: + +- a format ID (non-negative 32 bit integer) +- a global transaction ID (string not longer than 64 bytes) +- a branch qualifier (string not longer than 64 bytes) + +For a particular global transaction, the first two components will be the same +for all the resources. Every resource will be assigned a different branch +qualifier. + +According to the |DBAPI| specification, a transaction ID is created using the +`connection.xid()` method. Once you have a transaction id, a distributed +transaction can be started with `connection.tpc_begin()`, prepared using +`~connection.tpc_prepare()` and completed using `~connection.tpc_commit()` or +`~connection.tpc_rollback()`. Transaction IDs can also be retrieved from the +database using `~connection.tpc_recover()` and completed using the above +`!tpc_commit()` and `!tpc_rollback()`. 
+ +PostgreSQL doesn't follow the XA standard though, and the ID for a PostgreSQL +prepared transaction can be any string up to 200 characters long. +Psycopg's `~psycopg2.extensions.Xid` objects can represent both XA-style +transactions IDs (such as the ones created by the `!xid()` method) and +PostgreSQL transaction IDs identified by an unparsed string. + +The format in which the Xids are converted into strings passed to the +database is the same employed by the `PostgreSQL JDBC driver`__: this should +allow interoperation between tools written in Python and in Java. For example +a recovery tool written in Python would be able to recognize the components of +transactions produced by a Java program. + +For further details see the documentation for the above methods. + +.. __: https://publications.opengroup.org/c193 +.. __: https://jdbc.postgresql.org/ diff --git a/lib/__init__.py b/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..59a89386eff9d007499f3c35f6a56cf72443f1cd --- /dev/null +++ b/lib/__init__.py @@ -0,0 +1,126 @@ +"""A Python driver for PostgreSQL + +psycopg is a PostgreSQL_ database adapter for the Python_ programming +language. This is version 2, a complete rewrite of the original code to +provide new-style classes for connection and cursor objects and other sweet +candies. Like the original, psycopg 2 was written with the aim of being very +small and fast, and stable as a rock. + +Homepage: https://psycopg.org/ + +.. _PostgreSQL: https://www.postgresql.org/ +.. 
_Python: https://www.python.org/ + +:Groups: + * `Connections creation`: connect + * `Value objects constructors`: Binary, Date, DateFromTicks, Time, + TimeFromTicks, Timestamp, TimestampFromTicks +""" +# psycopg/__init__.py - initialization of the psycopg module +# +# Copyright (C) 2003-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +# Import modules needed by _psycopg to allow tools like py2exe to do +# their work without bothering about the module dependencies. + +# Note: the first internal import should be _psycopg, otherwise the real cause +# of a failed loading of the C module may get hidden, see +# https://archives.postgresql.org/psycopg/2011-02/msg00044.php + +# Import the DBAPI-2.0 stuff into top-level module. 
def connect(dsn=None, connection_factory=None, cursor_factory=None, **kwargs):
    """Open a new connection to a PostgreSQL database.

    The connection parameters may be given as a libpq connection string::

        conn = psycopg2.connect("dbname=test user=postgres password=secret")

    as a set of keyword arguments::

        conn = psycopg2.connect(database="test", user="postgres", password="secret")

    or as a mix of both.  The basic connection parameters are:

    - *dbname*: the database name
    - *database*: the database name (only as keyword argument)
    - *user*: user name used to authenticate
    - *password*: password used to authenticate
    - *host*: database host address (defaults to UNIX socket if not provided)
    - *port*: connection port number (defaults to 5432 if not provided)

    *connection_factory* allows a different class or connections factory to
    be used: it should be a callable object taking a dsn argument.

    *cursor_factory* sets the default cursor factory used by ``cursor()``.

    Passing *async*=True creates an asynchronous connection. *async_* is a
    valid alias (for Python versions where ``async`` is a keyword).

    Any other keyword parameter is passed to the underlying client library:
    the list of supported parameters depends on the library version.
    """
    # 'async'/'async_' are understood by _connect() itself, not by the
    # connection string: strip them from kwargs before building the dsn.
    kwasync = {
        key: kwargs.pop(key)
        for key in ('async', 'async_')
        if key in kwargs
    }

    conn = _connect(
        _ext.make_dsn(dsn, **kwargs),
        connection_factory=connection_factory,
        **kwasync,
    )
    if cursor_factory is not None:
        conn.cursor_factory = cursor_factory
    return conn
def register_ipaddress(conn_or_curs=None):
    """
    Register conversion support between `ipaddress` objects and `network types`__.

    :param conn_or_curs: the scope where to register the type casters.
        If `!None` register them globally.

    After the function is called, PostgreSQL :sql:`inet` values will be
    converted into `~ipaddress.IPv4Interface` or `~ipaddress.IPv6Interface`
    objects, and :sql:`cidr` values into `~ipaddress.IPv4Network` or
    `~ipaddress.IPv6Network`.

    .. __: https://www.postgresql.org/docs/current/static/datatype-net-types.html
    """
    # The ipaddress module is only imported on first registration.
    global ipaddress
    import ipaddress

    # Build the typecasters lazily, once per process.
    global _casters
    if _casters is None:
        _casters = _make_casters()

    for caster in _casters:
        register_type(caster, conn_or_curs)

    adapted = (
        ipaddress.IPv4Interface, ipaddress.IPv6Interface,
        ipaddress.IPv4Network, ipaddress.IPv6Network,
    )
    for klass in adapted:
        register_adapter(klass, adapt_ipaddress)


def _make_casters():
    """Build the inet/cidr typecasters (oids fixed in the PG catalog)."""
    inet = new_type((869,), 'INET', cast_interface)
    cidr = new_type((650,), 'CIDR', cast_network)
    return [
        inet,
        new_array_type((1041,), 'INET[]', inet),
        cidr,
        new_array_type((651,), 'CIDR[]', cidr),
    ]


def cast_interface(s, cur=None):
    """Convert a textual inet value into an IPv[46]Interface object."""
    if s is None:
        return None
    # str() kept for historical reasons: Python 2 required unicode input.
    return ipaddress.ip_interface(str(s))


def cast_network(s, cur=None):
    """Convert a textual cidr value into an IPv[46]Network object."""
    if s is None:
        return None
    return ipaddress.ip_network(str(s))


def adapt_ipaddress(obj):
    """Adapt an ipaddress object by quoting its string representation."""
    return QuotedString(str(obj))
class Json:
    """
    An `~psycopg2.extensions.ISQLQuote` wrapper to adapt a Python object to
    :sql:`json` data type.

    `!Json` can be used to wrap any object supported by the provided *dumps*
    function. If none is provided, the standard :py:func:`json.dumps()` is
    used.

    """
    def __init__(self, adapted, dumps=None):
        # The wrapped Python object, serialized on getquoted().
        self.adapted = adapted
        # Connection used to quote the serialized string, set by prepare().
        self._conn = None
        # Serialization function; falls back on the stdlib json.dumps.
        self._dumps = dumps or json.dumps

    def __conform__(self, proto):
        # Adaptation protocol entry point: we only speak ISQLQuote.
        if proto is ISQLQuote:
            return self

    def dumps(self, obj):
        """Serialize *obj* in JSON format.

        The default is to call `!json.dumps()` or the *dumps* function
        provided in the constructor. You can override this method to create a
        customized JSON wrapper.
        """
        return self._dumps(obj)

    def prepare(self, conn):
        self._conn = conn

    def getquoted(self):
        quoted = QuotedString(self.dumps(self.adapted))
        if self._conn is not None:
            # Let the connection drive the quoting (encoding, E'' syntax...).
            quoted.prepare(self._conn)
        return quoted.getquoted()

    def __str__(self):
        # getquoted is binary: decode leniently for display purposes.
        return self.getquoted().decode('ascii', 'replace')
def register_json(conn_or_curs=None, globally=False, loads=None,
                  oid=None, array_oid=None, name='json'):
    """Create and register typecasters converting :sql:`json` type to Python objects.

    :param conn_or_curs: a connection or cursor used to find the :sql:`json`
        and :sql:`json[]` oids; the typecasters are registered in a scope
        limited to this object, unless *globally* is set to `!True`. It can be
        `!None` if the oids are provided
    :param globally: if `!False` register the typecasters only on
        *conn_or_curs*, otherwise register them globally
    :param loads: the function used to parse the data into a Python object. If
        `!None` use `!json.loads()`
    :param oid: the OID of the :sql:`json` type if known; if not, it will be
        queried on *conn_or_curs*
    :param array_oid: the OID of the :sql:`json[]` array type if known;
        if not, it will be queried on *conn_or_curs*
    :param name: the name of the data type to look for in *conn_or_curs*

    The connection or cursor passed to the function will be used to query the
    database and look for the OID of the :sql:`json` type (or an alternative
    type if *name* if provided). No query is performed if *oid* and *array_oid*
    are provided. Raise `~psycopg2.ProgrammingError` if the type is not found.
    """
    # discover the oids from the database unless the caller supplied them
    if oid is None:
        oid, array_oid = _get_json_oids(conn_or_curs, name)

    caster, array_caster = _create_json_typecasters(
        oid, array_oid, loads=loads, name=name.upper())

    # register on the given scope only, or everywhere when requested
    scope = None if globally else (conn_or_curs or None)
    register_type(caster, scope)
    if array_caster is not None:
        register_type(array_caster, scope)

    return caster, array_caster


def register_default_json(conn_or_curs=None, globally=False, loads=None):
    """
    Create and register :sql:`json` typecasters for PostgreSQL 9.2 and following.

    Since PostgreSQL 9.2 :sql:`json` is a builtin type, hence its oid is known
    and fixed. This function allows specifying a customized *loads* function
    for the default :sql:`json` type without querying the database.
    All the parameters have the same meaning of `register_json()`.
    """
    return register_json(
        conn_or_curs=conn_or_curs, globally=globally, loads=loads,
        oid=JSON_OID, array_oid=JSONARRAY_OID)
def register_default_jsonb(conn_or_curs=None, globally=False, loads=None):
    """
    Create and register :sql:`jsonb` typecasters for PostgreSQL 9.4 and following.

    As in `register_default_json()`, the function allows to register a
    customized *loads* function for the :sql:`jsonb` type at its known oid for
    PostgreSQL 9.4 and following versions. All the parameters have the same
    meaning of `register_json()`.
    """
    return register_json(
        conn_or_curs=conn_or_curs, globally=globally, loads=loads,
        oid=JSONB_OID, array_oid=JSONBARRAY_OID, name='jsonb')


def _create_json_typecasters(oid, array_oid, loads=None, name='JSON'):
    """Create and return the (scalar, array) typecaster pair for json data."""
    if loads is None:
        loads = json.loads

    def typecast_json(s, cur):
        # SQL NULL maps to None without invoking the parser
        return None if s is None else loads(s)

    caster = new_type((oid,), name, typecast_json)
    if array_oid is None:
        array_caster = None
    else:
        array_caster = new_array_type((array_oid,), f"{name}ARRAY", caster)

    return caster, array_caster


def _get_json_oids(conn_or_curs, name='json'):
    """Query the database for the oid (and array oid) of type *name*.

    Raise the connection's `ProgrammingError` if the type is not found.
    """
    # lazy imports to avoid a circular dependency with extras/extensions
    from psycopg2.extensions import STATUS_IN_TRANSACTION
    from psycopg2.extras import _solve_conn_curs

    conn, curs = _solve_conn_curs(conn_or_curs)

    # remember the transaction status so the connection can be restored
    conn_status = conn.status

    # the typarray column is only available from PG 8.3
    typarray = "typarray" if conn.info.server_version >= 80300 else "NULL"

    # look up the requested type's oid(s) in the catalog
    curs.execute(
        "SELECT t.oid, %s FROM pg_type t WHERE t.typname = %%s;"
        % typarray, (name,))
    r = curs.fetchone()

    # leave the connection in the state it was found
    if conn_status != STATUS_IN_TRANSACTION and not conn.autocommit:
        conn.rollback()

    if not r:
        raise conn.ProgrammingError(f"{name} data type not found")

    return r
class Range:
    """Python representation for a PostgreSQL |range|_ type.

    :param lower: lower bound for the range. `!None` means unbound
    :param upper: upper bound for the range. `!None` means unbound
    :param bounds: one of the literal strings ``()``, ``[)``, ``(]``, ``[]``,
        representing whether the lower or upper bounds are included
    :param empty: if `!True`, the range is empty

    """
    __slots__ = ('_lower', '_upper', '_bounds')

    def __init__(self, lower=None, upper=None, bounds='[)', empty=False):
        if not empty:
            if bounds not in ('[)', '(]', '()', '[]'):
                raise ValueError(f"bound flags not valid: {bounds!r}")

            self._lower = lower
            self._upper = upper
            self._bounds = bounds
        else:
            # the empty range is represented by all three slots set to None
            self._lower = self._upper = self._bounds = None

    def __repr__(self):
        if self._bounds is None:
            return f"{self.__class__.__name__}(empty=True)"
        else:
            return "{}({!r}, {!r}, {!r})".format(self.__class__.__name__,
                self._lower, self._upper, self._bounds)

    def __str__(self):
        if self._bounds is None:
            return 'empty'

        items = [
            self._bounds[0],
            str(self._lower),
            ', ',
            str(self._upper),
            self._bounds[1]
        ]
        return ''.join(items)

    @property
    def lower(self):
        """The lower bound of the range. `!None` if empty or unbound."""
        return self._lower

    @property
    def upper(self):
        """The upper bound of the range. `!None` if empty or unbound."""
        return self._upper

    @property
    def isempty(self):
        """`!True` if the range is empty."""
        return self._bounds is None

    @property
    def lower_inf(self):
        """`!True` if the range doesn't have a lower bound."""
        if self._bounds is None:
            return False
        return self._lower is None

    @property
    def upper_inf(self):
        """`!True` if the range doesn't have an upper bound."""
        if self._bounds is None:
            return False
        return self._upper is None

    @property
    def lower_inc(self):
        """`!True` if the lower bound is included in the range."""
        if self._bounds is None or self._lower is None:
            return False
        return self._bounds[0] == '['

    @property
    def upper_inc(self):
        """`!True` if the upper bound is included in the range."""
        if self._bounds is None or self._upper is None:
            return False
        return self._bounds[1] == ']'

    def __contains__(self, x):
        # the empty range contains nothing
        if self._bounds is None:
            return False

        if self._lower is not None:
            if self._bounds[0] == '[':
                if x < self._lower:
                    return False
            else:
                if x <= self._lower:
                    return False

        if self._upper is not None:
            if self._bounds[1] == ']':
                if x > self._upper:
                    return False
            else:
                if x >= self._upper:
                    return False

        return True

    def __bool__(self):
        return self._bounds is not None

    def __nonzero__(self):
        # Python 2 compatibility
        return type(self).__bool__(self)

    def __eq__(self, other):
        if not isinstance(other, Range):
            return False
        return (self._lower == other._lower
            and self._upper == other._upper
            and self._bounds == other._bounds)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self._lower, self._upper, self._bounds))

    # as the postgres docs describe for the server-side stuff,
    # ordering is rather arbitrary, but will remain stable
    # and consistent.

    def __lt__(self, other):
        if not isinstance(other, Range):
            return NotImplemented
        for attr in ('_lower', '_upper', '_bounds'):
            self_value = getattr(self, attr)
            other_value = getattr(other, attr)
            if self_value == other_value:
                pass
            elif self_value is None:
                # None (unbound/empty) sorts before any concrete value
                return True
            elif other_value is None:
                return False
            else:
                return self_value < other_value
        return False

    def __le__(self, other):
        if self == other:
            return True
        else:
            return self.__lt__(other)

    def __gt__(self, other):
        if isinstance(other, Range):
            return other.__lt__(self)
        else:
            return NotImplemented

    def __ge__(self, other):
        if self == other:
            return True
        else:
            return self.__gt__(other)

    def __getstate__(self):
        # __slots__ classes have no __dict__, so pickling needs explicit state
        return {slot: getattr(self, slot)
                for slot in self.__slots__ if hasattr(self, slot)}

    def __setstate__(self, state):
        for slot, value in state.items():
            setattr(self, slot, value)


def register_range(pgrange, pyrange, conn_or_curs, globally=False):
    """Create and register an adapter and the typecasters to convert between
    a PostgreSQL |range|_ type and a PostgreSQL `Range` subclass.

    :param pgrange: the name of the PostgreSQL |range| type. Can be
        schema-qualified
    :param pyrange: a `Range` strict subclass, or just a name to give to a new
        class
    :param conn_or_curs: a connection or cursor used to find the oid of the
        range and its subtype; the typecaster is registered in a scope limited
        to this object, unless *globally* is set to `!True`
    :param globally: if `!False` (default) register the typecaster only on
        *conn_or_curs*, otherwise register it globally
    :return: `RangeCaster` instance responsible for the conversion

    If a string is passed to *pyrange*, a new `Range` subclass is created
    with such name and will be available as the `~RangeCaster.range` attribute
    of the returned `RangeCaster` object.

    The function queries the database on *conn_or_curs* to inspect the
    *pgrange* type and raises `~psycopg2.ProgrammingError` if the type is not
    found. If querying the database is not advisable, use directly the
    `RangeCaster` class and register the adapter and typecasters using the
    provided functions.

    """
    caster = RangeCaster._from_db(pgrange, pyrange, conn_or_curs)
    caster._register(not globally and conn_or_curs or None)
    return caster


class RangeAdapter:
    """`ISQLQuote` adapter for `Range` subclasses.

    This is an abstract class: concrete classes must set a `name` class
    attribute or override `getquoted()`.
    """
    name = None

    def __init__(self, adapted):
        self.adapted = adapted
        # set by prepare(); None until the adapter is bound to a connection
        self._conn = None

    def __conform__(self, proto):
        # BUG FIX: the original tested `self._proto`, an attribute that is
        # never set anywhere, so conforming to ISQLQuote always raised
        # AttributeError. The parameter `proto` must be tested instead
        # (compare Json.__conform__ in _json.py).
        if proto is ISQLQuote:
            return self

    def prepare(self, conn):
        self._conn = conn

    def getquoted(self):
        if self.name is None:
            raise NotImplementedError(
                'RangeAdapter must be subclassed overriding its name '
                'or the getquoted() method')

        r = self.adapted
        if r.isempty:
            return b"'empty'::" + self.name.encode('utf8')

        # adapt each bound, propagating the connection for quoting rules
        if r.lower is not None:
            a = adapt(r.lower)
            if hasattr(a, 'prepare'):
                a.prepare(self._conn)
            lower = a.getquoted()
        else:
            lower = b'NULL'

        if r.upper is not None:
            a = adapt(r.upper)
            if hasattr(a, 'prepare'):
                a.prepare(self._conn)
            upper = a.getquoted()
        else:
            upper = b'NULL'

        # emit the range constructor call, e.g. tsrange(lo, hi, '[)')
        return self.name.encode('utf8') + b'(' + lower + b', ' + upper \
            + b", '" + r._bounds.encode('utf8') + b"')"
+ """ + def __init__(self, pgrange, pyrange, oid, subtype_oid, array_oid=None): + self.subtype_oid = subtype_oid + self._create_ranges(pgrange, pyrange) + + name = self.adapter.name or self.adapter.__class__.__name__ + + self.typecaster = new_type((oid,), name, self.parse) + + if array_oid is not None: + self.array_typecaster = new_array_type( + (array_oid,), name + "ARRAY", self.typecaster) + else: + self.array_typecaster = None + + def _create_ranges(self, pgrange, pyrange): + """Create Range and RangeAdapter classes if needed.""" + # if got a string create a new RangeAdapter concrete type (with a name) + # else take it as an adapter. Passing an adapter should be considered + # an implementation detail and is not documented. It is currently used + # for the numeric ranges. + self.adapter = None + if isinstance(pgrange, str): + self.adapter = type(pgrange, (RangeAdapter,), {}) + self.adapter.name = pgrange + else: + try: + if issubclass(pgrange, RangeAdapter) \ + and pgrange is not RangeAdapter: + self.adapter = pgrange + except TypeError: + pass + + if self.adapter is None: + raise TypeError( + 'pgrange must be a string or a RangeAdapter strict subclass') + + self.range = None + try: + if isinstance(pyrange, str): + self.range = type(pyrange, (Range,), {}) + if issubclass(pyrange, Range) and pyrange is not Range: + self.range = pyrange + except TypeError: + pass + + if self.range is None: + raise TypeError( + 'pyrange must be a type or a Range strict subclass') + + @classmethod + def _from_db(self, name, pyrange, conn_or_curs): + """Return a `RangeCaster` instance for the type *pgrange*. + + Raise `ProgrammingError` if the type is not found. 
+ """ + from psycopg2.extensions import STATUS_IN_TRANSACTION + from psycopg2.extras import _solve_conn_curs + conn, curs = _solve_conn_curs(conn_or_curs) + + if conn.info.server_version < 90200: + raise ProgrammingError("range types not available in version %s" + % conn.info.server_version) + + # Store the transaction status of the connection to revert it after use + conn_status = conn.status + + # Use the correct schema + if '.' in name: + schema, tname = name.split('.', 1) + else: + tname = name + schema = 'public' + + # get the type oid and attributes + try: + curs.execute("""\ +select rngtypid, rngsubtype, + (select typarray from pg_type where oid = rngtypid) +from pg_range r +join pg_type t on t.oid = rngtypid +join pg_namespace ns on ns.oid = typnamespace +where typname = %s and ns.nspname = %s; +""", (tname, schema)) + + except ProgrammingError: + if not conn.autocommit: + conn.rollback() + raise + else: + rec = curs.fetchone() + + # revert the status of the connection as before the command + if (conn_status != STATUS_IN_TRANSACTION + and not conn.autocommit): + conn.rollback() + + if not rec: + raise ProgrammingError( + f"PostgreSQL type '{name}' not found") + + type, subtype, array = rec + + return RangeCaster(name, pyrange, + oid=type, subtype_oid=subtype, array_oid=array) + + _re_range = re.compile(r""" + ( \(|\[ ) # lower bound flag + (?: # lower bound: + " ( (?: [^"] | "")* ) " # - a quoted string + | ( [^",]+ ) # - or an unquoted string + )? # - or empty (not catched) + , + (?: # upper bound: + " ( (?: [^"] | "")* ) " # - a quoted string + | ( [^"\)\]]+ ) # - or an unquoted string + )? 
# - or empty (not catched) + ( \)|\] ) # upper bound flag + """, re.VERBOSE) + + _re_undouble = re.compile(r'(["\\])\1') + + def parse(self, s, cur=None): + if s is None: + return None + + if s == 'empty': + return self.range(empty=True) + + m = self._re_range.match(s) + if m is None: + raise InterfaceError(f"failed to parse range: '{s}'") + + lower = m.group(3) + if lower is None: + lower = m.group(2) + if lower is not None: + lower = self._re_undouble.sub(r"\1", lower) + + upper = m.group(5) + if upper is None: + upper = m.group(4) + if upper is not None: + upper = self._re_undouble.sub(r"\1", upper) + + if cur is not None: + lower = cur.cast(self.subtype_oid, lower) + upper = cur.cast(self.subtype_oid, upper) + + bounds = m.group(1) + m.group(6) + + return self.range(lower, upper, bounds) + + def _register(self, scope=None): + register_type(self.typecaster, scope) + if self.array_typecaster is not None: + register_type(self.array_typecaster, scope) + + register_adapter(self.range, self.adapter) + + +class NumericRange(Range): + """A `Range` suitable to pass Python numeric types to a PostgreSQL range. + + PostgreSQL types :sql:`int4range`, :sql:`int8range`, :sql:`numrange` are + casted into `!NumericRange` instances. + """ + pass + + +class DateRange(Range): + """Represents :sql:`daterange` values.""" + pass + + +class DateTimeRange(Range): + """Represents :sql:`tsrange` values.""" + pass + + +class DateTimeTZRange(Range): + """Represents :sql:`tstzrange` values.""" + pass + + +# Special adaptation for NumericRange. Allows to pass number range regardless +# of whether they are ints, floats and what size of ints are, which are +# pointless in Python world. 
# Special adaptation for NumericRange. Allows to pass number range regardless
# of whether they are ints, floats and what size of ints are, which are
# pointless in Python world. On the way back, no numeric range is casted to
# NumericRange, but only to their subclasses

class NumberRangeAdapter(RangeAdapter):
    """Adapt a range if the subtype doesn't need quotes."""

    def getquoted(self):
        r = self.adapted
        if r.isempty:
            return b"'empty'"

        # The bounds are numbers, so the adapted form is never really
        # quoted and needs no connection-dependent preparation: decode it
        # straight to ascii to build the literal.
        if r.lower_inf:
            lower = ''
        else:
            lower = adapt(r.lower).getquoted().decode('ascii')

        if r.upper_inf:
            upper = ''
        else:
            upper = adapt(r.upper).getquoted().decode('ascii')

        return (f"'{r._bounds[0]}{lower},{upper}{r._bounds[1]}'").encode('ascii')


# TODO: probably won't work with infs, nans and other tricky cases.
register_adapter(NumericRange, NumberRangeAdapter)
# Register globally typecasters and adapters for builtin range types.

def _register_builtin_range(pgrange, pyrange, oid, subtype_oid, array_oid):
    """Create a `RangeCaster` for a builtin range type and register it
    globally, returning the caster.

    note: the adapter is registered more than once, but this is harmless.
    """
    caster = RangeCaster(pgrange, pyrange, oid=oid,
                         subtype_oid=subtype_oid, array_oid=array_oid)
    caster._register()
    return caster


int4range_caster = _register_builtin_range(
    NumberRangeAdapter, NumericRange, 3904, 23, 3905)

int8range_caster = _register_builtin_range(
    NumberRangeAdapter, NumericRange, 3926, 20, 3927)

numrange_caster = _register_builtin_range(
    NumberRangeAdapter, NumericRange, 3906, 1700, 3907)

daterange_caster = _register_builtin_range(
    'daterange', DateRange, 3912, 1082, 3913)

tsrange_caster = _register_builtin_range(
    'tsrange', DateTimeRange, 3908, 1114, 3909)

tstzrange_caster = _register_builtin_range(
    'tstzrange', DateTimeTZRange, 3910, 1184, 3911)
def lookup(code, _cache={}):
    """Lookup an error code or class code and return its symbolic name.

    Raise `KeyError` if the code is not found.
    """
    if _cache:
        return _cache[code]

    # Build the reverse map lazily, at first usage.
    found = {}
    for name, value in globals().items():
        # Error codes are the module-level str constants of length 2
        # (class codes) or 5 (full SQLSTATE codes).
        if not isinstance(value, str) or len(value) not in (2, 5):
            continue
        # A trailing underscore disambiguates names mapped to two codes.
        found[value] = name.rstrip("_")

    assert found

    # Single update call, to avoid a race condition on import (bug #382)
    _cache.update(found)

    return _cache[code]


# autogenerated data: do not edit below this point.

# Error classes
CLASS_SUCCESSFUL_COMPLETION = '00'
CLASS_WARNING = '01'
CLASS_NO_DATA = '02'
CLASS_SQL_STATEMENT_NOT_YET_COMPLETE = '03'
CLASS_CONNECTION_EXCEPTION = '08'
CLASS_TRIGGERED_ACTION_EXCEPTION = '09'
CLASS_FEATURE_NOT_SUPPORTED = '0A'
CLASS_INVALID_TRANSACTION_INITIATION = '0B'
CLASS_LOCATOR_EXCEPTION = '0F'
CLASS_INVALID_GRANTOR = '0L'
CLASS_INVALID_ROLE_SPECIFICATION = '0P'
CLASS_DIAGNOSTICS_EXCEPTION = '0Z'
CLASS_CASE_NOT_FOUND = '20'
CLASS_CARDINALITY_VIOLATION = '21'
CLASS_DATA_EXCEPTION = '22'
CLASS_INTEGRITY_CONSTRAINT_VIOLATION = '23'
CLASS_INVALID_CURSOR_STATE = '24'
CLASS_INVALID_TRANSACTION_STATE = '25'
CLASS_INVALID_SQL_STATEMENT_NAME = '26'
CLASS_TRIGGERED_DATA_CHANGE_VIOLATION = '27'
CLASS_INVALID_AUTHORIZATION_SPECIFICATION = '28'
CLASS_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B'
CLASS_INVALID_TRANSACTION_TERMINATION = '2D'
CLASS_SQL_ROUTINE_EXCEPTION = '2F'
CLASS_INVALID_CURSOR_NAME = '34'
CLASS_EXTERNAL_ROUTINE_EXCEPTION = '38'
+CLASS_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39' +CLASS_SAVEPOINT_EXCEPTION = '3B' +CLASS_INVALID_CATALOG_NAME = '3D' +CLASS_INVALID_SCHEMA_NAME = '3F' +CLASS_TRANSACTION_ROLLBACK = '40' +CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42' +CLASS_WITH_CHECK_OPTION_VIOLATION = '44' +CLASS_INSUFFICIENT_RESOURCES = '53' +CLASS_PROGRAM_LIMIT_EXCEEDED = '54' +CLASS_OBJECT_NOT_IN_PREREQUISITE_STATE = '55' +CLASS_OPERATOR_INTERVENTION = '57' +CLASS_SYSTEM_ERROR = '58' +CLASS_SNAPSHOT_FAILURE = '72' +CLASS_CONFIGURATION_FILE_ERROR = 'F0' +CLASS_FOREIGN_DATA_WRAPPER_ERROR = 'HV' +CLASS_PL_PGSQL_ERROR = 'P0' +CLASS_INTERNAL_ERROR = 'XX' + +# Class 00 - Successful Completion +SUCCESSFUL_COMPLETION = '00000' + +# Class 01 - Warning +WARNING = '01000' +NULL_VALUE_ELIMINATED_IN_SET_FUNCTION = '01003' +STRING_DATA_RIGHT_TRUNCATION_ = '01004' +PRIVILEGE_NOT_REVOKED = '01006' +PRIVILEGE_NOT_GRANTED = '01007' +IMPLICIT_ZERO_BIT_PADDING = '01008' +DYNAMIC_RESULT_SETS_RETURNED = '0100C' +DEPRECATED_FEATURE = '01P01' + +# Class 02 - No Data (this is also a warning class per the SQL standard) +NO_DATA = '02000' +NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED = '02001' + +# Class 03 - SQL Statement Not Yet Complete +SQL_STATEMENT_NOT_YET_COMPLETE = '03000' + +# Class 08 - Connection Exception +CONNECTION_EXCEPTION = '08000' +SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION = '08001' +CONNECTION_DOES_NOT_EXIST = '08003' +SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION = '08004' +CONNECTION_FAILURE = '08006' +TRANSACTION_RESOLUTION_UNKNOWN = '08007' +PROTOCOL_VIOLATION = '08P01' + +# Class 09 - Triggered Action Exception +TRIGGERED_ACTION_EXCEPTION = '09000' + +# Class 0A - Feature Not Supported +FEATURE_NOT_SUPPORTED = '0A000' + +# Class 0B - Invalid Transaction Initiation +INVALID_TRANSACTION_INITIATION = '0B000' + +# Class 0F - Locator Exception +LOCATOR_EXCEPTION = '0F000' +INVALID_LOCATOR_SPECIFICATION = '0F001' + +# Class 0L - Invalid Grantor +INVALID_GRANTOR = '0L000' 
+INVALID_GRANT_OPERATION = '0LP01' + +# Class 0P - Invalid Role Specification +INVALID_ROLE_SPECIFICATION = '0P000' + +# Class 0Z - Diagnostics Exception +DIAGNOSTICS_EXCEPTION = '0Z000' +STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER = '0Z002' + +# Class 20 - Case Not Found +CASE_NOT_FOUND = '20000' + +# Class 21 - Cardinality Violation +CARDINALITY_VIOLATION = '21000' + +# Class 22 - Data Exception +DATA_EXCEPTION = '22000' +STRING_DATA_RIGHT_TRUNCATION = '22001' +NULL_VALUE_NO_INDICATOR_PARAMETER = '22002' +NUMERIC_VALUE_OUT_OF_RANGE = '22003' +NULL_VALUE_NOT_ALLOWED_ = '22004' +ERROR_IN_ASSIGNMENT = '22005' +INVALID_DATETIME_FORMAT = '22007' +DATETIME_FIELD_OVERFLOW = '22008' +INVALID_TIME_ZONE_DISPLACEMENT_VALUE = '22009' +ESCAPE_CHARACTER_CONFLICT = '2200B' +INVALID_USE_OF_ESCAPE_CHARACTER = '2200C' +INVALID_ESCAPE_OCTET = '2200D' +ZERO_LENGTH_CHARACTER_STRING = '2200F' +MOST_SPECIFIC_TYPE_MISMATCH = '2200G' +SEQUENCE_GENERATOR_LIMIT_EXCEEDED = '2200H' +NOT_AN_XML_DOCUMENT = '2200L' +INVALID_XML_DOCUMENT = '2200M' +INVALID_XML_CONTENT = '2200N' +INVALID_XML_COMMENT = '2200S' +INVALID_XML_PROCESSING_INSTRUCTION = '2200T' +INVALID_INDICATOR_PARAMETER_VALUE = '22010' +SUBSTRING_ERROR = '22011' +DIVISION_BY_ZERO = '22012' +INVALID_PRECEDING_OR_FOLLOWING_SIZE = '22013' +INVALID_ARGUMENT_FOR_NTILE_FUNCTION = '22014' +INTERVAL_FIELD_OVERFLOW = '22015' +INVALID_ARGUMENT_FOR_NTH_VALUE_FUNCTION = '22016' +INVALID_CHARACTER_VALUE_FOR_CAST = '22018' +INVALID_ESCAPE_CHARACTER = '22019' +INVALID_REGULAR_EXPRESSION = '2201B' +INVALID_ARGUMENT_FOR_LOGARITHM = '2201E' +INVALID_ARGUMENT_FOR_POWER_FUNCTION = '2201F' +INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION = '2201G' +INVALID_ROW_COUNT_IN_LIMIT_CLAUSE = '2201W' +INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE = '2201X' +INVALID_LIMIT_VALUE = '22020' +CHARACTER_NOT_IN_REPERTOIRE = '22021' +INDICATOR_OVERFLOW = '22022' +INVALID_PARAMETER_VALUE = '22023' +UNTERMINATED_C_STRING = '22024' +INVALID_ESCAPE_SEQUENCE = '22025' 
+STRING_DATA_LENGTH_MISMATCH = '22026' +TRIM_ERROR = '22027' +ARRAY_SUBSCRIPT_ERROR = '2202E' +INVALID_TABLESAMPLE_REPEAT = '2202G' +INVALID_TABLESAMPLE_ARGUMENT = '2202H' +DUPLICATE_JSON_OBJECT_KEY_VALUE = '22030' +INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION = '22031' +INVALID_JSON_TEXT = '22032' +INVALID_SQL_JSON_SUBSCRIPT = '22033' +MORE_THAN_ONE_SQL_JSON_ITEM = '22034' +NO_SQL_JSON_ITEM = '22035' +NON_NUMERIC_SQL_JSON_ITEM = '22036' +NON_UNIQUE_KEYS_IN_A_JSON_OBJECT = '22037' +SINGLETON_SQL_JSON_ITEM_REQUIRED = '22038' +SQL_JSON_ARRAY_NOT_FOUND = '22039' +SQL_JSON_MEMBER_NOT_FOUND = '2203A' +SQL_JSON_NUMBER_NOT_FOUND = '2203B' +SQL_JSON_OBJECT_NOT_FOUND = '2203C' +TOO_MANY_JSON_ARRAY_ELEMENTS = '2203D' +TOO_MANY_JSON_OBJECT_MEMBERS = '2203E' +SQL_JSON_SCALAR_REQUIRED = '2203F' +FLOATING_POINT_EXCEPTION = '22P01' +INVALID_TEXT_REPRESENTATION = '22P02' +INVALID_BINARY_REPRESENTATION = '22P03' +BAD_COPY_FILE_FORMAT = '22P04' +UNTRANSLATABLE_CHARACTER = '22P05' +NONSTANDARD_USE_OF_ESCAPE_CHARACTER = '22P06' + +# Class 23 - Integrity Constraint Violation +INTEGRITY_CONSTRAINT_VIOLATION = '23000' +RESTRICT_VIOLATION = '23001' +NOT_NULL_VIOLATION = '23502' +FOREIGN_KEY_VIOLATION = '23503' +UNIQUE_VIOLATION = '23505' +CHECK_VIOLATION = '23514' +EXCLUSION_VIOLATION = '23P01' + +# Class 24 - Invalid Cursor State +INVALID_CURSOR_STATE = '24000' + +# Class 25 - Invalid Transaction State +INVALID_TRANSACTION_STATE = '25000' +ACTIVE_SQL_TRANSACTION = '25001' +BRANCH_TRANSACTION_ALREADY_ACTIVE = '25002' +INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION = '25003' +INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION = '25004' +NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION = '25005' +READ_ONLY_SQL_TRANSACTION = '25006' +SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED = '25007' +HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL = '25008' +NO_ACTIVE_SQL_TRANSACTION = '25P01' +IN_FAILED_SQL_TRANSACTION = '25P02' +IDLE_IN_TRANSACTION_SESSION_TIMEOUT = '25P03' + +# Class 26 - Invalid 
SQL Statement Name +INVALID_SQL_STATEMENT_NAME = '26000' + +# Class 27 - Triggered Data Change Violation +TRIGGERED_DATA_CHANGE_VIOLATION = '27000' + +# Class 28 - Invalid Authorization Specification +INVALID_AUTHORIZATION_SPECIFICATION = '28000' +INVALID_PASSWORD = '28P01' + +# Class 2B - Dependent Privilege Descriptors Still Exist +DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B000' +DEPENDENT_OBJECTS_STILL_EXIST = '2BP01' + +# Class 2D - Invalid Transaction Termination +INVALID_TRANSACTION_TERMINATION = '2D000' + +# Class 2F - SQL Routine Exception +SQL_ROUTINE_EXCEPTION = '2F000' +MODIFYING_SQL_DATA_NOT_PERMITTED_ = '2F002' +PROHIBITED_SQL_STATEMENT_ATTEMPTED_ = '2F003' +READING_SQL_DATA_NOT_PERMITTED_ = '2F004' +FUNCTION_EXECUTED_NO_RETURN_STATEMENT = '2F005' + +# Class 34 - Invalid Cursor Name +INVALID_CURSOR_NAME = '34000' + +# Class 38 - External Routine Exception +EXTERNAL_ROUTINE_EXCEPTION = '38000' +CONTAINING_SQL_NOT_PERMITTED = '38001' +MODIFYING_SQL_DATA_NOT_PERMITTED = '38002' +PROHIBITED_SQL_STATEMENT_ATTEMPTED = '38003' +READING_SQL_DATA_NOT_PERMITTED = '38004' + +# Class 39 - External Routine Invocation Exception +EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39000' +INVALID_SQLSTATE_RETURNED = '39001' +NULL_VALUE_NOT_ALLOWED = '39004' +TRIGGER_PROTOCOL_VIOLATED = '39P01' +SRF_PROTOCOL_VIOLATED = '39P02' +EVENT_TRIGGER_PROTOCOL_VIOLATED = '39P03' + +# Class 3B - Savepoint Exception +SAVEPOINT_EXCEPTION = '3B000' +INVALID_SAVEPOINT_SPECIFICATION = '3B001' + +# Class 3D - Invalid Catalog Name +INVALID_CATALOG_NAME = '3D000' + +# Class 3F - Invalid Schema Name +INVALID_SCHEMA_NAME = '3F000' + +# Class 40 - Transaction Rollback +TRANSACTION_ROLLBACK = '40000' +SERIALIZATION_FAILURE = '40001' +TRANSACTION_INTEGRITY_CONSTRAINT_VIOLATION = '40002' +STATEMENT_COMPLETION_UNKNOWN = '40003' +DEADLOCK_DETECTED = '40P01' + +# Class 42 - Syntax Error or Access Rule Violation +SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42000' +INSUFFICIENT_PRIVILEGE = '42501' 
+SYNTAX_ERROR = '42601' +INVALID_NAME = '42602' +INVALID_COLUMN_DEFINITION = '42611' +NAME_TOO_LONG = '42622' +DUPLICATE_COLUMN = '42701' +AMBIGUOUS_COLUMN = '42702' +UNDEFINED_COLUMN = '42703' +UNDEFINED_OBJECT = '42704' +DUPLICATE_OBJECT = '42710' +DUPLICATE_ALIAS = '42712' +DUPLICATE_FUNCTION = '42723' +AMBIGUOUS_FUNCTION = '42725' +GROUPING_ERROR = '42803' +DATATYPE_MISMATCH = '42804' +WRONG_OBJECT_TYPE = '42809' +INVALID_FOREIGN_KEY = '42830' +CANNOT_COERCE = '42846' +UNDEFINED_FUNCTION = '42883' +GENERATED_ALWAYS = '428C9' +RESERVED_NAME = '42939' +UNDEFINED_TABLE = '42P01' +UNDEFINED_PARAMETER = '42P02' +DUPLICATE_CURSOR = '42P03' +DUPLICATE_DATABASE = '42P04' +DUPLICATE_PREPARED_STATEMENT = '42P05' +DUPLICATE_SCHEMA = '42P06' +DUPLICATE_TABLE = '42P07' +AMBIGUOUS_PARAMETER = '42P08' +AMBIGUOUS_ALIAS = '42P09' +INVALID_COLUMN_REFERENCE = '42P10' +INVALID_CURSOR_DEFINITION = '42P11' +INVALID_DATABASE_DEFINITION = '42P12' +INVALID_FUNCTION_DEFINITION = '42P13' +INVALID_PREPARED_STATEMENT_DEFINITION = '42P14' +INVALID_SCHEMA_DEFINITION = '42P15' +INVALID_TABLE_DEFINITION = '42P16' +INVALID_OBJECT_DEFINITION = '42P17' +INDETERMINATE_DATATYPE = '42P18' +INVALID_RECURSION = '42P19' +WINDOWING_ERROR = '42P20' +COLLATION_MISMATCH = '42P21' +INDETERMINATE_COLLATION = '42P22' + +# Class 44 - WITH CHECK OPTION Violation +WITH_CHECK_OPTION_VIOLATION = '44000' + +# Class 53 - Insufficient Resources +INSUFFICIENT_RESOURCES = '53000' +DISK_FULL = '53100' +OUT_OF_MEMORY = '53200' +TOO_MANY_CONNECTIONS = '53300' +CONFIGURATION_LIMIT_EXCEEDED = '53400' + +# Class 54 - Program Limit Exceeded +PROGRAM_LIMIT_EXCEEDED = '54000' +STATEMENT_TOO_COMPLEX = '54001' +TOO_MANY_COLUMNS = '54011' +TOO_MANY_ARGUMENTS = '54023' + +# Class 55 - Object Not In Prerequisite State +OBJECT_NOT_IN_PREREQUISITE_STATE = '55000' +OBJECT_IN_USE = '55006' +CANT_CHANGE_RUNTIME_PARAM = '55P02' +LOCK_NOT_AVAILABLE = '55P03' +UNSAFE_NEW_ENUM_VALUE_USAGE = '55P04' + +# Class 57 - Operator Intervention 
+OPERATOR_INTERVENTION = '57000' +QUERY_CANCELED = '57014' +ADMIN_SHUTDOWN = '57P01' +CRASH_SHUTDOWN = '57P02' +CANNOT_CONNECT_NOW = '57P03' +DATABASE_DROPPED = '57P04' + +# Class 58 - System Error (errors external to PostgreSQL itself) +SYSTEM_ERROR = '58000' +IO_ERROR = '58030' +UNDEFINED_FILE = '58P01' +DUPLICATE_FILE = '58P02' + +# Class 72 - Snapshot Failure +SNAPSHOT_TOO_OLD = '72000' + +# Class F0 - Configuration File Error +CONFIG_FILE_ERROR = 'F0000' +LOCK_FILE_EXISTS = 'F0001' + +# Class HV - Foreign Data Wrapper Error (SQL/MED) +FDW_ERROR = 'HV000' +FDW_OUT_OF_MEMORY = 'HV001' +FDW_DYNAMIC_PARAMETER_VALUE_NEEDED = 'HV002' +FDW_INVALID_DATA_TYPE = 'HV004' +FDW_COLUMN_NAME_NOT_FOUND = 'HV005' +FDW_INVALID_DATA_TYPE_DESCRIPTORS = 'HV006' +FDW_INVALID_COLUMN_NAME = 'HV007' +FDW_INVALID_COLUMN_NUMBER = 'HV008' +FDW_INVALID_USE_OF_NULL_POINTER = 'HV009' +FDW_INVALID_STRING_FORMAT = 'HV00A' +FDW_INVALID_HANDLE = 'HV00B' +FDW_INVALID_OPTION_INDEX = 'HV00C' +FDW_INVALID_OPTION_NAME = 'HV00D' +FDW_OPTION_NAME_NOT_FOUND = 'HV00J' +FDW_REPLY_HANDLE = 'HV00K' +FDW_UNABLE_TO_CREATE_EXECUTION = 'HV00L' +FDW_UNABLE_TO_CREATE_REPLY = 'HV00M' +FDW_UNABLE_TO_ESTABLISH_CONNECTION = 'HV00N' +FDW_NO_SCHEMAS = 'HV00P' +FDW_SCHEMA_NOT_FOUND = 'HV00Q' +FDW_TABLE_NOT_FOUND = 'HV00R' +FDW_FUNCTION_SEQUENCE_ERROR = 'HV010' +FDW_TOO_MANY_HANDLES = 'HV014' +FDW_INCONSISTENT_DESCRIPTOR_INFORMATION = 'HV021' +FDW_INVALID_ATTRIBUTE_VALUE = 'HV024' +FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH = 'HV090' +FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER = 'HV091' + +# Class P0 - PL/pgSQL Error +PLPGSQL_ERROR = 'P0000' +RAISE_EXCEPTION = 'P0001' +NO_DATA_FOUND = 'P0002' +TOO_MANY_ROWS = 'P0003' +ASSERT_FAILURE = 'P0004' + +# Class XX - Internal Error +INTERNAL_ERROR = 'XX000' +DATA_CORRUPTED = 'XX001' +INDEX_CORRUPTED = 'XX002' diff --git a/lib/errors.py b/lib/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..e4e47f5b297c8731d49061814e7cca6180b98391 --- /dev/null +++ 
b/lib/errors.py @@ -0,0 +1,38 @@ +"""Error classes for PostgreSQL error codes +""" + +# psycopg/errors.py - SQLSTATE and DB-API exceptions +# +# Copyright (C) 2018-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +# +# NOTE: the exceptions are injected into this module by the C extension. +# + + +def lookup(code): + """Lookup an error code and return its exception class. + + Raise `!KeyError` if the code is not found. + """ + from psycopg2._psycopg import sqlstate_errors # avoid circular import + return sqlstate_errors[code] diff --git a/lib/extensions.py b/lib/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..b938d0ce171a7de800f7dee47a5297c9f81b3cdc --- /dev/null +++ b/lib/extensions.py @@ -0,0 +1,213 @@ +"""psycopg extensions to the DBAPI-2.0 + +This module holds all the extensions to the DBAPI-2.0 provided by psycopg.
+ +- `connection` -- the new-type inheritable connection class +- `cursor` -- the new-type inheritable cursor class +- `lobject` -- the new-type inheritable large object class +- `adapt()` -- exposes the PEP-246_ compatible adapting mechanism used + by psycopg to adapt Python types to PostgreSQL ones + +.. _PEP-246: https://www.python.org/dev/peps/pep-0246/ +""" +# psycopg/extensions.py - DBAPI-2.0 extensions specific to psycopg +# +# Copyright (C) 2003-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import re as _re + +from psycopg2._psycopg import ( # noqa + BINARYARRAY, BOOLEAN, BOOLEANARRAY, BYTES, BYTESARRAY, DATE, DATEARRAY, + DATETIMEARRAY, DECIMAL, DECIMALARRAY, FLOAT, FLOATARRAY, INTEGER, + INTEGERARRAY, INTERVAL, INTERVALARRAY, LONGINTEGER, LONGINTEGERARRAY, + ROWIDARRAY, STRINGARRAY, TIME, TIMEARRAY, UNICODE, UNICODEARRAY, + AsIs, Binary, Boolean, Float, Int, QuotedString, ) + +from psycopg2._psycopg import ( # noqa + PYDATE, PYDATETIME, PYDATETIMETZ, PYINTERVAL, PYTIME, PYDATEARRAY, + PYDATETIMEARRAY, PYDATETIMETZARRAY, PYINTERVALARRAY, PYTIMEARRAY, + DateFromPy, TimeFromPy, TimestampFromPy, IntervalFromPy, ) + +from psycopg2._psycopg import ( # noqa + adapt, adapters, encodings, connection, cursor, + lobject, Xid, libpq_version, parse_dsn, quote_ident, + string_types, binary_types, new_type, new_array_type, register_type, + ISQLQuote, Notify, Diagnostics, Column, ConnectionInfo, + QueryCanceledError, TransactionRollbackError, + set_wait_callback, get_wait_callback, encrypt_password, ) + + +"""Isolation level values.""" +ISOLATION_LEVEL_AUTOCOMMIT = 0 +ISOLATION_LEVEL_READ_UNCOMMITTED = 4 +ISOLATION_LEVEL_READ_COMMITTED = 1 +ISOLATION_LEVEL_REPEATABLE_READ = 2 +ISOLATION_LEVEL_SERIALIZABLE = 3 +ISOLATION_LEVEL_DEFAULT = None + + +"""psycopg connection status values.""" +STATUS_SETUP = 0 +STATUS_READY = 1 +STATUS_BEGIN = 2 +STATUS_SYNC = 3 # currently unused +STATUS_ASYNC = 4 # currently unused +STATUS_PREPARED = 5 + +# This is a useful mnemonic to check if the connection is in a transaction +STATUS_IN_TRANSACTION = STATUS_BEGIN + + +"""psycopg asynchronous connection polling values""" +POLL_OK = 0 +POLL_READ = 1 +POLL_WRITE = 2 +POLL_ERROR = 3 + + +"""Backend transaction status values.""" +TRANSACTION_STATUS_IDLE = 0 +TRANSACTION_STATUS_ACTIVE = 1 +TRANSACTION_STATUS_INTRANS = 2 +TRANSACTION_STATUS_INERROR = 3 +TRANSACTION_STATUS_UNKNOWN = 4 + + +def register_adapter(typ, callable): + """Register 'callable' as an ISQLQuote adapter for type 
'typ'.""" + adapters[(typ, ISQLQuote)] = callable + + +# The SQL_IN class is the official adapter for tuples starting from 2.0.6. +class SQL_IN: + """Adapt any iterable to an SQL quotable object.""" + def __init__(self, seq): + self._seq = seq + self._conn = None + + def prepare(self, conn): + self._conn = conn + + def getquoted(self): + # this is the important line: note how every object in the + # list is adapted and then how getquoted() is called on it + pobjs = [adapt(o) for o in self._seq] + if self._conn is not None: + for obj in pobjs: + if hasattr(obj, 'prepare'): + obj.prepare(self._conn) + qobjs = [o.getquoted() for o in pobjs] + return b'(' + b', '.join(qobjs) + b')' + + def __str__(self): + return str(self.getquoted()) + + +class NoneAdapter: + """Adapt None to NULL. + + This adapter is not used normally as a fast path in mogrify uses NULL, + but it makes it easier to adapt composite types. + """ + def __init__(self, obj): + pass + + def getquoted(self, _null=b"NULL"): + return _null + + +def make_dsn(dsn=None, **kwargs): + """Convert a set of keywords into a connection string.""" + if dsn is None and not kwargs: + return '' + + # If no kwarg is specified don't mung the dsn, but verify it + if not kwargs: + parse_dsn(dsn) + return dsn + + # Override the dsn with the parameters + if 'database' in kwargs: + if 'dbname' in kwargs: + raise TypeError( + "you can't specify both 'database' and 'dbname' arguments") + kwargs['dbname'] = kwargs.pop('database') + + # Drop the None arguments + kwargs = {k: v for (k, v) in kwargs.items() if v is not None} + + if dsn is not None: + tmp = parse_dsn(dsn) + tmp.update(kwargs) + kwargs = tmp + + dsn = " ".join(["{}={}".format(k, _param_escape(str(v))) + for (k, v) in kwargs.items()]) + + # verify that the returned dsn is valid + parse_dsn(dsn) + + return dsn + + +def _param_escape(s, + re_escape=_re.compile(r"([\\'])"), + re_space=_re.compile(r'\s')): + """ + Apply the escaping rule required by PQconnectdb + """ + if not
s: + return "''" + + s = re_escape.sub(r'\\\1', s) + if re_space.search(s): + s = "'" + s + "'" + + return s + + +# Create default json typecasters for PostgreSQL 9.2 oids +from psycopg2._json import register_default_json, register_default_jsonb # noqa + +try: + JSON, JSONARRAY = register_default_json() + JSONB, JSONBARRAY = register_default_jsonb() +except ImportError: + pass + +del register_default_json, register_default_jsonb + + +# Create default Range typecasters +from psycopg2._range import Range # noqa +del Range + + +# Add the "cleaned" version of the encodings to the key. +# When the encoding is set its name is cleaned up from - and _ and turned +# uppercase, so an encoding not respecting these rules wouldn't be found in the +# encodings keys and would raise an exception with the unicode typecaster +for k, v in list(encodings.items()): + k = k.replace('_', '').replace('-', '').upper() + encodings[k] = v + +del k, v diff --git a/lib/extras.py b/lib/extras.py new file mode 100644 index 0000000000000000000000000000000000000000..f921d2d5e484d7c8913308776d2fbe5c5333aa14 --- /dev/null +++ b/lib/extras.py @@ -0,0 +1,1306 @@ +"""Miscellaneous goodies for psycopg2 + +This module is a generic place used to hold little helper functions +and classes until a better place in the distribution is found. +""" +# psycopg/extras.py - miscellaneous extra goodies for psycopg +# +# Copyright (C) 2003-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version.
+# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import os as _os +import time as _time +import re as _re +from collections import namedtuple, OrderedDict + +import logging as _logging + +import psycopg2 +from psycopg2 import extensions as _ext +from .extensions import cursor as _cursor +from .extensions import connection as _connection +from .extensions import adapt as _A, quote_ident +from functools import lru_cache + +from psycopg2._psycopg import ( # noqa + REPLICATION_PHYSICAL, REPLICATION_LOGICAL, + ReplicationConnection as _replicationConnection, + ReplicationCursor as _replicationCursor, + ReplicationMessage) + + +# expose the json adaptation stuff into the module +from psycopg2._json import ( # noqa + json, Json, register_json, register_default_json, register_default_jsonb) + + +# Expose range-related objects +from psycopg2._range import ( # noqa + Range, NumericRange, DateRange, DateTimeRange, DateTimeTZRange, + register_range, RangeAdapter, RangeCaster) + + +# Expose ipaddress-related objects +from psycopg2._ipaddress import register_ipaddress # noqa + + +class DictCursorBase(_cursor): + """Base class for all dict-like cursors.""" + + def __init__(self, *args, **kwargs): + if 'row_factory' in kwargs: + row_factory = kwargs['row_factory'] + del kwargs['row_factory'] + else: + raise NotImplementedError( + "DictCursorBase can't be instantiated without a row factory.") + 
super().__init__(*args, **kwargs) + self._query_executed = False + self._prefetch = False + self.row_factory = row_factory + + def fetchone(self): + if self._prefetch: + res = super().fetchone() + if self._query_executed: + self._build_index() + if not self._prefetch: + res = super().fetchone() + return res + + def fetchmany(self, size=None): + if self._prefetch: + res = super().fetchmany(size) + if self._query_executed: + self._build_index() + if not self._prefetch: + res = super().fetchmany(size) + return res + + def fetchall(self): + if self._prefetch: + res = super().fetchall() + if self._query_executed: + self._build_index() + if not self._prefetch: + res = super().fetchall() + return res + + def __iter__(self): + try: + if self._prefetch: + res = super().__iter__() + first = next(res) + if self._query_executed: + self._build_index() + if not self._prefetch: + res = super().__iter__() + first = next(res) + + yield first + while True: + yield next(res) + except StopIteration: + return + + +class DictConnection(_connection): + """A connection that uses `DictCursor` automatically.""" + def cursor(self, *args, **kwargs): + kwargs.setdefault('cursor_factory', self.cursor_factory or DictCursor) + return super().cursor(*args, **kwargs) + + +class DictCursor(DictCursorBase): + """A cursor that keeps a list of column name -> index mappings__. + + .. 
__: https://docs.python.org/glossary.html#term-mapping + """ + + def __init__(self, *args, **kwargs): + kwargs['row_factory'] = DictRow + super().__init__(*args, **kwargs) + self._prefetch = True + + def execute(self, query, vars=None): + self.index = OrderedDict() + self._query_executed = True + return super().execute(query, vars) + + def callproc(self, procname, vars=None): + self.index = OrderedDict() + self._query_executed = True + return super().callproc(procname, vars) + + def _build_index(self): + if self._query_executed and self.description: + for i in range(len(self.description)): + self.index[self.description[i][0]] = i + self._query_executed = False + + +class DictRow(list): + """A row object that allow by-column-name access to data.""" + + __slots__ = ('_index',) + + def __init__(self, cursor): + self._index = cursor.index + self[:] = [None] * len(cursor.description) + + def __getitem__(self, x): + if not isinstance(x, (int, slice)): + x = self._index[x] + return super().__getitem__(x) + + def __setitem__(self, x, v): + if not isinstance(x, (int, slice)): + x = self._index[x] + super().__setitem__(x, v) + + def items(self): + g = super().__getitem__ + return ((n, g(self._index[n])) for n in self._index) + + def keys(self): + return iter(self._index) + + def values(self): + g = super().__getitem__ + return (g(self._index[n]) for n in self._index) + + def get(self, x, default=None): + try: + return self[x] + except Exception: + return default + + def copy(self): + return OrderedDict(self.items()) + + def __contains__(self, x): + return x in self._index + + def __reduce__(self): + # this is apparently useless, but it fixes #1073 + return super().__reduce__() + + def __getstate__(self): + return self[:], self._index.copy() + + def __setstate__(self, data): + self[:] = data[0] + self._index = data[1] + + +class RealDictConnection(_connection): + """A connection that uses `RealDictCursor` automatically.""" + def cursor(self, *args, **kwargs): + 
kwargs.setdefault('cursor_factory', self.cursor_factory or RealDictCursor) + return super().cursor(*args, **kwargs) + + +class RealDictCursor(DictCursorBase): + """A cursor that uses a real dict as the base type for rows. + + Note that this cursor is extremely specialized and does not allow + the normal access (using integer indices) to fetched data. If you need + to access database rows both as a dictionary and a list, then use + the generic `DictCursor` instead of `!RealDictCursor`. + """ + def __init__(self, *args, **kwargs): + kwargs['row_factory'] = RealDictRow + super().__init__(*args, **kwargs) + + def execute(self, query, vars=None): + self.column_mapping = [] + self._query_executed = True + return super().execute(query, vars) + + def callproc(self, procname, vars=None): + self.column_mapping = [] + self._query_executed = True + return super().callproc(procname, vars) + + def _build_index(self): + if self._query_executed and self.description: + self.column_mapping = [d[0] for d in self.description] + self._query_executed = False + + +class RealDictRow(OrderedDict): + """A `!dict` subclass representing a data record.""" + + def __init__(self, *args, **kwargs): + if args and isinstance(args[0], _cursor): + cursor = args[0] + args = args[1:] + else: + cursor = None + + super().__init__(*args, **kwargs) + + if cursor is not None: + # Required for named cursors + if cursor.description and not cursor.column_mapping: + cursor._build_index() + + # Store the cols mapping in the dict itself until the row is fully + # populated, so we don't need to add attributes to the class + # (hence keeping its maintenance, special pickle support, etc.) 
+ self[RealDictRow] = cursor.column_mapping + + def __setitem__(self, key, value): + if RealDictRow in self: + # We are in the row building phase + mapping = self[RealDictRow] + super().__setitem__(mapping[key], value) + if key == len(mapping) - 1: + # Row building finished + del self[RealDictRow] + return + + super().__setitem__(key, value) + + +class NamedTupleConnection(_connection): + """A connection that uses `NamedTupleCursor` automatically.""" + def cursor(self, *args, **kwargs): + kwargs.setdefault('cursor_factory', self.cursor_factory or NamedTupleCursor) + return super().cursor(*args, **kwargs) + + +class NamedTupleCursor(_cursor): + """A cursor that generates results as `~collections.namedtuple`. + + `!fetch*()` methods will return named tuples instead of regular tuples, so + their elements can be accessed both as regular numeric items as well as + attributes. + + >>> nt_cur = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor) + >>> rec = nt_cur.fetchone() + >>> rec + Record(id=1, num=100, data="abc'def") + >>> rec[1] + 100 + >>> rec.data + "abc'def" + """ + Record = None + MAX_CACHE = 1024 + + def execute(self, query, vars=None): + self.Record = None + return super().execute(query, vars) + + def executemany(self, query, vars): + self.Record = None + return super().executemany(query, vars) + + def callproc(self, procname, vars=None): + self.Record = None + return super().callproc(procname, vars) + + def fetchone(self): + t = super().fetchone() + if t is not None: + nt = self.Record + if nt is None: + nt = self.Record = self._make_nt() + return nt._make(t) + + def fetchmany(self, size=None): + ts = super().fetchmany(size) + nt = self.Record + if nt is None: + nt = self.Record = self._make_nt() + return list(map(nt._make, ts)) + + def fetchall(self): + ts = super().fetchall() + nt = self.Record + if nt is None: + nt = self.Record = self._make_nt() + return list(map(nt._make, ts)) + + def __iter__(self): + try: + it = super().__iter__() + t = 
next(it) + + nt = self.Record + if nt is None: + nt = self.Record = self._make_nt() + + yield nt._make(t) + + while True: + yield nt._make(next(it)) + except StopIteration: + return + + # ascii except alnum and underscore + _re_clean = _re.compile( + '[' + _re.escape(' !"#$%&\'()*+,-./:;<=>?@[\\]^`{|}~') + ']') + + def _make_nt(self): + key = tuple(d[0] for d in self.description) if self.description else () + return self._cached_make_nt(key) + + @classmethod + def _do_make_nt(cls, key): + fields = [] + for s in key: + s = cls._re_clean.sub('_', s) + # Python identifier cannot start with numbers, namedtuple fields + # cannot start with underscore. So... + if s[0] == '_' or '0' <= s[0] <= '9': + s = 'f' + s + fields.append(s) + + nt = namedtuple("Record", fields) + return nt + + +@lru_cache(512) +def _cached_make_nt(cls, key): + return cls._do_make_nt(key) + + +# Exposed for testability, and if someone wants to monkeypatch to tweak +# the cache size. +NamedTupleCursor._cached_make_nt = classmethod(_cached_make_nt) + + +class LoggingConnection(_connection): + """A connection that logs all queries to a file or logger__ object. + + .. __: https://docs.python.org/library/logging.html + """ + + def initialize(self, logobj): + """Initialize the connection to log to `!logobj`. + + The `!logobj` parameter can be an open file object or a Logger/LoggerAdapter + instance from the standard logging module. + """ + self._logobj = logobj + if _logging and isinstance( + logobj, (_logging.Logger, _logging.LoggerAdapter)): + self.log = self._logtologger + else: + self.log = self._logtofile + + def filter(self, msg, curs): + """Filter the query before logging it. + + This is the method to overwrite to filter unwanted queries out of the + log or to add some extra data to the output. The default implementation + just does nothing. 
+ """ + return msg + + def _logtofile(self, msg, curs): + msg = self.filter(msg, curs) + if msg: + if isinstance(msg, bytes): + msg = msg.decode(_ext.encodings[self.encoding], 'replace') + self._logobj.write(msg + _os.linesep) + + def _logtologger(self, msg, curs): + msg = self.filter(msg, curs) + if msg: + self._logobj.debug(msg) + + def _check(self): + if not hasattr(self, '_logobj'): + raise self.ProgrammingError( + "LoggingConnection object has not been initialize()d") + + def cursor(self, *args, **kwargs): + self._check() + kwargs.setdefault('cursor_factory', self.cursor_factory or LoggingCursor) + return super().cursor(*args, **kwargs) + + +class LoggingCursor(_cursor): + """A cursor that logs queries using its connection logging facilities.""" + + def execute(self, query, vars=None): + try: + return super().execute(query, vars) + finally: + self.connection.log(self.query, self) + + def callproc(self, procname, vars=None): + try: + return super().callproc(procname, vars) + finally: + self.connection.log(self.query, self) + + +class MinTimeLoggingConnection(LoggingConnection): + """A connection that logs queries based on execution time. + + This is just an example of how to sub-class `LoggingConnection` to + provide some extra filtering for the logged queries. Both the + `initialize()` and `filter()` methods are overwritten to make sure + that only queries executing for more than ``mintime`` ms are logged. + + Note that this connection uses the specialized cursor + `MinTimeLoggingCursor`. 
+ """ + def initialize(self, logobj, mintime=0): + LoggingConnection.initialize(self, logobj) + self._mintime = mintime + + def filter(self, msg, curs): + t = (_time.time() - curs.timestamp) * 1000 + if t > self._mintime: + if isinstance(msg, bytes): + msg = msg.decode(_ext.encodings[self.encoding], 'replace') + return f"{msg}{_os.linesep} (execution time: {t} ms)" + + def cursor(self, *args, **kwargs): + kwargs.setdefault('cursor_factory', + self.cursor_factory or MinTimeLoggingCursor) + return LoggingConnection.cursor(self, *args, **kwargs) + + +class MinTimeLoggingCursor(LoggingCursor): + """The cursor sub-class companion to `MinTimeLoggingConnection`.""" + + def execute(self, query, vars=None): + self.timestamp = _time.time() + return LoggingCursor.execute(self, query, vars) + + def callproc(self, procname, vars=None): + self.timestamp = _time.time() + return LoggingCursor.callproc(self, procname, vars) + + +class LogicalReplicationConnection(_replicationConnection): + + def __init__(self, *args, **kwargs): + kwargs['replication_type'] = REPLICATION_LOGICAL + super().__init__(*args, **kwargs) + + +class PhysicalReplicationConnection(_replicationConnection): + + def __init__(self, *args, **kwargs): + kwargs['replication_type'] = REPLICATION_PHYSICAL + super().__init__(*args, **kwargs) + + +class StopReplication(Exception): + """ + Exception used to break out of the endless loop in + `~ReplicationCursor.consume_stream()`. + + Subclass of `~exceptions.Exception`. Intentionally *not* inherited from + `~psycopg2.Error` as occurrence of this exception does not indicate an + error. 
+ """ + pass + + +class ReplicationCursor(_replicationCursor): + """A cursor used for communication on replication connections.""" + + def create_replication_slot(self, slot_name, slot_type=None, output_plugin=None): + """Create streaming replication slot.""" + + command = f"CREATE_REPLICATION_SLOT {quote_ident(slot_name, self)} " + + if slot_type is None: + slot_type = self.connection.replication_type + + if slot_type == REPLICATION_LOGICAL: + if output_plugin is None: + raise psycopg2.ProgrammingError( + "output plugin name is required to create " + "logical replication slot") + + command += f"LOGICAL {quote_ident(output_plugin, self)}" + + elif slot_type == REPLICATION_PHYSICAL: + if output_plugin is not None: + raise psycopg2.ProgrammingError( + "cannot specify output plugin name when creating " + "physical replication slot") + + command += "PHYSICAL" + + else: + raise psycopg2.ProgrammingError( + f"unrecognized replication type: {repr(slot_type)}") + + self.execute(command) + + def drop_replication_slot(self, slot_name): + """Drop streaming replication slot.""" + + command = f"DROP_REPLICATION_SLOT {quote_ident(slot_name, self)}" + self.execute(command) + + def start_replication( + self, slot_name=None, slot_type=None, start_lsn=0, + timeline=0, options=None, decode=False, status_interval=10): + """Start replication stream.""" + + command = "START_REPLICATION " + + if slot_type is None: + slot_type = self.connection.replication_type + + if slot_type == REPLICATION_LOGICAL: + if slot_name: + command += f"SLOT {quote_ident(slot_name, self)} " + else: + raise psycopg2.ProgrammingError( + "slot name is required for logical replication") + + command += "LOGICAL " + + elif slot_type == REPLICATION_PHYSICAL: + if slot_name: + command += f"SLOT {quote_ident(slot_name, self)} " + # don't add "PHYSICAL", before 9.4 it was just START_REPLICATION XXX/XXX + + else: + raise psycopg2.ProgrammingError( + f"unrecognized replication type: {repr(slot_type)}") + + if 
type(start_lsn) is str: + lsn = start_lsn.split('/') + lsn = f"{int(lsn[0], 16):X}/{int(lsn[1], 16):08X}" + else: + lsn = f"{start_lsn >> 32 & 4294967295:X}/{start_lsn & 4294967295:08X}" + + command += lsn + + if timeline != 0: + if slot_type == REPLICATION_LOGICAL: + raise psycopg2.ProgrammingError( + "cannot specify timeline for logical replication") + + command += f" TIMELINE {timeline}" + + if options: + if slot_type == REPLICATION_PHYSICAL: + raise psycopg2.ProgrammingError( + "cannot specify output plugin options for physical replication") + + command += " (" + for k, v in options.items(): + if not command.endswith('('): + command += ", " + command += f"{quote_ident(k, self)} {_A(str(v))}" + command += ")" + + self.start_replication_expert( + command, decode=decode, status_interval=status_interval) + + # allows replication cursors to be used in select.select() directly + def fileno(self): + return self.connection.fileno() + + +# a dbtype and adapter for Python UUID type + +class UUID_adapter: + """Adapt Python's uuid.UUID__ type to PostgreSQL's uuid__. + + .. __: https://docs.python.org/library/uuid.html + .. __: https://www.postgresql.org/docs/current/static/datatype-uuid.html + """ + + def __init__(self, uuid): + self._uuid = uuid + + def __conform__(self, proto): + if proto is _ext.ISQLQuote: + return self + + def getquoted(self): + return (f"'{self._uuid}'::uuid").encode('utf8') + + def __str__(self): + return f"'{self._uuid}'::uuid" + + +def register_uuid(oids=None, conn_or_curs=None): + """Create the UUID type and an uuid.UUID adapter. + + :param oids: oid for the PostgreSQL :sql:`uuid` type, or 2-items sequence + with oids of the type and the array. If not specified, use PostgreSQL + standard oids. + :param conn_or_curs: where to register the typecaster. If not specified, + register it globally. 
+ """ + + import uuid + + if not oids: + oid1 = 2950 + oid2 = 2951 + elif isinstance(oids, (list, tuple)): + oid1, oid2 = oids + else: + oid1 = oids + oid2 = 2951 + + _ext.UUID = _ext.new_type((oid1, ), "UUID", + lambda data, cursor: data and uuid.UUID(data) or None) + _ext.UUIDARRAY = _ext.new_array_type((oid2,), "UUID[]", _ext.UUID) + + _ext.register_type(_ext.UUID, conn_or_curs) + _ext.register_type(_ext.UUIDARRAY, conn_or_curs) + _ext.register_adapter(uuid.UUID, UUID_adapter) + + return _ext.UUID + + +# a type, dbtype and adapter for PostgreSQL inet type + +class Inet: + """Wrap a string to allow for correct SQL-quoting of inet values. + + Note that this adapter does NOT check the passed value to make + sure it really is an inet-compatible address but DOES call adapt() + on it to make sure it is impossible to execute an SQL-injection + by passing an evil value to the initializer. + """ + def __init__(self, addr): + self.addr = addr + + def __repr__(self): + return f"{self.__class__.__name__}({self.addr!r})" + + def prepare(self, conn): + self._conn = conn + + def getquoted(self): + obj = _A(self.addr) + if hasattr(obj, 'prepare'): + obj.prepare(self._conn) + return obj.getquoted() + b"::inet" + + def __conform__(self, proto): + if proto is _ext.ISQLQuote: + return self + + def __str__(self): + return str(self.addr) + + +def register_inet(oid=None, conn_or_curs=None): + """Create the INET type and an Inet adapter. + + :param oid: oid for the PostgreSQL :sql:`inet` type, or 2-items sequence + with oids of the type and the array. If not specified, use PostgreSQL + standard oids. + :param conn_or_curs: where to register the typecaster. If not specified, + register it globally. 
+ """ + import warnings + warnings.warn( + "the inet adapter is deprecated, it's not very useful", + DeprecationWarning) + + if not oid: + oid1 = 869 + oid2 = 1041 + elif isinstance(oid, (list, tuple)): + oid1, oid2 = oid + else: + oid1 = oid + oid2 = 1041 + + _ext.INET = _ext.new_type((oid1, ), "INET", + lambda data, cursor: data and Inet(data) or None) + _ext.INETARRAY = _ext.new_array_type((oid2, ), "INETARRAY", _ext.INET) + + _ext.register_type(_ext.INET, conn_or_curs) + _ext.register_type(_ext.INETARRAY, conn_or_curs) + + return _ext.INET + + +def wait_select(conn): + """Wait until a connection or cursor has data available. + + The function is an example of a wait callback to be registered with + `~psycopg2.extensions.set_wait_callback()`. This function uses + :py:func:`~select.select()` to wait for data to become available, and + therefore is able to handle/receive SIGINT/KeyboardInterrupt. + """ + import select + from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE + + while True: + try: + state = conn.poll() + if state == POLL_OK: + break + elif state == POLL_READ: + select.select([conn.fileno()], [], []) + elif state == POLL_WRITE: + select.select([], [conn.fileno()], []) + else: + raise conn.OperationalError(f"bad state from poll: {state}") + except KeyboardInterrupt: + conn.cancel() + # the loop will be broken by a server error + continue + + +def _solve_conn_curs(conn_or_curs): + """Return the connection and a DBAPI cursor from a connection or cursor.""" + if conn_or_curs is None: + raise psycopg2.ProgrammingError("no connection or cursor provided") + + if hasattr(conn_or_curs, 'execute'): + conn = conn_or_curs.connection + curs = conn.cursor(cursor_factory=_cursor) + else: + conn = conn_or_curs + curs = conn.cursor(cursor_factory=_cursor) + + return conn, curs + + +class HstoreAdapter: + """Adapt a Python dict to the hstore syntax.""" + def __init__(self, wrapped): + self.wrapped = wrapped + + def prepare(self, conn): + self.conn = conn + + # 
use an old-style getquoted implementation if required + if conn.info.server_version < 90000: + self.getquoted = self._getquoted_8 + + def _getquoted_8(self): + """Use the operators available in PG pre-9.0.""" + if not self.wrapped: + return b"''::hstore" + + adapt = _ext.adapt + rv = [] + for k, v in self.wrapped.items(): + k = adapt(k) + k.prepare(self.conn) + k = k.getquoted() + + if v is not None: + v = adapt(v) + v.prepare(self.conn) + v = v.getquoted() + else: + v = b'NULL' + + # XXX this b'ing is painfully inefficient! + rv.append(b"(" + k + b" => " + v + b")") + + return b"(" + b'||'.join(rv) + b")" + + def _getquoted_9(self): + """Use the hstore(text[], text[]) function.""" + if not self.wrapped: + return b"''::hstore" + + k = _ext.adapt(list(self.wrapped.keys())) + k.prepare(self.conn) + v = _ext.adapt(list(self.wrapped.values())) + v.prepare(self.conn) + return b"hstore(" + k.getquoted() + b", " + v.getquoted() + b")" + + getquoted = _getquoted_9 + + _re_hstore = _re.compile(r""" + # hstore key: + # a string of normal or escaped chars + "((?: [^"\\] | \\. )*)" + \s*=>\s* # hstore value + (?: + NULL # the value can be null - not catched + # or a quoted string like the key + | "((?: [^"\\] | \\. )*)" + ) + (?:\s*,\s*|$) # pairs separated by comma or end of string. + """, _re.VERBOSE) + + @classmethod + def parse(self, s, cur, _bsdec=_re.compile(r"\\(.)")): + """Parse an hstore representation in a Python string. + + The hstore is represented as something like:: + + "a"=>"1", "b"=>"2" + + with backslash-escaped strings. 
+ """ + if s is None: + return None + + rv = {} + start = 0 + for m in self._re_hstore.finditer(s): + if m is None or m.start() != start: + raise psycopg2.InterfaceError( + f"error parsing hstore pair at char {start}") + k = _bsdec.sub(r'\1', m.group(1)) + v = m.group(2) + if v is not None: + v = _bsdec.sub(r'\1', v) + + rv[k] = v + start = m.end() + + if start < len(s): + raise psycopg2.InterfaceError( + f"error parsing hstore: unparsed data after char {start}") + + return rv + + @classmethod + def parse_unicode(self, s, cur): + """Parse an hstore returning unicode keys and values.""" + if s is None: + return None + + s = s.decode(_ext.encodings[cur.connection.encoding]) + return self.parse(s, cur) + + @classmethod + def get_oids(self, conn_or_curs): + """Return the lists of OID of the hstore and hstore[] types. + """ + conn, curs = _solve_conn_curs(conn_or_curs) + + # Store the transaction status of the connection to revert it after use + conn_status = conn.status + + # column typarray not available before PG 8.3 + typarray = conn.info.server_version >= 80300 and "typarray" or "NULL" + + rv0, rv1 = [], [] + + # get the oid for the hstore + curs.execute(f"""SELECT t.oid, {typarray} +FROM pg_type t JOIN pg_namespace ns + ON typnamespace = ns.oid +WHERE typname = 'hstore'; +""") + for oids in curs: + rv0.append(oids[0]) + rv1.append(oids[1]) + + # revert the status of the connection as before the command + if (conn_status != _ext.STATUS_IN_TRANSACTION + and not conn.autocommit): + conn.rollback() + + return tuple(rv0), tuple(rv1) + + +def register_hstore(conn_or_curs, globally=False, unicode=False, + oid=None, array_oid=None): + r"""Register adapter and typecaster for `!dict`\-\ |hstore| conversions. 
def register_hstore(conn_or_curs, globally=False, unicode=False,
        oid=None, array_oid=None):
    r"""Register adapter and typecaster for `!dict`\-\ |hstore| conversions.

    :param conn_or_curs: a connection or cursor: the typecaster will be
        registered only on this object unless *globally* is set to `!True`
    :param globally: register the adapter globally, not only on *conn_or_curs*
    :param unicode: kept for backward compatibility; unused here
    :param oid: the OID of the |hstore| type if known (e.g. from
        :sql:`SELECT 'hstore'::regtype::oid`); queried on *conn_or_curs*
        otherwise
    :param array_oid: the OID of the |hstore| array type if known; queried
        together with *oid* otherwise

    The |hstore| contrib module must be already installed in the database.
    Raise `~psycopg2.ProgrammingError` if the type is not found.
    """
    if oid is None:
        oid = HstoreAdapter.get_oids(conn_or_curs)
        if oid is None or not oid[0]:
            raise psycopg2.ProgrammingError(
                "hstore type not found in the database. "
                "please install it from your 'contrib/hstore.sql' file")
        array_oid = oid[1]
        oid = oid[0]

    if isinstance(oid, int):
        oid = (oid,)

    if array_oid is not None:
        if isinstance(array_oid, int):
            array_oid = (array_oid,)
        else:
            # drop the null placeholders returned for pre-8.3 servers
            array_oid = tuple(x for x in array_oid if x)

    # create and register the typecaster
    HSTORE = _ext.new_type(oid, "HSTORE", HstoreAdapter.parse)
    _ext.register_type(HSTORE, not globally and conn_or_curs or None)
    _ext.register_adapter(dict, HstoreAdapter)

    if array_oid:
        HSTOREARRAY = _ext.new_array_type(array_oid, "HSTOREARRAY", HSTORE)
        _ext.register_type(HSTOREARRAY, not globally and conn_or_curs or None)
class CompositeCaster:
    """Helps conversion of a PostgreSQL composite type into a Python object.

    Usually created by the `register_composite()` function; instances may
    also be created and registered manually when querying the database at
    registration time is not desirable (e.g. with asynchronous connections).
    """
    def __init__(self, name, oid, attrs, array_oid=None, schema=None):
        self.name = name
        self.schema = schema
        self.oid = oid
        self.array_oid = array_oid

        self.attnames = [attr[0] for attr in attrs]
        self.atttypes = [attr[1] for attr in attrs]
        self._create_type(name, self.attnames)
        self.typecaster = _ext.new_type((oid,), name, self.parse)
        if not array_oid:
            self.array_typecaster = None
        else:
            self.array_typecaster = _ext.new_array_type(
                (array_oid,), f"{name}ARRAY", self.typecaster)

    def parse(self, s, curs):
        # tokenize the record representation, cast each attribute with
        # its own oid, then build the Python object
        if s is None:
            return None

        tokens = self.tokenize(s)
        if len(tokens) != len(self.atttypes):
            raise psycopg2.DataError(
                "expecting %d components for the type %s, %d found instead" %
                (len(self.atttypes), self.name, len(tokens)))

        attrs = [curs.cast(oid, token)
                 for oid, token in zip(self.atttypes, tokens)]

        return self.make(attrs)

    def make(self, values):
        """Return a new Python object representing the data being casted.

        *values* is the list of attributes, already casted into their Python
        representation. Subclass this method to customize the composite cast.
        """
        return self._ctor(values)

    _re_tokenize = _re.compile(r"""
  \(? ([,)])                       # an empty token, representing NULL
| \(? " ((?: [^"] | "")*) " [,)]   # or a quoted string
| \(? ([^",)]+) [,)]               # or an unquoted string
    """, _re.VERBOSE)

    _re_undouble = _re.compile(r'(["\\])\1')

    @classmethod
    def tokenize(self, s):
        # split the record representation into raw tokens; None stands
        # for a NULL attribute
        tokens = []
        for m in self._re_tokenize.finditer(s):
            if m is None:
                raise psycopg2.InterfaceError(f"can't parse type: {s!r}")
            empty, quoted, plain = m.groups()
            if empty is not None:
                tokens.append(None)
            elif quoted is not None:
                # undo the doubling of " and \ inside quoted strings
                tokens.append(self._re_undouble.sub(r"\1", quoted))
            else:
                tokens.append(plain)

        return tokens

    def _create_type(self, name, attnames):
        # records are returned as namedtuples of the attributes
        self.type = namedtuple(name, attnames)
        self._ctor = self.type._make

    @classmethod
    def _from_db(self, name, conn_or_curs):
        """Return a `CompositeCaster` instance for the type *name*.

        Raise `ProgrammingError` if the type is not found.
        """
        conn, curs = _solve_conn_curs(conn_or_curs)

        # remember the transaction status to restore it after the query
        conn_status = conn.status

        # use the correct schema
        if '.' in name:
            schema, tname = name.split('.', 1)
        else:
            tname = name
            schema = 'public'

        # column typarray not available before PG 8.3
        typarray = conn.info.server_version >= 80300 and "typarray" or "NULL"

        # get the type oid and attributes
        curs.execute("""\
SELECT t.oid, %s, attname, atttypid
FROM pg_type t
JOIN pg_namespace ns ON typnamespace = ns.oid
JOIN pg_attribute a ON attrelid = typrelid
WHERE typname = %%s AND nspname = %%s
    AND attnum > 0 AND NOT attisdropped
ORDER BY attnum;
""" % typarray, (tname, schema))

        recs = curs.fetchall()

        # revert the status of the connection as before the command
        if (conn_status != _ext.STATUS_IN_TRANSACTION
                and not conn.autocommit):
            conn.rollback()

        if not recs:
            raise psycopg2.ProgrammingError(
                f"PostgreSQL type '{name}' not found")

        type_oid = recs[0][0]
        array_oid = recs[0][1]
        type_attrs = [(r[2], r[3]) for r in recs]

        return self(tname, type_oid, type_attrs,
                    array_oid=array_oid, schema=schema)
factory=None): + """Register a typecaster to convert a composite type into a tuple. + + :param name: the name of a PostgreSQL composite type, e.g. created using + the |CREATE TYPE|_ command + :param conn_or_curs: a connection or cursor used to find the type oid and + components; the typecaster is registered in a scope limited to this + object, unless *globally* is set to `!True` + :param globally: if `!False` (default) register the typecaster only on + *conn_or_curs*, otherwise register it globally + :param factory: if specified it should be a `CompositeCaster` subclass: use + it to :ref:`customize how to cast composite types ` + :return: the registered `CompositeCaster` or *factory* instance + responsible for the conversion + """ + if factory is None: + factory = CompositeCaster + + caster = factory._from_db(name, conn_or_curs) + _ext.register_type(caster.typecaster, not globally and conn_or_curs or None) + + if caster.array_typecaster is not None: + _ext.register_type( + caster.array_typecaster, not globally and conn_or_curs or None) + + return caster + + +def _paginate(seq, page_size): + """Consume an iterable and return it in chunks. + + Every chunk is at most `page_size`. Never return an empty chunk. + """ + page = [] + it = iter(seq) + while True: + try: + for i in range(page_size): + page.append(next(it)) + yield page + page = [] + except StopIteration: + if page: + yield page + return + + +def execute_batch(cur, sql, argslist, page_size=100): + r"""Execute groups of statements in fewer server roundtrips. + + Execute *sql* several times, against all parameters set (sequences or + mappings) found in *argslist*. + + The function is semantically similar to + + .. parsed-literal:: + + *cur*\.\ `~cursor.executemany`\ (\ *sql*\ , *argslist*\ ) + + but has a different implementation: Psycopg will join the statements into + fewer multi-statement commands, each one containing at most *page_size* + statements, resulting in a reduced number of server roundtrips. 
def execute_values(cur, sql, argslist, template=None, page_size=100, fetch=False):
    '''Execute a statement using :sql:`VALUES` with a sequence of parameters.

    :param cur: the cursor to use to execute the query.

    :param sql: the query to execute. It must contain a single ``%s``
        placeholder, which will be replaced by a `VALUES list`__.
        Example: ``"INSERT INTO mytable (id, f1, f2) VALUES %s"``.

    :param argslist: sequence of sequences or dictionaries with the
        arguments to send to the query, consistent with *template*.

    :param template: the snippet merged to every item in *argslist* to
        compose the query: positional placeholders (e.g. ``"(%s, %s)"``)
        for sequences, named ones (e.g. ``"(%(id)s, %(f1)s)"``) for
        mappings. If not specified, a positional template is built with
        the number of placeholders sniffed from the first item.

    :param page_size: maximum number of *argslist* items to include in
        every statement. If there are more items the function will execute
        more than one statement.

    :param fetch: if `!True` return the query results into a list (like in
        a `~cursor.fetchall()`). Useful for queries with :sql:`RETURNING`.

    .. __: https://www.postgresql.org/docs/current/static/queries-values.html

    After the execution of the function the `cursor.rowcount` property will
    **not** contain a total result.
    '''
    from psycopg2.sql import Composable
    if isinstance(sql, Composable):
        sql = sql.as_string(cur)

    # we can't just use sql % vals because vals is bytes: if sql is bytes
    # there will be some decoding error because of stupid codec used, and Py3
    # doesn't implement % on bytes: split the query on the placeholder
    # instead and join the parts around the mogrified values.
    if not isinstance(sql, bytes):
        sql = sql.encode(_ext.encodings[cur.connection.encoding])
    pre, post = _split_sql(sql)

    result = [] if fetch else None
    for page in _paginate(argslist, page_size=page_size):
        if template is None:
            # sniff the number of placeholders from the first item
            template = b'(' + b','.join([b'%s'] * len(page[0])) + b')'
        values = b','.join(cur.mogrify(template, args) for args in page)
        cur.execute(b''.join(pre) + values + b''.join(post))
        if fetch:
            result.extend(cur.fetchall())

    return result
+ """ + curr = pre = [] + post = [] + tokens = _re.split(br'(%.)', sql) + for token in tokens: + if len(token) != 2 or token[:1] != b'%': + curr.append(token) + continue + + if token[1:] == b's': + if curr is pre: + curr = post + else: + raise ValueError( + "the query contains more than one '%s' placeholder") + elif token[1:] == b'%': + curr.append(b'%') + else: + raise ValueError("unsupported format character: '%s'" + % token[1:].decode('ascii', 'replace')) + + if curr is pre: + raise ValueError("the query doesn't contain any '%s' placeholder") + + return pre, post diff --git a/lib/pool.py b/lib/pool.py new file mode 100644 index 0000000000000000000000000000000000000000..9d67d68eb34861345f545c0148c5a4cde0b3d28b --- /dev/null +++ b/lib/pool.py @@ -0,0 +1,187 @@ +"""Connection pooling for psycopg2 + +This module implements thread-safe (and not) connection pools. +""" +# psycopg/pool.py - pooling code for psycopg +# +# Copyright (C) 2003-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
import psycopg2
from psycopg2 import extensions as _ext


class PoolError(psycopg2.Error):
    pass


class AbstractConnectionPool:
    """Generic key-based pooling code."""

    def __init__(self, minconn, maxconn, *args, **kwargs):
        """Initialize the connection pool.

        'minconn' connections are created immediately, passing *args* and
        *kwargs* to `psycopg2.connect()`. The pool will support a maximum
        of about 'maxconn' connections.
        """
        self.minconn = int(minconn)
        self.maxconn = int(maxconn)
        self.closed = False

        self._args = args
        self._kwargs = kwargs

        self._pool = []
        self._used = {}
        self._rused = {}  # id(conn) -> key map
        self._keys = 0

        for _ in range(self.minconn):
            self._connect()

    def _connect(self, key=None):
        """Create a new connection and assign it to 'key' if not None."""
        conn = psycopg2.connect(*self._args, **self._kwargs)
        if key is None:
            self._pool.append(conn)
        else:
            self._used[key] = conn
            self._rused[id(conn)] = key
        return conn

    def _getkey(self):
        """Return a new unique key."""
        self._keys += 1
        return self._keys

    def _getconn(self, key=None):
        """Get a free connection and assign it to 'key' if not None."""
        if self.closed:
            raise PoolError("connection pool is closed")
        if key is None:
            key = self._getkey()

        # the same key gets the same connection back
        if key in self._used:
            return self._used[key]

        if self._pool:
            conn = self._pool.pop()
            self._used[key] = conn
            self._rused[id(conn)] = key
            return conn

        if len(self._used) == self.maxconn:
            raise PoolError("connection pool exhausted")
        return self._connect(key)

    def _putconn(self, conn, key=None, close=False):
        """Put away a connection."""
        if self.closed:
            raise PoolError("connection pool is closed")

        if key is None:
            key = self._rused.get(id(conn))
            if key is None:
                raise PoolError("trying to put unkeyed connection")

        if close or len(self._pool) >= self.minconn:
            # explicit close requested, or the pool is already full
            conn.close()
        elif not conn.closed:
            # return the connection into a consistent state before
            # putting it back into the pool
            status = conn.info.transaction_status
            if status == _ext.TRANSACTION_STATUS_UNKNOWN:
                # server connection lost
                conn.close()
            else:
                if status != _ext.TRANSACTION_STATUS_IDLE:
                    # connection in error or in transaction
                    conn.rollback()
                self._pool.append(conn)
        # else: the connection is already closed, just discard it

        # here we check for the presence of key because it can happen that
        # a thread tries to put back a connection after a call to close
        if not self.closed or key in self._used:
            del self._used[key]
            del self._rused[id(conn)]

    def _closeall(self):
        """Close all connections.

        Note that this can lead to some code fail badly when trying to use
        an already closed connection. If you call .closeall() make sure
        your code can deal with it.
        """
        if self.closed:
            raise PoolError("connection pool is closed")
        for conn in self._pool + list(self._used.values()):
            try:
                conn.close()
            except Exception:
                pass
        self.closed = True


class SimpleConnectionPool(AbstractConnectionPool):
    """A connection pool that can't be shared across different threads."""

    getconn = AbstractConnectionPool._getconn
    putconn = AbstractConnectionPool._putconn
    closeall = AbstractConnectionPool._closeall


class ThreadedConnectionPool(AbstractConnectionPool):
    """A connection pool that works with the threading module."""

    def __init__(self, minconn, maxconn, *args, **kwargs):
        """Initialize the threading lock."""
        import threading
        AbstractConnectionPool.__init__(
            self, minconn, maxconn, *args, **kwargs)
        self._lock = threading.Lock()

    def getconn(self, key=None):
        """Get a free connection and assign it to 'key' if not None."""
        with self._lock:
            return self._getconn(key)

    def putconn(self, conn=None, key=None, close=False):
        """Put away an unused connection."""
        with self._lock:
            self._putconn(conn, key, close)

    def closeall(self):
        """Close all connections (even the one currently in use.)"""
        with self._lock:
            self._closeall()
class Composable:
    """
    Abstract base class for objects that can be used to compose an SQL string.

    `!Composable` objects can be passed directly to `~cursor.execute()`,
    `~cursor.executemany()`, `~cursor.copy_expert()` in place of the query
    string. They can be joined with the ``+`` operator (the result is a
    `Composed`) and repeated with ``*`` and an integer.
    """
    def __init__(self, wrapped):
        self._wrapped = wrapped

    def __repr__(self):
        return f"{self.__class__.__name__}({self._wrapped!r})"

    def as_string(self, context):
        """
        Return the string value of the object.

        :param context: the context to evaluate the string into.
        :type context: `connection` or `cursor`

        Automatically invoked by `~cursor.execute()`,
        `~cursor.executemany()`, `~cursor.copy_expert()` if a `!Composable`
        is passed instead of the query string.
        """
        raise NotImplementedError

    def __add__(self, other):
        if isinstance(other, Composed):
            return Composed([self]) + other
        if isinstance(other, Composable):
            return Composed([self]) + Composed([other])
        return NotImplemented

    def __mul__(self, n):
        return Composed([self] * n)

    def __eq__(self, other):
        return type(self) is type(other) and self._wrapped == other._wrapped

    def __ne__(self, other):
        return not self.__eq__(other)


class Composed(Composable):
    """
    A `Composable` object made of a sequence of `!Composable`.

    Usually created using `!Composable` operators and methods, but it can
    also be built directly from a sequence of `!Composable` arguments.
    `!Composed` objects are iterable (so they can be used in `SQL.join`
    for instance).
    """
    def __init__(self, seq):
        seq = list(seq)
        for item in seq:
            if not isinstance(item, Composable):
                raise TypeError(
                    f"Composed elements must be Composable, got {item!r} instead")

        super().__init__(seq)

    @property
    def seq(self):
        """The list of the content of the `!Composed`."""
        return list(self._wrapped)

    def as_string(self, context):
        return ''.join(item.as_string(context) for item in self._wrapped)

    def __iter__(self):
        return iter(self._wrapped)

    def __add__(self, other):
        if isinstance(other, Composed):
            return Composed(self._wrapped + other._wrapped)
        if isinstance(other, Composable):
            return Composed(self._wrapped + [other])
        return NotImplemented

    def join(self, joiner):
        """
        Return a new `!Composed` interposing the *joiner* with the items.

        The *joiner* must be a `SQL` or a string which will be interpreted
        as an `SQL`.
        """
        if isinstance(joiner, str):
            joiner = SQL(joiner)
        elif not isinstance(joiner, SQL):
            raise TypeError(
                "Composed.join() argument must be a string or an SQL")

        return joiner.join(self)
class SQL(Composable):
    """
    A `Composable` representing a snippet of SQL statement.

    The wrapped *string* doesn't undergo any form of escaping, so it is not
    suitable to represent variable identifiers or values: use it only for
    constant strings representing templates or snippets of SQL statements;
    use other objects such as `Identifier` or `Literal` for the variable
    parts. Exposes `join()` and `format()` to build statements from pieces.
    """
    def __init__(self, string):
        if not isinstance(string, str):
            raise TypeError("SQL values must be strings")
        super().__init__(string)

    @property
    def string(self):
        """The string wrapped by the `!SQL` object."""
        return self._wrapped

    def as_string(self, context):
        return self._wrapped

    def format(self, *args, **kwargs):
        """
        Merge `Composable` objects into a template.

        :param `Composable` args: parameters to replace to numbered
            (``{0}``, ``{1}``) or auto-numbered (``{}``) placeholders
        :param `Composable` kwargs: parameters to replace to named
            (``{name}``) placeholders
        :return: the union of the `!SQL` string with placeholders replaced
        :rtype: `Composed`

        Similar to the Python `str.format()` method, but placeholder
        modifiers (``{0!r}``, ``{0:<10}``) are not supported and only
        `!Composable` objects can be passed to the template.
        """
        parts = []
        next_auto = 0
        for literal, name, spec, conv in _formatter.parse(self._wrapped):
            if spec:
                raise ValueError("no format specification supported by SQL")
            if conv:
                raise ValueError("no format conversion supported by SQL")
            if literal:
                parts.append(SQL(literal))

            if name is None:
                continue

            if name.isdigit():
                # manual numbering; forbidden after automatic numbering
                if next_auto:
                    raise ValueError(
                        "cannot switch from automatic field numbering to manual")
                parts.append(args[int(name)])
                next_auto = None

            elif not name:
                # automatic numbering; forbidden after manual numbering
                if next_auto is None:
                    raise ValueError(
                        "cannot switch from manual field numbering to automatic")
                parts.append(args[next_auto])
                next_auto += 1

            else:
                parts.append(kwargs[name])

        return Composed(parts)

    def join(self, seq):
        """
        Join a sequence of `Composable`.

        :param seq: the elements to join.
        :type seq: iterable of `!Composable`

        Use the `!SQL` object's *string* to separate the elements in *seq*.
        Note that `Composed` objects are iterable too, so they can be used
        as argument for this method.
        """
        items = []
        for i, piece in enumerate(seq):
            if i:
                items.append(self)
            items.append(piece)

        return Composed(items)
class Identifier(Composable):
    """
    A `Composable` representing an SQL identifier or a dot-separated
    sequence (a qualified name such as ``"schema"."table"``).

    Identifiers usually represent names of database objects, such as tables
    or fields. PostgreSQL identifiers follow different escaping rules than
    SQL string literals (double quotes instead of single): the wrapped
    strings are escaped with `~psycopg2.extensions.quote_ident()` when the
    object is composed into a query.
    """
    def __init__(self, *strings):
        if not strings:
            raise TypeError("Identifier cannot be empty")

        for s in strings:
            if not isinstance(s, str):
                raise TypeError("SQL identifier parts must be strings")

        super().__init__(strings)

    @property
    def strings(self):
        """A tuple with the strings wrapped by the `Identifier`."""
        return self._wrapped

    @property
    def string(self):
        """The string wrapped by the `Identifier`.

        Raise `!AttributeError` if the identifier is a multi-part
        (qualified) name: use `strings` instead.
        """
        if len(self._wrapped) == 1:
            return self._wrapped[0]
        else:
            # fixed the duplicated "than one than one" in the message
            raise AttributeError(
                "the Identifier wraps more than one string")

    def __repr__(self):
        return f"{self.__class__.__name__}({', '.join(map(repr, self._wrapped))})"

    def as_string(self, context):
        return '.'.join(ext.quote_ident(s, context) for s in self._wrapped)
class Literal(Composable):
    """
    A `Composable` representing an SQL value to include in a query.

    Usually you will want to include placeholders in the query and pass
    values as `~cursor.execute()` arguments; use this object only when a
    literal value really must be embedded in the query. The string returned
    by `!as_string()` follows the normal adaptation rules for Python
    objects.
    """
    @property
    def wrapped(self):
        """The object wrapped by the `!Literal`."""
        return self._wrapped

    def as_string(self, context):
        # the context can be either a connection or a cursor
        if isinstance(context, ext.connection):
            conn = context
        elif isinstance(context, ext.cursor):
            conn = context.connection
        else:
            raise TypeError("context must be a connection or a cursor")

        adapted = ext.adapt(self._wrapped)
        if hasattr(adapted, 'prepare'):
            adapted.prepare(conn)

        quoted = adapted.getquoted()
        if isinstance(quoted, bytes):
            quoted = quoted.decode(ext.encodings[conn.encoding])

        return quoted
sql.SQL(', ').join(map(sql.Placeholder, names))) + >>> print(q2.as_string(conn)) + insert into table ("foo", "bar", "baz") values (%(foo)s, %(bar)s, %(baz)s) + + """ + + def __init__(self, name=None): + if isinstance(name, str): + if ')' in name: + raise ValueError(f"invalid name: {name!r}") + + elif name is not None: + raise TypeError(f"expected string or None as name, got {name!r}") + + super().__init__(name) + + @property + def name(self): + """The name of the `!Placeholder`.""" + return self._wrapped + + def __repr__(self): + return f"Placeholder({self._wrapped if self._wrapped is not None else ''!r})" + + def as_string(self, context): + if self._wrapped is not None: + return f"%({self._wrapped})" + else: + return "%s" + + +# Literals +NULL = SQL("NULL") +DEFAULT = SQL("DEFAULT") diff --git a/lib/tz.py b/lib/tz.py new file mode 100644 index 0000000000000000000000000000000000000000..d88ca37c2ff2d5bfe3023bb93bba29f515a6fb17 --- /dev/null +++ b/lib/tz.py @@ -0,0 +1,158 @@ +"""tzinfo implementations for psycopg2 + +This module holds two different tzinfo implementations that can be used as +the 'tzinfo' argument to datetime constructors, directly passed to psycopg +functions or used to set the .tzinfo_factory attribute in cursors. +""" +# psycopg/tz.py - tzinfo implementation +# +# Copyright (C) 2003-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. 
import datetime
import time

# The zero timedelta, shared by the tzinfo implementations below.
ZERO = datetime.timedelta(0)


class FixedOffsetTimezone(datetime.tzinfo):
    """A `!tzinfo` with a fixed offset east from UTC.

    Modelled on the example implementation in the Python documentation,
    adapted to support pickling and to synthesise a default name in the
    form ``sHH:MM`` (``s`` being the sign).

    Instances are cached: constructing the class twice with the same
    ``(offset, name)`` pair returns the same object, saving memory and
    making comparisons cheap.

    The *offset* argument may be a `datetime.timedelta` or a number of
    minutes east of UTC.
    """
    # Class-level defaults; instance attributes shadow them when set.
    _name = None
    _offset = ZERO

    # Shared instance cache keyed on the raw (offset, name) arguments.
    _cache = {}

    def __new__(cls, offset=None, name=None):
        """Return a previously created instance for the same arguments,
        creating and caching a new one on first use.
        """
        key = (offset, name)
        try:
            return cls._cache[key]
        except KeyError:
            instance = super().__new__(cls, offset, name)
            cls._cache[key] = instance
            return instance

    def __init__(self, offset=None, name=None):
        if offset is not None:
            # Accept either a timedelta or a number of minutes.
            if not isinstance(offset, datetime.timedelta):
                offset = datetime.timedelta(minutes=offset)
            self._offset = offset
        if name is not None:
            self._name = name

    def __repr__(self):
        return (f"psycopg2.tz.FixedOffsetTimezone("
                f"offset={self._offset!r}, name={self._name!r})")

    def __eq__(self, other):
        # NOTE: only the offset takes part in comparison, not the name.
        if not isinstance(other, FixedOffsetTimezone):
            return NotImplemented
        return self._offset == other._offset

    def __ne__(self, other):
        if not isinstance(other, FixedOffsetTimezone):
            return NotImplemented
        return self._offset != other._offset

    def __getinitargs__(self):
        # Support pickling of tzinfo subclasses.
        return self._offset, self._name

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        if self._name is not None:
            return self._name

        # Build a default "sHH[:MM[:SS]]" label from the offset.
        mins, secs = divmod(self._offset.total_seconds(), 60)
        hrs, mins = divmod(mins, 60)
        label = "%+03d" % hrs
        if mins or secs:
            label += ":%02d" % mins
        if secs:
            label += ":%02d" % secs
        return label

    def dst(self, dt):
        return ZERO


# Platform-dependent offsets used by LocalTimezone below.
STDOFFSET = datetime.timedelta(seconds=-time.timezone)
DSTOFFSET = (datetime.timedelta(seconds=-time.altzone)
             if time.daylight else STDOFFSET)
DSTDIFF = DSTOFFSET - STDOFFSET
+ """ + def utcoffset(self, dt): + if self._isdst(dt): + return DSTOFFSET + else: + return STDOFFSET + + def dst(self, dt): + if self._isdst(dt): + return DSTDIFF + else: + return ZERO + + def tzname(self, dt): + return time.tzname[self._isdst(dt)] + + def _isdst(self, dt): + tt = (dt.year, dt.month, dt.day, + dt.hour, dt.minute, dt.second, + dt.weekday(), 0, -1) + stamp = time.mktime(tt) + tt = time.localtime(stamp) + return tt.tm_isdst > 0 + + +LOCAL = LocalTimezone() + +# TODO: pre-generate some interesting time zones? diff --git a/psycopg/_psycopg.vc9.amd64.manifest b/psycopg/_psycopg.vc9.amd64.manifest new file mode 100644 index 0000000000000000000000000000000000000000..e92d583faf53f2a0197a699a2b01acc2dafe540a --- /dev/null +++ b/psycopg/_psycopg.vc9.amd64.manifest @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/psycopg/_psycopg.vc9.x86.manifest b/psycopg/_psycopg.vc9.x86.manifest new file mode 100644 index 0000000000000000000000000000000000000000..9fc55da4ff07dd84368da482bd3984279fed0653 --- /dev/null +++ b/psycopg/_psycopg.vc9.x86.manifest @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/psycopg/adapter_asis.c b/psycopg/adapter_asis.c new file mode 100644 index 0000000000000000000000000000000000000000..5c757860d99ad85893d9b37733eb04598ef3c68a --- /dev/null +++ b/psycopg/adapter_asis.c @@ -0,0 +1,195 @@ +/* adapter_asis.c - adapt types as they are + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/adapter_asis.h" +#include "psycopg/microprotocols_proto.h" + +#include + + +/** the AsIs object **/ + +static PyObject * +asis_getquoted(asisObject *self, PyObject *args) +{ + PyObject *rv; + if (self->wrapped == Py_None) { + Py_INCREF(psyco_null); + rv = psyco_null; + } + else { + rv = PyObject_Str(self->wrapped); + /* unicode to bytes */ + if (rv) { + PyObject *tmp = PyUnicode_AsUTF8String(rv); + Py_DECREF(rv); + rv = tmp; + } + } + + return rv; +} + +static PyObject * +asis_str(asisObject *self) +{ + return psyco_ensure_text(asis_getquoted(self, NULL)); +} + +static PyObject * +asis_conform(asisObject *self, PyObject *args) +{ + PyObject *res, *proto; + + if (!PyArg_ParseTuple(args, "O", &proto)) return NULL; + + if (proto == (PyObject*)&isqlquoteType) + res = (PyObject*)self; + else + res = Py_None; + + Py_INCREF(res); + return res; +} + +/** the AsIs object */ + +/* object member list */ + +static struct PyMemberDef asisObject_members[] = { + {"adapted", T_OBJECT, offsetof(asisObject, wrapped), READONLY}, + {NULL} +}; + +/* object method table */ + +static PyMethodDef asisObject_methods[] = { + {"getquoted", (PyCFunction)asis_getquoted, METH_NOARGS, + "getquoted() -> wrapped object value as SQL-quoted string"}, + {"__conform__", 
(PyCFunction)asis_conform, METH_VARARGS, NULL}, + {NULL} /* Sentinel */ +}; + +/* initialization and finalization methods */ + +static int +asis_setup(asisObject *self, PyObject *obj) +{ + Dprintf("asis_setup: init asis object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + + Py_INCREF(obj); + self->wrapped = obj; + + Dprintf("asis_setup: good asis object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + return 0; +} + +static void +asis_dealloc(PyObject* obj) +{ + asisObject *self = (asisObject *)obj; + + Py_CLEAR(self->wrapped); + + Dprintf("asis_dealloc: deleted asis object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + obj, Py_REFCNT(obj) + ); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +asis_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *o; + + if (!PyArg_ParseTuple(args, "O", &o)) + return -1; + + return asis_setup((asisObject *)obj, o); +} + +static PyObject * +asis_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define asisType_doc \ +"AsIs(str) -> new AsIs adapter object" + +PyTypeObject asisType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.AsIs", + sizeof(asisObject), 0, + asis_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)asis_str, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + asisType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + asisObject_methods, /*tp_methods*/ + asisObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 
asis_init, /*tp_init*/ + 0, /*tp_alloc*/ + asis_new, /*tp_new*/ +}; diff --git a/psycopg/adapter_asis.h b/psycopg/adapter_asis.h new file mode 100644 index 0000000000000000000000000000000000000000..b6c82b722d17f917f734cd82cb79e9759d3f3779 --- /dev/null +++ b/psycopg/adapter_asis.h @@ -0,0 +1,48 @@ +/* adapter_asis.h - definition for the psycopg AsIs type wrapper + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_ASIS_H +#define PSYCOPG_ASIS_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject asisType; + +typedef struct { + PyObject_HEAD + + /* this is the real object we wrap */ + PyObject *wrapped; + +} asisObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_ASIS_H) */ diff --git a/psycopg/adapter_binary.c b/psycopg/adapter_binary.c new file mode 100644 index 0000000000000000000000000000000000000000..d6b110ca99e1c3a4f0a218f5d1f47bf4ae8e3ea3 --- /dev/null +++ b/psycopg/adapter_binary.c @@ -0,0 +1,281 @@ +/* adapter_binary.c - Binary objects + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
/* binary_escape - escape a raw byte buffer for use as a bytea literal.
 *
 * Delegates to libpq: with a connection the escaping follows that
 * connection's settings (PQescapeByteaConn), otherwise the legacy
 * connection-less PQescapeBytea is used.  The result is allocated by
 * libpq and must be released with PQfreemem; *to_length receives the
 * escaped length.  Returns NULL on out-of-memory.
 */
static unsigned char *
binary_escape(unsigned char *from, size_t from_length,
              size_t *to_length, PGconn *conn)
{
    if (conn)
        return PQescapeByteaConn(conn, from, from_length, to_length);
    else
        return PQescapeBytea(from, from_length, to_length);
}

/* binary_quote - do the quote process on plain and unicode strings
 *
 * Returns a new bytes object of the form '...'::bytea (E'...'::bytea
 * when the connection requires the E'' escape syntax), psyco_null for
 * Binary(None), or NULL with TypeError set when the wrapped object
 * does not support the buffer protocol.
 */
static PyObject *
binary_quote(binaryObject *self)
{
    char *to = NULL;
    const char *buffer = NULL;
    Py_ssize_t buffer_len;
    size_t len = 0;
    PyObject *rv = NULL;
    Py_buffer view;
    int got_view = 0;    /* set once view is valid, so we release it once */

    /* Allow Binary(None) to work */
    if (self->wrapped == Py_None) {
        Py_INCREF(psyco_null);
        rv = psyco_null;
        goto exit;
    }

    /* if we got a plain string or a buffer we escape it and save the buffer */
    if (PyObject_CheckBuffer(self->wrapped)) {
        if (0 > PyObject_GetBuffer(self->wrapped, &view, PyBUF_CONTIG_RO)) {
            goto exit;
        }
        got_view = 1;
        buffer = (const char *)(view.buf);
        buffer_len = view.len;
    }

    /* not a buffer-protocol object: fall through to the TypeError below */
    if (!buffer) {
        goto exit;
    }

    /* escape and build quoted buffer */
    to = (char *)binary_escape((unsigned char*)buffer, (size_t)buffer_len,
        &len, self->conn ? ((connectionObject*)self->conn)->pgconn : NULL);
    if (to == NULL) {
        PyErr_NoMemory();
        goto exit;
    }

    if (len > 0)
        rv = Bytes_FromFormat(
            (self->conn && ((connectionObject*)self->conn)->equote)
                ? "E'%s'::bytea" : "'%s'::bytea" , to);
    else
        rv = Bytes_FromString("''::bytea");

exit:
    /* single cleanup point for the libpq buffer and the Py_buffer view */
    if (to) { PQfreemem(to); }
    if (got_view) { PyBuffer_Release(&view); }

    /* if the wrapped object is not bytes or a buffer, this is an error */
    if (!rv && !PyErr_Occurred()) {
        PyErr_Format(PyExc_TypeError, "can't escape %s to binary",
            Py_TYPE(self->wrapped)->tp_name);
    }

    return rv;
}
{NULL} /* Sentinel */ +}; + +/* initialization and finalization methods */ + +static int +binary_setup(binaryObject *self, PyObject *str) +{ + Dprintf("binary_setup: init binary object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + + self->buffer = NULL; + self->conn = NULL; + Py_INCREF(str); + self->wrapped = str; + + Dprintf("binary_setup: good binary object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self)); + return 0; +} + +static void +binary_dealloc(PyObject* obj) +{ + binaryObject *self = (binaryObject *)obj; + + Py_CLEAR(self->wrapped); + Py_CLEAR(self->buffer); + Py_CLEAR(self->conn); + + Dprintf("binary_dealloc: deleted binary object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + obj, Py_REFCNT(obj) + ); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +binary_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *str; + + if (!PyArg_ParseTuple(args, "O", &str)) + return -1; + + return binary_setup((binaryObject *)obj, str); +} + +static PyObject * +binary_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define binaryType_doc \ +"Binary(buffer) -> new binary object" + +PyTypeObject binaryType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.Binary", + sizeof(binaryObject), 0, + binary_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)binary_str, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + binaryType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + binaryObject_methods, /*tp_methods*/ + binaryObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, 
/*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + binary_init, /*tp_init*/ + 0, /*tp_alloc*/ + binary_new, /*tp_new*/ +}; diff --git a/psycopg/adapter_binary.h b/psycopg/adapter_binary.h new file mode 100644 index 0000000000000000000000000000000000000000..54f9fb55b159eb526c40f8a6dde97d54d32a8018 --- /dev/null +++ b/psycopg/adapter_binary.h @@ -0,0 +1,48 @@ +/* adapter_binary.h - definition for the Binary type + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_BINARY_H +#define PSYCOPG_BINARY_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject binaryType; + +typedef struct { + PyObject_HEAD + + PyObject *wrapped; + PyObject *buffer; + PyObject *conn; +} binaryObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_BINARY_H) */ diff --git a/psycopg/adapter_datetime.c b/psycopg/adapter_datetime.c new file mode 100644 index 0000000000000000000000000000000000000000..9df26ad16a71afe7323dd6b985ea401059a4d903 --- /dev/null +++ b/psycopg/adapter_datetime.c @@ -0,0 +1,515 @@ +/* adapter_datetime.c - python date/time objects + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
/* _pydatetime_string_date_time - render a date/time/datetime wrapper.
 *
 * Builds a bytes literal of the form '<isoformat>'::<pgtype>, choosing
 * the PostgreSQL type from the wrapper's type tag and, for time and
 * timestamp, from whether the wrapped object carries a tzinfo.
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *
_pydatetime_string_date_time(pydatetimeObject *self)
{
    PyObject *rv = NULL;
    PyObject *iso = NULL;
    PyObject *tz;

    /* Select the right PG type to cast into. */
    char *fmt = NULL;
    switch (self->type) {
    case PSYCO_DATETIME_TIME:
        /* tz-aware times map to timetz, naive ones to time */
        tz = PyObject_GetAttrString(self->wrapped, "tzinfo");
        if (!tz) { goto error; }
        fmt = (tz == Py_None) ? "'%s'::time" : "'%s'::timetz";
        Py_DECREF(tz);
        break;
    case PSYCO_DATETIME_DATE:
        fmt = "'%s'::date";
        break;
    case PSYCO_DATETIME_TIMESTAMP:
        /* tz-aware datetimes map to timestamptz, naive to timestamp */
        tz = PyObject_GetAttrString(self->wrapped, "tzinfo");
        if (!tz) { goto error; }
        fmt = (tz == Py_None) ? "'%s'::timestamp" : "'%s'::timestamptz";
        Py_DECREF(tz);
        break;
    }

    /* isoformat() yields a string PostgreSQL parses for all these types */
    if (!(iso = psyco_ensure_bytes(
            PyObject_CallMethod(self->wrapped, "isoformat", NULL)))) {
        goto error;
    }

    rv = Bytes_FromFormat(fmt, Bytes_AsString(iso));

    Py_DECREF(iso);
    return rv;

error:
    Py_XDECREF(iso);
    return rv;
}
{NULL} /* Sentinel */ +}; + +/* initialization and finalization methods */ + +static int +pydatetime_setup(pydatetimeObject *self, PyObject *obj, int type) +{ + Dprintf("pydatetime_setup: init datetime object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self)); + + self->type = type; + Py_INCREF(obj); + self->wrapped = obj; + + Dprintf("pydatetime_setup: good pydatetime object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self)); + return 0; +} + +static void +pydatetime_dealloc(PyObject* obj) +{ + pydatetimeObject *self = (pydatetimeObject *)obj; + + Py_CLEAR(self->wrapped); + + Dprintf("mpydatetime_dealloc: deleted pydatetime object at %p, " + "refcnt = " FORMAT_CODE_PY_SSIZE_T, obj, Py_REFCNT(obj)); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +pydatetime_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *dt; + int type = -1; /* raise an error if type was not passed! */ + + if (!PyArg_ParseTuple(args, "O|i", &dt, &type)) + return -1; + + return pydatetime_setup((pydatetimeObject *)obj, dt, type); +} + +static PyObject * +pydatetime_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define pydatetimeType_doc \ +"datetime(datetime, type) -> new datetime wrapper object" + +PyTypeObject pydatetimeType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2._psycopg.datetime", + sizeof(pydatetimeObject), 0, + pydatetime_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)pydatetime_str, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + pydatetimeType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ 
+ pydatetimeObject_methods, /*tp_methods*/ + pydatetimeObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + pydatetime_init, /*tp_init*/ + 0, /*tp_alloc*/ + pydatetime_new, /*tp_new*/ +}; + + +/** module-level functions **/ + +PyObject * +psyco_Date(PyObject *self, PyObject *args) +{ + PyObject *res = NULL; + int year, month, day; + + PyObject* obj = NULL; + + if (!PyArg_ParseTuple(args, "iii", &year, &month, &day)) + return NULL; + + obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->DateType, + "iii", year, month, day); + + if (obj) { + res = PyObject_CallFunction((PyObject *)&pydatetimeType, + "Oi", obj, PSYCO_DATETIME_DATE); + Py_DECREF(obj); + } + + return res; +} + +PyObject * +psyco_Time(PyObject *self, PyObject *args) +{ + PyObject *res = NULL; + PyObject *tzinfo = NULL; + int hours, minutes=0; + double micro, second=0.0; + + PyObject* obj = NULL; + + if (!PyArg_ParseTuple(args, "iid|O", &hours, &minutes, &second, + &tzinfo)) + return NULL; + + micro = (second - floor(second)) * 1000000.0; + second = floor(second); + + if (tzinfo == NULL) + obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->TimeType, "iiii", + hours, minutes, (int)second, (int)round(micro)); + else + obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->TimeType, "iiiiO", + hours, minutes, (int)second, (int)round(micro), tzinfo); + + if (obj) { + res = PyObject_CallFunction((PyObject *)&pydatetimeType, + "Oi", obj, PSYCO_DATETIME_TIME); + Py_DECREF(obj); + } + + return res; +} + +static PyObject * +_psyco_Timestamp(int year, int month, int day, + int hour, int minute, double second, PyObject *tzinfo) +{ + double micro; + PyObject *obj; + PyObject *res = NULL; + + micro = (second - floor(second)) * 1000000.0; + second = floor(second); + + if (tzinfo == NULL) + obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->DateTimeType, + "iiiiiii", + year, month, day, hour, minute, (int)second, + 
(int)round(micro)); + else + obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->DateTimeType, + "iiiiiiiO", + year, month, day, hour, minute, (int)second, + (int)round(micro), tzinfo); + + if (obj) { + res = PyObject_CallFunction((PyObject *)&pydatetimeType, + "Oi", obj, PSYCO_DATETIME_TIMESTAMP); + Py_DECREF(obj); + } + + return res; +} + +PyObject * +psyco_Timestamp(PyObject *self, PyObject *args) +{ + PyObject *tzinfo = NULL; + int year, month, day; + int hour=0, minute=0; /* default to midnight */ + double second=0.0; + + if (!PyArg_ParseTuple(args, "iii|iidO", &year, &month, &day, + &hour, &minute, &second, &tzinfo)) + return NULL; + + return _psyco_Timestamp(year, month, day, hour, minute, second, tzinfo); +} + +PyObject * +psyco_DateFromTicks(PyObject *self, PyObject *args) +{ + PyObject *res = NULL; + struct tm tm; + time_t t; + double ticks; + + if (!PyArg_ParseTuple(args, "d", &ticks)) + return NULL; + + t = (time_t)floor(ticks); + if (localtime_r(&t, &tm)) { + args = Py_BuildValue("iii", tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday); + if (args) { + res = psyco_Date(self, args); + Py_DECREF(args); + } + } + else { + PyErr_SetString(InterfaceError, "failed localtime call"); + } + + return res; +} + +PyObject * +psyco_TimeFromTicks(PyObject *self, PyObject *args) +{ + PyObject *res = NULL; + struct tm tm; + time_t t; + double ticks; + + if (!PyArg_ParseTuple(args,"d", &ticks)) + return NULL; + + t = (time_t)floor(ticks); + ticks -= (double)t; + if (localtime_r(&t, &tm)) { + args = Py_BuildValue("iid", tm.tm_hour, tm.tm_min, + (double)tm.tm_sec + ticks); + if (args) { + res = psyco_Time(self, args); + Py_DECREF(args); + } + } + else { + PyErr_SetString(InterfaceError, "failed localtime call"); + } + + return res; +} + +PyObject * +psyco_TimestampFromTicks(PyObject *self, PyObject *args) +{ + pydatetimeObject *wrapper = NULL; + PyObject *dt_aware = NULL; + PyObject *res = NULL; + struct tm tm; + time_t t; + double ticks; + + if (!PyArg_ParseTuple(args, "d", 
&ticks)) + return NULL; + + t = (time_t)floor(ticks); + ticks -= (double)t; + if (!localtime_r(&t, &tm)) { + PyErr_SetString(InterfaceError, "failed localtime call"); + goto exit; + } + + /* Convert the tm to a wrapper containing a naive datetime.datetime */ + if (!(wrapper = (pydatetimeObject *)_psyco_Timestamp( + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, (double)tm.tm_sec + ticks, NULL))) { + goto exit; + } + + /* Localize the datetime and assign it back to the wrapper */ + if (!(dt_aware = PyObject_CallMethod( + wrapper->wrapped, "astimezone", NULL))) { + goto exit; + } + Py_CLEAR(wrapper->wrapped); + wrapper->wrapped = dt_aware; + dt_aware = NULL; + + /* the wrapper is ready to be returned */ + res = (PyObject *)wrapper; + wrapper = NULL; + +exit: + Py_XDECREF(dt_aware); + Py_XDECREF(wrapper); + return res; +} + +PyObject * +psyco_DateFromPy(PyObject *self, PyObject *args) +{ + PyObject *obj; + + if (!PyArg_ParseTuple(args, "O!", PyDateTimeAPI->DateType, &obj)) + return NULL; + + return PyObject_CallFunction((PyObject *)&pydatetimeType, "Oi", obj, + PSYCO_DATETIME_DATE); +} + +PyObject * +psyco_TimeFromPy(PyObject *self, PyObject *args) +{ + PyObject *obj; + + if (!PyArg_ParseTuple(args, "O!", PyDateTimeAPI->TimeType, &obj)) + return NULL; + + return PyObject_CallFunction((PyObject *)&pydatetimeType, "Oi", obj, + PSYCO_DATETIME_TIME); +} + +PyObject * +psyco_TimestampFromPy(PyObject *self, PyObject *args) +{ + PyObject *obj; + + if (!PyArg_ParseTuple(args, "O!", PyDateTimeAPI->DateTimeType, &obj)) + return NULL; + + return PyObject_CallFunction((PyObject *)&pydatetimeType, "Oi", obj, + PSYCO_DATETIME_TIMESTAMP); +} + +PyObject * +psyco_IntervalFromPy(PyObject *self, PyObject *args) +{ + PyObject *obj; + + if (!PyArg_ParseTuple(args, "O!", PyDateTimeAPI->DeltaType, &obj)) + return NULL; + + return PyObject_CallFunction((PyObject *)&pydatetimeType, "Oi", obj, + PSYCO_DATETIME_INTERVAL); +} diff --git a/psycopg/adapter_datetime.h 
b/psycopg/adapter_datetime.h new file mode 100644 index 0000000000000000000000000000000000000000..7705db316ef12dddaba461ed709819169b0c3e29 --- /dev/null +++ b/psycopg/adapter_datetime.h @@ -0,0 +1,107 @@ +/* adapter_datetime.h - definition for the python date/time types + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_DATETIME_H +#define PSYCOPG_DATETIME_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject pydatetimeType; + +typedef struct { + PyObject_HEAD + + PyObject *wrapped; + int type; +#define PSYCO_DATETIME_TIME 0 +#define PSYCO_DATETIME_DATE 1 +#define PSYCO_DATETIME_TIMESTAMP 2 +#define PSYCO_DATETIME_INTERVAL 3 + +} pydatetimeObject; + + +RAISES_NEG HIDDEN int adapter_datetime_init(void); + +HIDDEN PyObject *psyco_Date(PyObject *module, PyObject *args); +#define psyco_Date_doc \ + "Date(year, month, day) -> new date\n\n" \ + "Build an object holding a date value." 
+ +HIDDEN PyObject *psyco_Time(PyObject *module, PyObject *args); +#define psyco_Time_doc \ + "Time(hour, minutes, seconds, tzinfo=None) -> new time\n\n" \ + "Build an object holding a time value." + +HIDDEN PyObject *psyco_Timestamp(PyObject *module, PyObject *args); +#define psyco_Timestamp_doc \ + "Timestamp(year, month, day, hour, minutes, seconds, tzinfo=None) -> new timestamp\n\n" \ + "Build an object holding a timestamp value." + +HIDDEN PyObject *psyco_DateFromTicks(PyObject *module, PyObject *args); +#define psyco_DateFromTicks_doc \ + "DateFromTicks(ticks) -> new date\n\n" \ + "Build an object holding a date value from the given ticks value.\n\n" \ + "Ticks are the number of seconds since the epoch; see the documentation " \ + "of the standard Python time module for details)." + +HIDDEN PyObject *psyco_TimeFromTicks(PyObject *module, PyObject *args); +#define psyco_TimeFromTicks_doc \ + "TimeFromTicks(ticks) -> new time\n\n" \ + "Build an object holding a time value from the given ticks value.\n\n" \ + "Ticks are the number of seconds since the epoch; see the documentation " \ + "of the standard Python time module for details)." + +HIDDEN PyObject *psyco_TimestampFromTicks(PyObject *module, PyObject *args); +#define psyco_TimestampFromTicks_doc \ + "TimestampFromTicks(ticks) -> new timestamp\n\n" \ + "Build an object holding a timestamp value from the given ticks value.\n\n" \ + "Ticks are the number of seconds since the epoch; see the documentation " \ + "of the standard Python time module for details)." 
+ +HIDDEN PyObject *psyco_DateFromPy(PyObject *module, PyObject *args); +#define psyco_DateFromPy_doc \ + "DateFromPy(datetime.date) -> new wrapper" + +HIDDEN PyObject *psyco_TimeFromPy(PyObject *module, PyObject *args); +#define psyco_TimeFromPy_doc \ + "TimeFromPy(datetime.time) -> new wrapper" + +HIDDEN PyObject *psyco_TimestampFromPy(PyObject *module, PyObject *args); +#define psyco_TimestampFromPy_doc \ + "TimestampFromPy(datetime.datetime) -> new wrapper" + +HIDDEN PyObject *psyco_IntervalFromPy(PyObject *module, PyObject *args); +#define psyco_IntervalFromPy_doc \ + "IntervalFromPy(datetime.timedelta) -> new wrapper" + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_DATETIME_H) */ diff --git a/psycopg/adapter_list.c b/psycopg/adapter_list.c new file mode 100644 index 0000000000000000000000000000000000000000..e22292b6cee5604981e932f25a269dc3fe484dfd --- /dev/null +++ b/psycopg/adapter_list.c @@ -0,0 +1,342 @@ +/* adapter_list.c - python list objects + * + * Copyright (C) 2004-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/adapter_list.h" +#include "psycopg/microprotocols.h" +#include "psycopg/microprotocols_proto.h" + + +/* list_str, list_getquoted - return result of quoting */ + +static PyObject * +list_quote(listObject *self) +{ + /* adapt the list by calling adapt() recursively and then wrapping + everything into "ARRAY[]" */ + PyObject *res = NULL; + PyObject **qs = NULL; + Py_ssize_t bufsize = 0; + char *buf = NULL, *ptr; + + /* list consisting of only NULL don't work with the ARRAY[] construct + * so we use the {NULL,...} syntax. The same syntax is also necessary + * to convert array of arrays containing only nulls. */ + int all_nulls = 1; + + Py_ssize_t i, len; + + len = PyList_GET_SIZE(self->wrapped); + + /* empty arrays are converted to NULLs (still searching for a way to + insert an empty array in postgresql */ + if (len == 0) { + /* it cannot be ARRAY[] because it would make empty lists unusable + * in any() without a cast. 
But we may convert it into ARRAY[] below */ + res = Bytes_FromString("'{}'"); + goto exit; + } + + if (!(qs = PyMem_New(PyObject *, len))) { + PyErr_NoMemory(); + goto exit; + } + memset(qs, 0, len * sizeof(PyObject *)); + + for (i = 0; i < len; i++) { + PyObject *wrapped = PyList_GET_ITEM(self->wrapped, i); + if (wrapped == Py_None) { + Py_INCREF(psyco_null); + qs[i] = psyco_null; + } + else { + if (!(qs[i] = microprotocol_getquoted( + wrapped, (connectionObject*)self->connection))) { + goto exit; + } + + /* Lists of arrays containing only nulls are also not supported + * by the ARRAY construct so we should do some special casing */ + if (PyList_Check(wrapped)) { + if (Bytes_AS_STRING(qs[i])[0] == 'A') { + all_nulls = 0; + } + else if (0 == strcmp(Bytes_AS_STRING(qs[i]), "'{}'")) { + /* case of issue #788: '{{}}' is not supported but + * array[array[]] is */ + all_nulls = 0; + Py_CLEAR(qs[i]); + if (!(qs[i] = Bytes_FromString("ARRAY[]"))) { + goto exit; + } + } + } + else { + all_nulls = 0; + } + } + bufsize += Bytes_GET_SIZE(qs[i]) + 1; /* this, and a comma */ + } + + /* Create an array literal, usually ARRAY[...] 
but if the contents are + * all NULL or array of NULL we must use the '{...}' syntax + */ + if (!(ptr = buf = PyMem_Malloc(bufsize + 8))) { + PyErr_NoMemory(); + goto exit; + } + + if (!all_nulls) { + strcpy(ptr, "ARRAY["); + ptr += 6; + for (i = 0; i < len; i++) { + Py_ssize_t sl; + sl = Bytes_GET_SIZE(qs[i]); + memcpy(ptr, Bytes_AS_STRING(qs[i]), sl); + ptr += sl; + *ptr++ = ','; + } + *(ptr - 1) = ']'; + } + else { + *ptr++ = '\''; + *ptr++ = '{'; + for (i = 0; i < len; i++) { + /* in case all the adapted things are nulls (or array of nulls), + * the quoted string is either NULL or an array of the form + * '{NULL,...}', in which case we have to strip the extra quotes */ + char *s; + Py_ssize_t sl; + s = Bytes_AS_STRING(qs[i]); + sl = Bytes_GET_SIZE(qs[i]); + if (s[0] != '\'') { + memcpy(ptr, s, sl); + ptr += sl; + } + else { + memcpy(ptr, s + 1, sl - 2); + ptr += sl - 2; + } + *ptr++ = ','; + } + *(ptr - 1) = '}'; + *ptr++ = '\''; + } + + res = Bytes_FromStringAndSize(buf, ptr - buf); + +exit: + if (qs) { + for (i = 0; i < len; i++) { + PyObject *q = qs[i]; + Py_XDECREF(q); + } + PyMem_Free(qs); + } + PyMem_Free(buf); + + return res; +} + +static PyObject * +list_str(listObject *self) +{ + return psyco_ensure_text(list_quote(self)); +} + +static PyObject * +list_getquoted(listObject *self, PyObject *args) +{ + return list_quote(self); +} + +static PyObject * +list_prepare(listObject *self, PyObject *args) +{ + PyObject *conn; + + if (!PyArg_ParseTuple(args, "O!", &connectionType, &conn)) + return NULL; + + Py_CLEAR(self->connection); + Py_INCREF(conn); + self->connection = conn; + + Py_RETURN_NONE; +} + +static PyObject * +list_conform(listObject *self, PyObject *args) +{ + PyObject *res, *proto; + + if (!PyArg_ParseTuple(args, "O", &proto)) return NULL; + + if (proto == (PyObject*)&isqlquoteType) + res = (PyObject*)self; + else + res = Py_None; + + Py_INCREF(res); + return res; +} + +/** the DateTime wrapper object **/ + +/* object member list */ + +static 
struct PyMemberDef listObject_members[] = { + {"adapted", T_OBJECT, offsetof(listObject, wrapped), READONLY}, + {NULL} +}; + +/* object method table */ + +static PyMethodDef listObject_methods[] = { + {"getquoted", (PyCFunction)list_getquoted, METH_NOARGS, + "getquoted() -> wrapped object value as SQL date/time"}, + {"prepare", (PyCFunction)list_prepare, METH_VARARGS, + "prepare(conn) -> set encoding to conn->encoding"}, + {"__conform__", (PyCFunction)list_conform, METH_VARARGS, NULL}, + {NULL} /* Sentinel */ +}; + +/* initialization and finalization methods */ + +static int +list_setup(listObject *self, PyObject *obj) +{ + Dprintf("list_setup: init list object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + + if (!PyList_Check(obj)) + return -1; + + self->connection = NULL; + Py_INCREF(obj); + self->wrapped = obj; + + Dprintf("list_setup: good list object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + return 0; +} + +static int +list_traverse(listObject *self, visitproc visit, void *arg) +{ + Py_VISIT(self->wrapped); + Py_VISIT(self->connection); + return 0; +} + +static int +list_clear(listObject *self) +{ + Py_CLEAR(self->wrapped); + Py_CLEAR(self->connection); + return 0; +} + +static void +list_dealloc(listObject* self) +{ + PyObject_GC_UnTrack((PyObject *)self); + list_clear(self); + + Dprintf("list_dealloc: deleted list object at %p, " + "refcnt = " FORMAT_CODE_PY_SSIZE_T, self, Py_REFCNT(self)); + + Py_TYPE(self)->tp_free((PyObject *)self); +} + +static int +list_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *l; + + if (!PyArg_ParseTuple(args, "O", &l)) + return -1; + + return list_setup((listObject *)obj, l); +} + +static PyObject * +list_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define listType_doc \ +"List(list) -> new list wrapper object" + +PyTypeObject listType = { + PyVarObject_HEAD_INIT(NULL, 0) 
+ "psycopg2._psycopg.List", + sizeof(listObject), 0, + (destructor)list_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)list_str, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + listType_doc, /*tp_doc*/ + (traverseproc)list_traverse, /*tp_traverse*/ + (inquiry)list_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + listObject_methods, /*tp_methods*/ + listObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + list_init, /*tp_init*/ + 0, /*tp_alloc*/ + list_new, /*tp_new*/ +}; diff --git a/psycopg/adapter_list.h b/psycopg/adapter_list.h new file mode 100644 index 0000000000000000000000000000000000000000..2e00b5313fb5a8a2ae53eac185fd83eb17f96bb0 --- /dev/null +++ b/psycopg/adapter_list.h @@ -0,0 +1,47 @@ +/* adapter_list.h - definition for the python list types + * + * Copyright (C) 2004-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. 
+ * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_LIST_H +#define PSYCOPG_LIST_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* Type object for the List adapter (implemented in adapter_list.c). */ +extern HIDDEN PyTypeObject listType; + +/* Adapter state: wraps a Python list so list_quote() can render it as a + * PostgreSQL ARRAY[...] (or '{...}') literal. */ +typedef struct { + PyObject_HEAD + + /* the wrapped Python list being adapted */ + PyObject *wrapped; + /* set by prepare(); passed to microprotocol_getquoted() to adapt items */ + PyObject *connection; +} listObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_LIST_H) */ diff --git a/psycopg/adapter_pboolean.c b/psycopg/adapter_pboolean.c new file mode 100644 index 0000000000000000000000000000000000000000..6a281190b44f890511ca4d4c336a5edaae623b0f --- /dev/null +++ b/psycopg/adapter_pboolean.c @@ -0,0 +1,185 @@ +/* adapter_pboolean.c - psycopg boolean type wrapper implementation + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/adapter_pboolean.h" +#include "psycopg/microprotocols_proto.h" + +#include + + +/** the Boolean object **/ + +static PyObject * +pboolean_getquoted(pbooleanObject *self, PyObject *args) +{ + if (PyObject_IsTrue(self->wrapped)) { + return Bytes_FromString("true"); + } + else { + return Bytes_FromString("false"); + } +} + +static PyObject * +pboolean_str(pbooleanObject *self) +{ + return psyco_ensure_text(pboolean_getquoted(self, NULL)); +} + +static PyObject * +pboolean_conform(pbooleanObject *self, PyObject *args) +{ + PyObject *res, *proto; + + if (!PyArg_ParseTuple(args, "O", &proto)) return NULL; + + if (proto == (PyObject*)&isqlquoteType) + res = (PyObject*)self; + else + res = Py_None; + + Py_INCREF(res); + return res; +} + +/** the Boolean object */ + +/* object member list */ + +static struct PyMemberDef pbooleanObject_members[] = { + {"adapted", T_OBJECT, offsetof(pbooleanObject, wrapped), READONLY}, + {NULL} +}; + +/* object method table */ + +static PyMethodDef pbooleanObject_methods[] = { + {"getquoted", (PyCFunction)pboolean_getquoted, METH_NOARGS, + "getquoted() -> wrapped object value as SQL-quoted string"}, + {"__conform__", (PyCFunction)pboolean_conform, METH_VARARGS, NULL}, + {NULL} /* Sentinel */ +}; + +/* initialization and finalization methods */ + +static int +pboolean_setup(pbooleanObject *self, PyObject *obj) +{ + Dprintf("pboolean_setup: init pboolean object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + + Py_INCREF(obj); + self->wrapped = obj; + + Dprintf("pboolean_setup: good pboolean object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + return 0; +} + +static void +pboolean_dealloc(PyObject* obj) +{ + pbooleanObject *self = (pbooleanObject *)obj; + + Py_CLEAR(self->wrapped); + + Dprintf("pboolean_dealloc: deleted pboolean object at %p, 
refcnt = " + FORMAT_CODE_PY_SSIZE_T, + obj, Py_REFCNT(obj) + ); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +pboolean_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *o; + + if (!PyArg_ParseTuple(args, "O", &o)) + return -1; + + return pboolean_setup((pbooleanObject *)obj, o); +} + +static PyObject * +pboolean_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define pbooleanType_doc \ +"Boolean(str) -> new Boolean adapter object" + +PyTypeObject pbooleanType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.Boolean", + sizeof(pbooleanObject), 0, + pboolean_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)pboolean_str, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + pbooleanType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + pbooleanObject_methods, /*tp_methods*/ + pbooleanObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + pboolean_init, /*tp_init*/ + 0, /*tp_alloc*/ + pboolean_new, /*tp_new*/ +}; diff --git a/psycopg/adapter_pboolean.h b/psycopg/adapter_pboolean.h new file mode 100644 index 0000000000000000000000000000000000000000..562fedc0ed2365d60e53f52a125e0f73e2e0116e --- /dev/null +++ b/psycopg/adapter_pboolean.h @@ -0,0 +1,48 @@ +/* adapter_pboolean.h - definition for the psycopg boolean type wrapper + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_PBOOLEAN_H +#define PSYCOPG_PBOOLEAN_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* Type object for the Boolean adapter (implemented in adapter_pboolean.c). */ +extern HIDDEN PyTypeObject pbooleanType; + +/* Adapter state: wraps a Python object whose truth value getquoted() + * renders as the SQL literal "true" or "false". */ +typedef struct { + PyObject_HEAD + + /* this is the real object we wrap */ + PyObject *wrapped; + +} pbooleanObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_PBOOLEAN_H) */ diff --git a/psycopg/adapter_pdecimal.c b/psycopg/adapter_pdecimal.c new file mode 100644 index 0000000000000000000000000000000000000000..25a7212d1a7245013e34cf079cb53155c13e741e --- /dev/null +++ b/psycopg/adapter_pdecimal.c @@ -0,0 +1,248 @@ +/* adapter_pdecimal.c - psycopg Decimal type wrapper implementation + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version.
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/adapter_pdecimal.h" +#include "psycopg/microprotocols_proto.h" + +#include +#include + + +/** the Decimal object **/ + +static PyObject * +pdecimal_getquoted(pdecimalObject *self, PyObject *args) +{ + PyObject *check, *res = NULL; + check = PyObject_CallMethod(self->wrapped, "is_finite", NULL); + if (check == Py_True) { + if (!(res = PyObject_Str(self->wrapped))) { + goto end; + } + goto output; + } + else if (check) { + res = Bytes_FromString("'NaN'::numeric"); + goto end; + } + + /* is_finite() was introduced 2.5.1 < somewhere <= 2.5.4. + * We assume we are here because we didn't find the method. 
*/ + PyErr_Clear(); + + if (!(check = PyObject_CallMethod(self->wrapped, "_isnan", NULL))) { + goto end; + } + if (PyObject_IsTrue(check)) { + res = Bytes_FromString("'NaN'::numeric"); + goto end; + } + + Py_DECREF(check); + if (!(check = PyObject_CallMethod(self->wrapped, "_isinfinity", NULL))) { + goto end; + } + if (PyObject_IsTrue(check)) { + res = Bytes_FromString("'NaN'::numeric"); + goto end; + } + + /* wrapped is finite */ + if (!(res = PyObject_Str(self->wrapped))) { + goto end; + } + + /* res may be unicode and may suffer for issue #57 */ +output: + + /* unicode to bytes */ + { + PyObject *tmp = PyUnicode_AsUTF8String(res); + Py_DECREF(res); + if (!(res = tmp)) { + goto end; + } + } + + if ('-' == Bytes_AS_STRING(res)[0]) { + /* Prepend a space in front of negative numbers (ticket #57) */ + PyObject *tmp; + if (!(tmp = Bytes_FromString(" "))) { + Py_DECREF(res); + res = NULL; + goto end; + } + Bytes_ConcatAndDel(&tmp, res); + if (!(res = tmp)) { + goto end; + } + } + +end: + Py_XDECREF(check); + return res; +} + +static PyObject * +pdecimal_str(pdecimalObject *self) +{ + return psyco_ensure_text(pdecimal_getquoted(self, NULL)); +} + +static PyObject * +pdecimal_conform(pdecimalObject *self, PyObject *args) +{ + PyObject *res, *proto; + + if (!PyArg_ParseTuple(args, "O", &proto)) return NULL; + + if (proto == (PyObject*)&isqlquoteType) + res = (PyObject*)self; + else + res = Py_None; + + Py_INCREF(res); + return res; +} + +/** the Decimal object */ + +/* object member list */ + +static struct PyMemberDef pdecimalObject_members[] = { + {"adapted", T_OBJECT, offsetof(pdecimalObject, wrapped), READONLY}, + {NULL} +}; + +/* object method table */ + +static PyMethodDef pdecimalObject_methods[] = { + {"getquoted", (PyCFunction)pdecimal_getquoted, METH_NOARGS, + "getquoted() -> wrapped object value as SQL-quoted string"}, + {"__conform__", (PyCFunction)pdecimal_conform, METH_VARARGS, NULL}, + {NULL} /* Sentinel */ +}; + +/* initialization and finalization methods 
*/ + +static int +pdecimal_setup(pdecimalObject *self, PyObject *obj) +{ + Dprintf("pdecimal_setup: init pdecimal object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + + Py_INCREF(obj); + self->wrapped = obj; + + Dprintf("pdecimal_setup: good pdecimal object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + return 0; +} + +static void +pdecimal_dealloc(PyObject* obj) +{ + pdecimalObject *self = (pdecimalObject *)obj; + + Py_CLEAR(self->wrapped); + + Dprintf("pdecimal_dealloc: deleted pdecimal object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + obj, Py_REFCNT(obj) + ); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +pdecimal_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *o; + + if (!PyArg_ParseTuple(args, "O", &o)) + return -1; + + return pdecimal_setup((pdecimalObject *)obj, o); +} + +static PyObject * +pdecimal_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define pdecimalType_doc \ +"Decimal(str) -> new Decimal adapter object" + +PyTypeObject pdecimalType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2._psycopg.Decimal", + sizeof(pdecimalObject), 0, + pdecimal_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)pdecimal_str, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + pdecimalType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + pdecimalObject_methods, /*tp_methods*/ + pdecimalObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + pdecimal_init, 
/*tp_init*/ + 0, /*tp_alloc*/ + pdecimal_new, /*tp_new*/ +}; diff --git a/psycopg/adapter_pdecimal.h b/psycopg/adapter_pdecimal.h new file mode 100644 index 0000000000000000000000000000000000000000..24b5ec57da34afdde0a77edf95face19a09351ec --- /dev/null +++ b/psycopg/adapter_pdecimal.h @@ -0,0 +1,48 @@ +/* adapter_pdecimal.h - definition for the psycopg Decimal type wrapper + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_PDECIMAL_H +#define PSYCOPG_PDECIMAL_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* Type object for the Decimal adapter (implemented in adapter_pdecimal.c). */ +extern HIDDEN PyTypeObject pdecimalType; + +/* Adapter state: wraps a Python Decimal-like object; getquoted() renders + * it with str(), and maps non-finite values to 'NaN'::numeric. */ +typedef struct { + PyObject_HEAD + + /* this is the real object we wrap */ + PyObject *wrapped; + +} pdecimalObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_PDECIMAL_H) */ diff --git a/psycopg/adapter_pfloat.c b/psycopg/adapter_pfloat.c new file mode 100644 index 0000000000000000000000000000000000000000..9893523b011b426e74db403cf09469a07029c324 --- /dev/null +++ b/psycopg/adapter_pfloat.c @@ -0,0 +1,221 @@ +/* adapter_float.c - psycopg pfloat type wrapper implementation + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details.
+ */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/adapter_pfloat.h" +#include "psycopg/microprotocols_proto.h" + +/* NOTE(review): the header names of the two system includes below were lost + * in transcription (angle-bracket content stripped); presumably <floatobject.h> + * and <math.h> for isnan()/isinf() — confirm against the original file. */ +#include +#include + + +/** the Float object **/ + +/* pfloat_getquoted - return the wrapped float as a bytes SQL literal. + * + * NaN and +/- infinity become the quoted literals 'NaN'::float and + * '[-]Infinity'::float; finite values use Python repr() encoded to UTF-8, + * with a space prepended to negative numbers (ticket #57). + * Returns a new bytes object, or NULL on error. */ +static PyObject * +pfloat_getquoted(pfloatObject *self, PyObject *args) +{ + PyObject *rv; + /* NOTE(review): if wrapped is not number-like, PyFloat_AsDouble returns + * -1.0 with an exception set and we still fall into the repr() branch — + * presumably wrapped is always a real float here; confirm with callers. */ + double n = PyFloat_AsDouble(self->wrapped); + if (isnan(n)) + rv = Bytes_FromString("'NaN'::float"); + else if (isinf(n)) { + if (n > 0) + rv = Bytes_FromString("'Infinity'::float"); + else + rv = Bytes_FromString("'-Infinity'::float"); + } + else { + if (!(rv = PyObject_Repr(self->wrapped))) { + goto exit; + } + + /* unicode to bytes */ + { + PyObject *tmp = PyUnicode_AsUTF8String(rv); + Py_DECREF(rv); + if (!(rv = tmp)) { + goto exit; + } + } + + if ('-' == Bytes_AS_STRING(rv)[0]) { + /* Prepend a space in front of negative numbers (ticket #57) */ + PyObject *tmp; + if (!(tmp = Bytes_FromString(" "))) { + Py_DECREF(rv); + rv = NULL; + goto exit; + } + Bytes_ConcatAndDel(&tmp, rv); + if (!(rv = tmp)) { + goto exit; + } + } + } + +exit: + return rv; +} + +/* pfloat_str - str() of the adapter: the getquoted() result as text. */ +static PyObject * +pfloat_str(pfloatObject *self) +{ + return psyco_ensure_text(pfloat_getquoted(self, NULL)); +} + +/* pfloat_conform - __conform__ hook: return self when asked for the + * ISQLQuote protocol, None for any other protocol. */ +static PyObject * +pfloat_conform(pfloatObject *self, PyObject *args) +{ + PyObject *res, *proto; + + if (!PyArg_ParseTuple(args, "O", &proto)) return NULL; + + if (proto == (PyObject*)&isqlquoteType) + res = (PyObject*)self; + else + res = Py_None; + + Py_INCREF(res); + return res; +} + +/** the Float object */ + +/* object member list */ + +static struct PyMemberDef pfloatObject_members[] = { + {"adapted", T_OBJECT, offsetof(pfloatObject, wrapped), READONLY}, + {NULL} +}; + +/* object method table */ + +static PyMethodDef pfloatObject_methods[] = { + {"getquoted", (PyCFunction)pfloat_getquoted, METH_NOARGS, + "getquoted() -> wrapped object value as SQL-quoted string"}, + {"__conform__", (PyCFunction)pfloat_conform, METH_VARARGS, NULL}, + {NULL} /* Sentinel */ +}; + +/* initialization and finalization methods */
+ +static int +pfloat_setup(pfloatObject *self, PyObject *obj) +{ + Dprintf("pfloat_setup: init pfloat object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + + Py_INCREF(obj); + self->wrapped = obj; + + Dprintf("pfloat_setup: good pfloat object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + return 0; +} + +static void +pfloat_dealloc(PyObject* obj) +{ + pfloatObject *self = (pfloatObject *)obj; + + Py_CLEAR(self->wrapped); + + Dprintf("pfloat_dealloc: deleted pfloat object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + obj, Py_REFCNT(obj) + ); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +pfloat_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *o; + + if (!PyArg_ParseTuple(args, "O", &o)) + return -1; + + return pfloat_setup((pfloatObject *)obj, o); +} + +static PyObject * +pfloat_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define pfloatType_doc \ +"Float(str) -> new Float adapter object" + +PyTypeObject pfloatType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.Float", + sizeof(pfloatObject), 0, + pfloat_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)pfloat_str, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + pfloatType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + pfloatObject_methods, /*tp_methods*/ + pfloatObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + pfloat_init, /*tp_init*/ + 0, /*tp_alloc*/ + pfloat_new, /*tp_new*/ +}; diff 
--git a/psycopg/adapter_pfloat.h b/psycopg/adapter_pfloat.h new file mode 100644 index 0000000000000000000000000000000000000000..8a125640bbae22b7559b9b75b4a9a0c25c44740e --- /dev/null +++ b/psycopg/adapter_pfloat.h @@ -0,0 +1,48 @@ +/* adapter_pfloat.h - definition for the psycopg float type wrapper + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_PFLOAT_H +#define PSYCOPG_PFLOAT_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject pfloatType; + +typedef struct { + PyObject_HEAD + + /* this is the real object we wrap */ + PyObject *wrapped; + +} pfloatObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_PFLOAT_H) */ diff --git a/psycopg/adapter_pint.c b/psycopg/adapter_pint.c new file mode 100644 index 0000000000000000000000000000000000000000..d3cf5080e66b79c60aadf55909ba2b0a99f1cbb3 --- /dev/null +++ b/psycopg/adapter_pint.c @@ -0,0 +1,222 @@ +/* adapter_int.c - psycopg pint type wrapper implementation + * + * Copyright (C) 2011-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/adapter_pint.h" +#include "psycopg/microprotocols_proto.h" + + +/** the Int object **/ + +static PyObject * +pint_getquoted(pintObject *self, PyObject *args) +{ + PyObject *res = NULL; + + /* Convert subclass to int to handle IntEnum and other subclasses + * whose str() is not the number. */ + if (PyLong_CheckExact(self->wrapped)) { + res = PyObject_Str(self->wrapped); + } else { + PyObject *tmp; + if (!(tmp = PyObject_CallFunctionObjArgs( + (PyObject *)&PyLong_Type, self->wrapped, NULL))) { + goto exit; + } + res = PyObject_Str(tmp); + Py_DECREF(tmp); + } + + if (!res) { + goto exit; + } + + /* unicode to bytes */ + { + PyObject *tmp = PyUnicode_AsUTF8String(res); + Py_DECREF(res); + if (!(res = tmp)) { + goto exit; + } + } + + if ('-' == Bytes_AS_STRING(res)[0]) { + /* Prepend a space in front of negative numbers (ticket #57) */ + PyObject *tmp; + if (!(tmp = Bytes_FromString(" "))) { + Py_DECREF(res); + res = NULL; + goto exit; + } + Bytes_ConcatAndDel(&tmp, res); + if (!(res = tmp)) { + goto exit; + } + } + +exit: + return res; +} + +static PyObject * +pint_str(pintObject *self) +{ + return psyco_ensure_text(pint_getquoted(self, NULL)); +} + +static PyObject * +pint_conform(pintObject *self, PyObject *args) +{ + PyObject *res, *proto; + + if (!PyArg_ParseTuple(args, "O", &proto)) return NULL; + + if (proto == (PyObject*)&isqlquoteType) + res = (PyObject*)self; + else + res = Py_None; + + Py_INCREF(res); + return res; +} + +/** the int object */ + +/* object member list */ + +static struct PyMemberDef pintObject_members[] = { + {"adapted", T_OBJECT, offsetof(pintObject, wrapped), READONLY}, + {NULL} +}; + +/* object method table */ + +static PyMethodDef pintObject_methods[] = { + {"getquoted", (PyCFunction)pint_getquoted, METH_NOARGS, + "getquoted() -> wrapped object value as SQL-quoted string"}, + {"__conform__", (PyCFunction)pint_conform, METH_VARARGS, NULL}, + {NULL} /* Sentinel 
*/ +}; + +/* initialization and finalization methods */ + +static int +pint_setup(pintObject *self, PyObject *obj) +{ + Dprintf("pint_setup: init pint object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + + Py_INCREF(obj); + self->wrapped = obj; + + Dprintf("pint_setup: good pint object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + return 0; +} + +static void +pint_dealloc(PyObject* obj) +{ + pintObject *self = (pintObject *)obj; + + Py_CLEAR(self->wrapped); + + Dprintf("pint_dealloc: deleted pint object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + obj, Py_REFCNT(obj) + ); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +pint_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *o; + + if (!PyArg_ParseTuple(args, "O", &o)) + return -1; + + return pint_setup((pintObject *)obj, o); +} + +static PyObject * +pint_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define pintType_doc \ +"Int(str) -> new Int adapter object" + +PyTypeObject pintType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.Int", + sizeof(pintObject), 0, + pint_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)pint_str, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + pintType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + pintObject_methods, /*tp_methods*/ + pintObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + pint_init, /*tp_init*/ + 0, /*tp_alloc*/ + pint_new, /*tp_new*/ +}; diff 
--git a/psycopg/adapter_pint.h b/psycopg/adapter_pint.h new file mode 100644 index 0000000000000000000000000000000000000000..49ad8b297179a29c310a935539a8c24eb1afb35d --- /dev/null +++ b/psycopg/adapter_pint.h @@ -0,0 +1,48 @@ +/* adapter_pint.h - definition for the psycopg int type wrapper + * + * Copyright (C) 2011-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_PINT_H +#define PSYCOPG_PINT_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject pintType; + +typedef struct { + PyObject_HEAD + + /* this is the real object we wrap */ + PyObject *wrapped; + +} pintObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_PINT_H) */ diff --git a/psycopg/adapter_qstring.c b/psycopg/adapter_qstring.c new file mode 100644 index 0000000000000000000000000000000000000000..3a3ad635910dab8598c59f5d2bdbcc9629e9cd60 --- /dev/null +++ b/psycopg/adapter_qstring.c @@ -0,0 +1,307 @@ +/* adapter_qstring.c - QuotedString objects + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/connection.h" +#include "psycopg/adapter_qstring.h" +#include "psycopg/microprotocols_proto.h" + +#include <string.h> + +static const char *default_encoding = "latin1"; + +/* qstring_quote - do the quote process on plain and unicode strings */ + +static PyObject * +qstring_quote(qstringObject *self) +{ + PyObject *str = NULL; + char *s, *buffer = NULL; + Py_ssize_t len, qlen; + const char *encoding; + PyObject *rv = NULL; + + if (PyUnicode_Check(self->wrapped)) { + if (self->conn) { + if (!(str = conn_encode(self->conn, self->wrapped))) { goto exit; } + } + else { + encoding = self->encoding ? self->encoding : default_encoding; + if(!(str = PyUnicode_AsEncodedString(self->wrapped, encoding, NULL))) { + goto exit; + } + } + } + + /* if the wrapped object is a binary string, we don't know how to + (re)encode it, so we pass it as-is */ + else if (Bytes_Check(self->wrapped)) { + str = self->wrapped; + /* INCREF to make it ref-wise identical to unicode one */ + Py_INCREF(str); + } + + /* if the wrapped object is not a string, this is an error */ + else { + PyErr_SetString(PyExc_TypeError, "can't quote non-string object"); + goto exit; + } + + /* encode the string into buffer */ + Bytes_AsStringAndSize(str, &s, &len); + if (!(buffer = psyco_escape_string(self->conn, s, len, NULL, &qlen))) { + goto exit; + } + + if (qlen > PY_SSIZE_T_MAX) { + PyErr_SetString(PyExc_IndexError, + "PG buffer too large to fit in Python buffer."); + goto exit; + } + + rv = Bytes_FromStringAndSize(buffer, qlen); + +exit: + PyMem_Free(buffer); + Py_XDECREF(str); + + return rv; +} + +/* qstring_str, qstring_getquoted - return result of quoting */ + +static PyObject * +qstring_getquoted(qstringObject *self, PyObject *args) +{ + if (self->buffer == NULL) { + self->buffer = qstring_quote(self); + } + Py_XINCREF(self->buffer); + return self->buffer; +} + +static PyObject * +qstring_str(qstringObject *self) +{ + return 
psyco_ensure_text(qstring_getquoted(self, NULL)); +} + +static PyObject * +qstring_prepare(qstringObject *self, PyObject *args) +{ + PyObject *conn; + + if (!PyArg_ParseTuple(args, "O!", &connectionType, &conn)) + return NULL; + + Py_CLEAR(self->conn); + Py_INCREF(conn); + self->conn = (connectionObject *)conn; + + Py_RETURN_NONE; +} + +static PyObject * +qstring_conform(qstringObject *self, PyObject *args) +{ + PyObject *res, *proto; + + if (!PyArg_ParseTuple(args, "O", &proto)) return NULL; + + if (proto == (PyObject*)&isqlquoteType) + res = (PyObject*)self; + else + res = Py_None; + + Py_INCREF(res); + return res; +} + +static PyObject * +qstring_get_encoding(qstringObject *self) +{ + if (self->conn) { + return conn_pgenc_to_pyenc(self->conn->encoding, NULL); + } + else { + return Text_FromUTF8(self->encoding ? self->encoding : default_encoding); + } +} + +static int +qstring_set_encoding(qstringObject *self, PyObject *pyenc) +{ + int rv = -1; + const char *tmp; + char *cenc; + + /* get a C copy of the encoding (which may come from unicode) */ + Py_INCREF(pyenc); + if (!(pyenc = psyco_ensure_bytes(pyenc))) { goto exit; } + if (!(tmp = Bytes_AsString(pyenc))) { goto exit; } + if (0 > psyco_strdup(&cenc, tmp, -1)) { goto exit; } + + Dprintf("qstring_set_encoding: encoding set to %s", cenc); + PyMem_Free((void *)self->encoding); + self->encoding = cenc; + rv = 0; + +exit: + Py_XDECREF(pyenc); + return rv; +} + +/** the QuotedString object **/ + +/* object member list */ + +static struct PyMemberDef qstringObject_members[] = { + {"adapted", T_OBJECT, offsetof(qstringObject, wrapped), READONLY}, + {"buffer", T_OBJECT, offsetof(qstringObject, buffer), READONLY}, + {NULL} +}; + +/* object method table */ + +static PyMethodDef qstringObject_methods[] = { + {"getquoted", (PyCFunction)qstring_getquoted, METH_NOARGS, + "getquoted() -> wrapped object value as SQL-quoted string"}, + {"prepare", (PyCFunction)qstring_prepare, METH_VARARGS, + "prepare(conn) -> set encoding to 
conn->encoding and store conn"}, + {"__conform__", (PyCFunction)qstring_conform, METH_VARARGS, NULL}, + {NULL} /* Sentinel */ +}; + +static PyGetSetDef qstringObject_getsets[] = { + { "encoding", + (getter)qstring_get_encoding, + (setter)qstring_set_encoding, + "current encoding of the adapter" }, + {NULL} +}; + +/* initialization and finalization methods */ + +static int +qstring_setup(qstringObject *self, PyObject *str) +{ + Dprintf("qstring_setup: init qstring object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + + Py_INCREF(str); + self->wrapped = str; + + Dprintf("qstring_setup: good qstring object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + return 0; +} + +static void +qstring_dealloc(PyObject* obj) +{ + qstringObject *self = (qstringObject *)obj; + + Py_CLEAR(self->wrapped); + Py_CLEAR(self->buffer); + Py_CLEAR(self->conn); + PyMem_Free((void *)self->encoding); + + Dprintf("qstring_dealloc: deleted qstring object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + obj, Py_REFCNT(obj) + ); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +qstring_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *str; + + if (!PyArg_ParseTuple(args, "O", &str)) + return -1; + + return qstring_setup((qstringObject *)obj, str); +} + +static PyObject * +qstring_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define qstringType_doc \ +"QuotedString(str) -> new quoted object" + +PyTypeObject qstringType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.QuotedString", + sizeof(qstringObject), 0, + qstring_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)qstring_str, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + 
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + qstringType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + qstringObject_methods, /*tp_methods*/ + qstringObject_members, /*tp_members*/ + qstringObject_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + qstring_init, /*tp_init*/ + 0, /*tp_alloc*/ + qstring_new, /*tp_new*/ +}; diff --git a/psycopg/adapter_qstring.h b/psycopg/adapter_qstring.h new file mode 100644 index 0000000000000000000000000000000000000000..7e139bae392ccec156aaa3c2003dbb2b65691062 --- /dev/null +++ b/psycopg/adapter_qstring.h @@ -0,0 +1,52 @@ +/* adapter_qstring.h - definition for the QuotedString type + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_QSTRING_H +#define PSYCOPG_QSTRING_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject qstringType; + +typedef struct { + PyObject_HEAD + + PyObject *wrapped; + PyObject *buffer; + + connectionObject *conn; + + const char *encoding; + +} qstringObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_QSTRING_H) */ diff --git a/psycopg/aix_support.c b/psycopg/aix_support.c new file mode 100644 index 0000000000000000000000000000000000000000..941bcab61e455483fcdf3306a6d354c2d25cf302 --- /dev/null +++ b/psycopg/aix_support.c @@ -0,0 +1,58 @@ +/* aix_support.c - emulate functions missing on AIX + * + * Copyright (C) 2017 My Karlsson + * Copyright (c) 2018, Joyent, Inc. + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" +#include "psycopg/aix_support.h" + +#if defined(_AIX) +/* timeradd is missing on AIX */ +#ifndef timeradd +void +timeradd(struct timeval *a, struct timeval *b, struct timeval *c) +{ + c->tv_sec = a->tv_sec + b->tv_sec; + c->tv_usec = a->tv_usec + b->tv_usec; + if (c->tv_usec >= 1000000) { + c->tv_usec -= 1000000; + c->tv_sec += 1; + } +} + +/* timersub is missing on AIX */ +void +timersub(struct timeval *a, struct timeval *b, struct timeval *c) +{ + c->tv_sec = a->tv_sec - b->tv_sec; + c->tv_usec = a->tv_usec - b->tv_usec; + if (c->tv_usec < 0) { + c->tv_usec += 1000000; + c->tv_sec -= 1; + } +} +#endif /* timeradd */ +#endif /* defined(_AIX)*/ diff --git a/psycopg/aix_support.h b/psycopg/aix_support.h new file mode 100644 index 0000000000000000000000000000000000000000..14c1220cf0667b417e6b22a4aea908be1b66afff --- /dev/null +++ b/psycopg/aix_support.h @@ -0,0 +1,48 @@ +/* aix_support.h - definitions for aix_support.c + * + * Copyright (C) 2017 My Karlsson + * Copyright (c) 2018-2019, Joyent, Inc. + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. 
+ * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ +#ifndef PSYCOPG_AIX_SUPPORT_H +#define PSYCOPG_AIX_SUPPORT_H + +#include "psycopg/config.h" + +#ifdef _AIX +#include + +#ifndef timeradd +extern HIDDEN void timeradd(struct timeval *a, struct timeval *b, struct timeval *c); +extern HIDDEN void timersub(struct timeval *a, struct timeval *b, struct timeval *c); +#endif + +#ifndef timercmp +#define timercmp(a, b, cmp) \ + (((a)->tv_sec == (b)->tv_sec) ? \ + ((a)->tv_usec cmp (b)->tv_usec) : \ + ((a)->tv_sec cmp (b)->tv_sec)) +#endif +#endif + +#endif /* !defined(PSYCOPG_AIX_SUPPORT_H) */ diff --git a/psycopg/bytes_format.c b/psycopg/bytes_format.c new file mode 100644 index 0000000000000000000000000000000000000000..d34a01710c699617436eb90acbf5956404e2f957 --- /dev/null +++ b/psycopg/bytes_format.c @@ -0,0 +1,309 @@ +/* bytes_format.c - bytes-oriented version of PyString_Format + * + * Copyright (C) 2010-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. 
+ * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +/* This implementation is based on the PyString_Format function available in + * Python 2.7.1. The function is altered to be used with both Python 2 strings + * and Python 3 bytes and is stripped of the support of formats different than + * 's'. Original license follows. + * + * PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 + * -------------------------------------------- + * + * 1. This LICENSE AGREEMENT is between the Python Software Foundation + * ("PSF"), and the Individual or Organization ("Licensee") accessing and + * otherwise using this software ("Python") in source or binary form and + * its associated documentation. + * + * 2. Subject to the terms and conditions of this License Agreement, PSF hereby + * grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, + * analyze, test, perform and/or display publicly, prepare derivative works, + * distribute, and otherwise use Python alone or in any derivative version, + * provided, however, that PSF's License Agreement and PSF's notice of copyright, + * i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + * Python Software Foundation; All Rights Reserved" are retained in Python alone or + * in any derivative version prepared by Licensee. + * + * 3. In the event Licensee prepares a derivative work that is based on + * or incorporates Python or any part thereof, and wants to make + * the derivative work available to others as provided herein, then + * Licensee hereby agrees to include in any such work a brief summary of + * the changes made to Python. + * + * 4. PSF is making Python available to Licensee on an "AS IS" + * basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR + * IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND + * DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS + * FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT + * INFRINGE ANY THIRD PARTY RIGHTS. + * + * 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON + * FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS + * A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, + * OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + * + * 6. This License Agreement will automatically terminate upon a material + * breach of its terms and conditions. + * + * 7. Nothing in this License Agreement shall be deemed to create any + * relationship of agency, partnership, or joint venture between PSF and + * Licensee. This License Agreement does not grant permission to use PSF + * trademarks or trade name in a trademark sense to endorse or promote + * products or services of Licensee, or any third party. + * + * 8. By copying, installing or otherwise using Python, Licensee + * agrees to be bound by the terms and conditions of this License + * Agreement. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" +#include "pyport.h" + +/* Helpers for formatstring */ + +BORROWED Py_LOCAL_INLINE(PyObject *) +getnextarg(PyObject *args, Py_ssize_t arglen, Py_ssize_t *p_argidx) +{ + Py_ssize_t argidx = *p_argidx; + if (argidx < arglen) { + (*p_argidx)++; + if (arglen < 0) + return args; + else + return PyTuple_GetItem(args, argidx); + } + PyErr_SetString(PyExc_TypeError, + "not enough arguments for format string"); + return NULL; +} + +/* wrapper around _Bytes_Resize offering normal Python call semantics */ + +STEALS(1) +Py_LOCAL_INLINE(PyObject *) +resize_bytes(PyObject *b, Py_ssize_t newsize) { + if (0 == _Bytes_Resize(&b, newsize)) { + return b; + } + else { + return NULL; + } +} + +/* fmt%(v1,v2,...) is roughly equivalent to sprintf(fmt, v1, v2, ...) 
*/ + +PyObject * +Bytes_Format(PyObject *format, PyObject *args) +{ + char *fmt, *res; + Py_ssize_t arglen, argidx; + Py_ssize_t reslen, rescnt, fmtcnt; + int args_owned = 0; + PyObject *result; + PyObject *dict = NULL; + if (format == NULL || !Bytes_Check(format) || args == NULL) { + PyErr_SetString(PyExc_SystemError, "bad argument to internal function"); + return NULL; + } + fmt = Bytes_AS_STRING(format); + fmtcnt = Bytes_GET_SIZE(format); + reslen = rescnt = fmtcnt + 100; + result = Bytes_FromStringAndSize((char *)NULL, reslen); + if (result == NULL) + return NULL; + res = Bytes_AS_STRING(result); + if (PyTuple_Check(args)) { + arglen = PyTuple_GET_SIZE(args); + argidx = 0; + } + else { + arglen = -1; + argidx = -2; + } + if (Py_TYPE(args)->tp_as_mapping && !PyTuple_Check(args) && + !PyObject_TypeCheck(args, &Bytes_Type)) + dict = args; + while (--fmtcnt >= 0) { + if (*fmt != '%') { + if (--rescnt < 0) { + rescnt = fmtcnt + 100; + reslen += rescnt; + if (!(result = resize_bytes(result, reslen))) { + return NULL; + } + res = Bytes_AS_STRING(result) + reslen - rescnt; + --rescnt; + } + *res++ = *fmt++; + } + else { + /* Got a format specifier */ + Py_ssize_t width = -1; + int c = '\0'; + PyObject *v = NULL; + PyObject *temp = NULL; + char *pbuf; + Py_ssize_t len; + fmt++; + if (*fmt == '(') { + char *keystart; + Py_ssize_t keylen; + PyObject *key; + int pcount = 1; + + if (dict == NULL) { + PyErr_SetString(PyExc_TypeError, + "format requires a mapping"); + goto error; + } + ++fmt; + --fmtcnt; + keystart = fmt; + /* Skip over balanced parentheses */ + while (pcount > 0 && --fmtcnt >= 0) { + if (*fmt == ')') + --pcount; + else if (*fmt == '(') + ++pcount; + fmt++; + } + keylen = fmt - keystart - 1; + if (fmtcnt < 0 || pcount > 0) { + PyErr_SetString(PyExc_ValueError, + "incomplete format key"); + goto error; + } + key = Text_FromUTF8AndSize(keystart, keylen); + if (key == NULL) + goto error; + if (args_owned) { + Py_DECREF(args); + args_owned = 0; + } + args = 
PyObject_GetItem(dict, key); + Py_DECREF(key); + if (args == NULL) { + goto error; + } + args_owned = 1; + arglen = -1; + argidx = -2; + } + while (--fmtcnt >= 0) { + c = *fmt++; + break; + } + if (fmtcnt < 0) { + PyErr_SetString(PyExc_ValueError, + "incomplete format"); + goto error; + } + switch (c) { + case '%': + pbuf = "%"; + len = 1; + break; + case 's': + /* only bytes! */ + if (!(v = getnextarg(args, arglen, &argidx))) + goto error; + if (!Bytes_CheckExact(v)) { + PyErr_Format(PyExc_ValueError, + "only bytes values expected, got %s", + Py_TYPE(v)->tp_name); + goto error; + } + temp = v; + Py_INCREF(v); + pbuf = Bytes_AS_STRING(temp); + len = Bytes_GET_SIZE(temp); + break; + default: + PyErr_Format(PyExc_ValueError, + "unsupported format character '%c' (0x%x) " + "at index " FORMAT_CODE_PY_SSIZE_T, + c, c, + (Py_ssize_t)(fmt - 1 - Bytes_AS_STRING(format))); + goto error; + } + if (width < len) + width = len; + if (rescnt < width) { + reslen -= rescnt; + rescnt = width + fmtcnt + 100; + reslen += rescnt; + if (reslen < 0) { + Py_DECREF(result); + Py_XDECREF(temp); + if (args_owned) + Py_DECREF(args); + return PyErr_NoMemory(); + } + if (!(result = resize_bytes(result, reslen))) { + Py_XDECREF(temp); + if (args_owned) + Py_DECREF(args); + return NULL; + } + res = Bytes_AS_STRING(result) + + reslen - rescnt; + } + Py_MEMCPY(res, pbuf, len); + res += len; + rescnt -= len; + while (--width >= len) { + --rescnt; + *res++ = ' '; + } + if (dict && (argidx < arglen) && c != '%') { + PyErr_SetString(PyExc_TypeError, + "not all arguments converted during string formatting"); + Py_XDECREF(temp); + goto error; + } + Py_XDECREF(temp); + } /* '%' */ + } /* until end */ + if (argidx < arglen && !dict) { + PyErr_SetString(PyExc_TypeError, + "not all arguments converted during string formatting"); + goto error; + } + if (args_owned) { + Py_DECREF(args); + } + if (!(result = resize_bytes(result, reslen - rescnt))) { + return NULL; + } + return result; + + error: + 
Py_DECREF(result); + if (args_owned) { + Py_DECREF(args); + } + return NULL; +} diff --git a/psycopg/column.h b/psycopg/column.h new file mode 100644 index 0000000000000000000000000000000000000000..1173fb55ce21e2032c635cf60db5a121513d9b74 --- /dev/null +++ b/psycopg/column.h @@ -0,0 +1,49 @@ +/* column.h - definition for a column in cursor.description type + * + * Copyright (C) 2018-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_COLUMN_H +#define PSYCOPG_COLUMN_H 1 + +extern HIDDEN PyTypeObject columnType; + +typedef struct { + PyObject_HEAD + + PyObject *name; + PyObject *type_code; + PyObject *display_size; + PyObject *internal_size; + PyObject *precision; + PyObject *scale; + PyObject *null_ok; + + /* Extensions to the DBAPI */ + PyObject *table_oid; + PyObject *table_column; + +} columnObject; + +#endif /* PSYCOPG_COLUMN_H */ diff --git a/psycopg/column_type.c b/psycopg/column_type.c new file mode 100644 index 0000000000000000000000000000000000000000..2f989507afbdb5e9eac46376d8d09951ee57acfc --- /dev/null +++ b/psycopg/column_type.c @@ -0,0 +1,420 @@ +/* column_type.c - python interface to cursor.description objects + * + * Copyright (C) 2018-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/column.h" + + +static const char column_doc[] = + "Description of a column returned by a query.\n\n" + "The DBAPI demands this object to be a 7-items sequence. This object\n" + "respects this interface, but adds names for the exposed attributes\n" + "and adds attribute not requested by the DBAPI."; + +static const char name_doc[] = + "The name of the column returned."; + +static const char type_code_doc[] = + "The PostgreSQL OID of the column.\n\n" + "You can use the pg_type system table to get more informations about the\n" + "type. This is the value used by Psycopg to decide what Python type use\n" + "to represent the value"; + +static const char display_size_doc[] = + "The actual length of the column in bytes.\n\n" + "Obtaining this value is computationally intensive, so it is always None"; + +static const char internal_size_doc[] = + "The size in bytes of the column associated to this column on the server.\n\n" + "Set to a negative value for variable-size types."; + +static const char precision_doc[] = + "Total number of significant digits in columns of type NUMERIC.\n\n" + "None for other types."; + +static const char scale_doc[] = + "Count of decimal digits in the fractional part in columns of type NUMERIC.\n\n" + "None for other types."; + +static const char null_ok_doc[] = + "Always none."; + +static const char table_oid_doc[] = + "The OID of the table from which the column was fetched.\n\n" + "None if not available"; + +static const char table_column_doc[] = + "The number (within its table) of the column making up the result\n\n" + "None if not available. 
Note that PostgreSQL column numbers start at 1"; + + +static PyMemberDef column_members[] = { + { "name", T_OBJECT, offsetof(columnObject, name), READONLY, (char *)name_doc }, + { "type_code", T_OBJECT, offsetof(columnObject, type_code), READONLY, (char *)type_code_doc }, + { "display_size", T_OBJECT, offsetof(columnObject, display_size), READONLY, (char *)display_size_doc }, + { "internal_size", T_OBJECT, offsetof(columnObject, internal_size), READONLY, (char *)internal_size_doc }, + { "precision", T_OBJECT, offsetof(columnObject, precision), READONLY, (char *)precision_doc }, + { "scale", T_OBJECT, offsetof(columnObject, scale), READONLY, (char *)scale_doc }, + { "null_ok", T_OBJECT, offsetof(columnObject, null_ok), READONLY, (char *)null_ok_doc }, + { "table_oid", T_OBJECT, offsetof(columnObject, table_oid), READONLY, (char *)table_oid_doc }, + { "table_column", T_OBJECT, offsetof(columnObject, table_column), READONLY, (char *)table_column_doc }, + { NULL } +}; + + +static PyObject * +column_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) +{ + return type->tp_alloc(type, 0); +} + + +static int +column_init(columnObject *self, PyObject *args, PyObject *kwargs) +{ + PyObject *name = NULL; + PyObject *type_code = NULL; + PyObject *display_size = NULL; + PyObject *internal_size = NULL; + PyObject *precision = NULL; + PyObject *scale = NULL; + PyObject *null_ok = NULL; + PyObject *table_oid = NULL; + PyObject *table_column = NULL; + + static char *kwlist[] = { + "name", "type_code", "display_size", "internal_size", + "precision", "scale", "null_ok", "table_oid", "table_column", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOOOOOOO", kwlist, + &name, &type_code, &display_size, &internal_size, &precision, + &scale, &null_ok, &table_oid, &table_column)) { + return -1; + } + + Py_XINCREF(name); self->name = name; + Py_XINCREF(type_code); self->type_code = type_code; + Py_XINCREF(display_size); self->display_size = display_size; + 
Py_XINCREF(internal_size); self->internal_size = internal_size; + Py_XINCREF(precision); self->precision = precision; + Py_XINCREF(scale); self->scale = scale; + Py_XINCREF(null_ok); self->null_ok = null_ok; + Py_XINCREF(table_oid); self->table_oid = table_oid; + Py_XINCREF(table_column); self->table_column = table_column; + + return 0; +} + + +static void +column_dealloc(columnObject *self) +{ + Py_CLEAR(self->name); + Py_CLEAR(self->type_code); + Py_CLEAR(self->display_size); + Py_CLEAR(self->internal_size); + Py_CLEAR(self->precision); + Py_CLEAR(self->scale); + Py_CLEAR(self->null_ok); + Py_CLEAR(self->table_oid); + Py_CLEAR(self->table_column); + + Py_TYPE(self)->tp_free((PyObject *)self); +} + + +static PyObject* +column_repr(columnObject *self) +{ + PyObject *rv = NULL; + PyObject *format = NULL; + PyObject *args = NULL; + PyObject *tmp; + + if (!(format = Text_FromUTF8("Column(name=%r, type_code=%r)"))) { + goto exit; + } + + if (!(args = PyTuple_New(2))) { goto exit; } + + tmp = self->name ? self->name : Py_None; + Py_INCREF(tmp); + PyTuple_SET_ITEM(args, 0, tmp); + + tmp = self->type_code ? 
self->type_code : Py_None; + Py_INCREF(tmp); + PyTuple_SET_ITEM(args, 1, tmp); + + rv = Text_Format(format, args); + +exit: + Py_XDECREF(args); + Py_XDECREF(format); + + return rv; +} + + +static PyObject * +column_richcompare(columnObject *self, PyObject *other, int op) +{ + PyObject *rv = NULL; + PyObject *tself = NULL; + + if (!(tself = PyObject_CallFunctionObjArgs( + (PyObject *)&PyTuple_Type, (PyObject *)self, NULL))) { + goto exit; + } + + rv = PyObject_RichCompare(tself, other, op); + +exit: + Py_XDECREF(tself); + return rv; +} + + +/* column description can be accessed as a 7 items tuple for DBAPI compatibility */ + +static Py_ssize_t +column_len(columnObject *self) +{ + return 7; +} + + +static PyObject * +column_getitem(columnObject *self, Py_ssize_t item) +{ + PyObject *rv = NULL; + + if (item < 0) + item += 7; + + switch (item) { + case 0: + rv = self->name; + break; + case 1: + rv = self->type_code; + break; + case 2: + rv = self->display_size; + break; + case 3: + rv = self->internal_size; + break; + case 4: + rv = self->precision; + break; + case 5: + rv = self->scale; + break; + case 6: + rv = self->null_ok; + break; + default: + PyErr_SetString(PyExc_IndexError, "index out of range"); + return NULL; + } + + if (!rv) { + rv = Py_None; + } + + Py_INCREF(rv); + return rv; +} + + +static PyObject* +column_subscript(columnObject* self, PyObject* item) +{ + PyObject *t = NULL; + PyObject *rv = NULL; + + /* t = tuple(self) */ + if (!(t = PyObject_CallFunctionObjArgs( + (PyObject *)&PyTuple_Type, (PyObject *)self, NULL))) { + goto exit; + } + + /* rv = t[item] */ + rv = PyObject_GetItem(t, item); + +exit: + Py_XDECREF(t); + return rv; +} + +static PyMappingMethods column_mapping = { + (lenfunc)column_len, /* mp_length */ + (binaryfunc)column_subscript, /* mp_subscript */ + 0 /* mp_ass_subscript */ +}; + +static PySequenceMethods column_sequence = { + (lenfunc)column_len, /* sq_length */ + 0, /* sq_concat */ + 0, /* sq_repeat */ + 
(ssizeargfunc)column_getitem, /* sq_item */ + 0, /* sq_slice */ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + 0, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0, /* sq_inplace_repeat */ +}; + + +static PyObject * +column_getstate(columnObject *self, PyObject *dummy) +{ + return PyObject_CallFunctionObjArgs( + (PyObject *)&PyTuple_Type, (PyObject *)self, NULL); +} + + +PyObject * +column_setstate(columnObject *self, PyObject *state) +{ + Py_ssize_t size; + PyObject *rv = NULL; + + if (state == Py_None) { + goto exit; + } + if (!PyTuple_Check(state)) { + PyErr_SetString(PyExc_TypeError, "state is not a tuple"); + goto error; + } + + size = PyTuple_GET_SIZE(state); + + if (size > 0) { + Py_CLEAR(self->name); + self->name = PyTuple_GET_ITEM(state, 0); + Py_INCREF(self->name); + } + if (size > 1) { + Py_CLEAR(self->type_code); + self->type_code = PyTuple_GET_ITEM(state, 1); + Py_INCREF(self->type_code); + } + if (size > 2) { + Py_CLEAR(self->display_size); + self->display_size = PyTuple_GET_ITEM(state, 2); + Py_INCREF(self->display_size); + } + if (size > 3) { + Py_CLEAR(self->internal_size); + self->internal_size = PyTuple_GET_ITEM(state, 3); + Py_INCREF(self->internal_size); + } + if (size > 4) { + Py_CLEAR(self->precision); + self->precision = PyTuple_GET_ITEM(state, 4); + Py_INCREF(self->precision); + } + if (size > 5) { + Py_CLEAR(self->scale); + self->scale = PyTuple_GET_ITEM(state, 5); + Py_INCREF(self->scale); + } + if (size > 6) { + Py_CLEAR(self->null_ok); + self->null_ok = PyTuple_GET_ITEM(state, 6); + Py_INCREF(self->null_ok); + } + if (size > 7) { + Py_CLEAR(self->table_oid); + self->table_oid = PyTuple_GET_ITEM(state, 7); + Py_INCREF(self->table_oid); + } + if (size > 8) { + Py_CLEAR(self->table_column); + self->table_column = PyTuple_GET_ITEM(state, 8); + Py_INCREF(self->table_column); + } + +exit: + rv = Py_None; + Py_INCREF(rv); + +error: + return rv; +} + + +static PyMethodDef column_methods[] = { + /* Make Column picklable. 
*/ + {"__getstate__", (PyCFunction)column_getstate, METH_NOARGS }, + {"__setstate__", (PyCFunction)column_setstate, METH_O }, + {NULL} +}; + + +PyTypeObject columnType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.Column", + sizeof(columnObject), 0, + (destructor)column_dealloc, /* tp_dealloc */ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)column_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + &column_sequence, /*tp_as_sequence*/ + &column_mapping, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + column_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + (richcmpfunc)column_richcompare, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + column_methods, /*tp_methods*/ + column_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)column_init, /*tp_init*/ + 0, /*tp_alloc*/ + column_new, /*tp_new*/ +}; diff --git a/psycopg/config.h b/psycopg/config.h new file mode 100644 index 0000000000000000000000000000000000000000..0830f932e4c3dd246327f21b37ed6a235cbf8b89 --- /dev/null +++ b/psycopg/config.h @@ -0,0 +1,216 @@ +/* config.h - general config and Dprintf macro + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_CONFIG_H +#define PSYCOPG_CONFIG_H 1 + +/* GCC 4.0 and later have support for specifying symbol visibility */ +#if __GNUC__ >= 4 && !defined(__MINGW32__) +# define HIDDEN __attribute__((visibility("hidden"))) +#else +# define HIDDEN +#endif + +/* support for getpid() */ +#if defined( __GNUC__) +#define CONN_CHECK_PID +#include +#include +#endif +#ifdef _WIN32 +/* Windows doesn't seem affected by bug #829: just make it compile. */ +#define pid_t int +#endif + + +/* debug printf-like function */ +#ifdef PSYCOPG_DEBUG +extern HIDDEN int psycopg_debug_enabled; +#endif + +#if defined( __GNUC__) && !defined(__APPLE__) +#ifdef PSYCOPG_DEBUG +#define Dprintf(fmt, args...) \ + if (!psycopg_debug_enabled) ; else \ + fprintf(stderr, "[%d] " fmt "\n", (int) getpid() , ## args) +#else +#define Dprintf(fmt, args...) +#endif +#else /* !__GNUC__ or __APPLE__ */ +#ifdef PSYCOPG_DEBUG +#include +#ifdef _WIN32 +#include +#define getpid _getpid +#endif +static void Dprintf(const char *fmt, ...) +{ + va_list ap; + + if (!psycopg_debug_enabled) + return; + printf("[%d] ", (int) getpid()); + va_start(ap, fmt); + vprintf(fmt, ap); + va_end(ap); + printf("\n"); +} +#else +static void Dprintf(const char *fmt, ...) 
{} +#endif +#endif + +/* pthreads work-arounds for mutilated operating systems */ +#if defined(_WIN32) || defined(__BEOS__) + +#ifdef _WIN32 + +/* A Python extension should be linked to only one C runtime: the same one as + * the Python interpreter itself. Straightforwardly using the strdup function + * causes MinGW to implicitly link to the msvcrt.dll, which is not appropriate + * for any Python version later than 2.3. + * Microsoft C runtimes for Windows 98 and later make a _strdup function + * available, which replaces the "normal" strdup. If we replace all psycopg + * calls to strdup with calls to _strdup, MinGW no longer implicitly links to + * the obsolete C runtime. */ +#define strdup _strdup + +#include +#define pthread_mutex_t HANDLE +#define pthread_condvar_t HANDLE +#define pthread_mutex_lock(object) WaitForSingleObject(*(object), INFINITE) +#define pthread_mutex_unlock(object) ReleaseMutex(*(object)) +#define pthread_mutex_destroy(ref) (CloseHandle(*(ref))) +/* convert pthread mutex to native mutex */ +static int pthread_mutex_init(pthread_mutex_t *mutex, void* fake) +{ + *mutex = CreateMutex(NULL, FALSE, NULL); + return 0; +} +#endif /* _WIN32 */ + +#ifdef __BEOS__ +#include +#define pthread_mutex_t sem_id +#define pthread_mutex_lock(object) acquire_sem(object) +#define pthread_mutex_unlock(object) release_sem(object) +#define pthread_mutex_destroy(ref) delete_sem(*ref) +static int pthread_mutex_init(pthread_mutex_t *mutex, void* fake) +{ + *mutex = create_sem(1, "psycopg_mutex"); + if (*mutex < B_OK) + return *mutex; + return 0; +} +#endif /* __BEOS__ */ + +#else /* pthread is available */ +#include +#endif + +/* to work around the fact that Windows does not have a gmtime_r function, or + a proper gmtime function */ +#ifdef _WIN32 +#define gmtime_r(t, tm) (gmtime(t)?memcpy((tm), gmtime(t), sizeof(*(tm))):NULL) +#define localtime_r(t, tm) (localtime(t)?memcpy((tm), localtime(t), sizeof(*(tm))):NULL) + +/* remove the inline keyword, since it doesn't 
work unless C++ file */ +#define inline + +/* Hmmm, MSVC <2015 doesn't have a isnan/isinf function, but has _isnan function */ +#if defined (_MSC_VER) +#if !defined(isnan) +#define isnan(x) (_isnan(x)) +/* The following line was hacked together from simliar code by Bjorn Reese + * in libxml2 code */ +#define isinf(x) ((_fpclass(x) == _FPCLASS_PINF) ? 1 \ + : ((_fpclass(x) == _FPCLASS_NINF) ? -1 : 0)) +#endif +#define strcasecmp(x, y) lstrcmpi(x, y) + +typedef __int8 int8_t; +typedef __int16 int16_t; +typedef __int32 int32_t; +typedef __int64 int64_t; +typedef unsigned __int8 uint8_t; +typedef unsigned __int16 uint16_t; +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +#endif + +#include "win32_support.h" +#endif + +/* what's this, we have no round function either? */ +#if (defined(_WIN32) && !defined(__GNUC__)) \ + || (defined(sun) || defined(__sun__)) \ + && (defined(__SunOS_5_8) || defined(__SunOS_5_9)) + +/* round has been added in the standard library with MSVC 2015 */ +#if _MSC_VER < 1900 +static double round(double num) +{ + return (num >= 0) ? 
floor(num + 0.5) : ceil(num - 0.5); +} +#endif +#endif + +/* resolve missing isinf() function for Solaris */ +#if defined (__SVR4) && defined (__sun) +#include +#define isinf(x) (!finite((x)) && (x)==(x)) +#endif + +/* decorators for the gcc cpychecker plugin */ +#if defined(WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE) +#define BORROWED \ + __attribute__((cpychecker_returns_borrowed_ref)) +#else +#define BORROWED +#endif + +#if defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) +#define STEALS(n) \ + __attribute__((cpychecker_steals_reference_to_arg(n))) +#else +#define STEALS(n) +#endif + +#if defined(WITH_CPYCHECKER_NEGATIVE_RESULT_SETS_EXCEPTION_ATTRIBUTE) +#define RAISES_NEG \ + __attribute__((cpychecker_negative_result_sets_exception)) +#else +#define RAISES_NEG +#endif + +#if defined(WITH_CPYCHECKER_SETS_EXCEPTION_ATTRIBUTE) +#define RAISES \ + __attribute__((cpychecker_sets_exception)) +#else +#define RAISES +#endif + +#endif /* !defined(PSYCOPG_CONFIG_H) */ diff --git a/psycopg/connection.h b/psycopg/connection.h new file mode 100644 index 0000000000000000000000000000000000000000..6d61c2eb0013b1250d113ca0c75c91fbc7c6cc2c --- /dev/null +++ b/psycopg/connection.h @@ -0,0 +1,229 @@ +/* connection.h - definition for the psycopg connection type + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. 
+ * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_CONNECTION_H +#define PSYCOPG_CONNECTION_H 1 + +#include "psycopg/xid.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* isolation levels */ +#define ISOLATION_LEVEL_AUTOCOMMIT 0 +#define ISOLATION_LEVEL_READ_UNCOMMITTED 4 +#define ISOLATION_LEVEL_READ_COMMITTED 1 +#define ISOLATION_LEVEL_REPEATABLE_READ 2 +#define ISOLATION_LEVEL_SERIALIZABLE 3 +#define ISOLATION_LEVEL_DEFAULT 5 + +/* 3-state values on/off/default */ +#define STATE_OFF 0 +#define STATE_ON 1 +#define STATE_DEFAULT 2 + +/* connection status */ +#define CONN_STATUS_SETUP 0 +#define CONN_STATUS_READY 1 +#define CONN_STATUS_BEGIN 2 +#define CONN_STATUS_PREPARED 5 +/* async connection building statuses */ +#define CONN_STATUS_CONNECTING 20 +#define CONN_STATUS_DATESTYLE 21 + +/* async query execution status */ +#define ASYNC_DONE 0 +#define ASYNC_READ 1 +#define ASYNC_WRITE 2 + +/* polling result */ +#define PSYCO_POLL_OK 0 +#define PSYCO_POLL_READ 1 +#define PSYCO_POLL_WRITE 2 +#define PSYCO_POLL_ERROR 3 + +/* Hard limit on the notices stored by the Python connection */ +#define CONN_NOTICES_LIMIT 50 + +/* we need the initial date style to be ISO, for typecasters; if the user + later change it, she must know what she's doing... 
these are the queries we + need to issue */ +#define psyco_datestyle "SET DATESTYLE TO 'ISO'" + +extern HIDDEN PyTypeObject connectionType; + +struct connectionObject_notice { + struct connectionObject_notice *next; + char *message; +}; + +/* the typedef is forward-declared in psycopg.h */ +struct connectionObject { + PyObject_HEAD + + pthread_mutex_t lock; /* the global connection lock */ + + char *dsn; /* data source name */ + char *error; /* temporarily stored error before raising */ + char *encoding; /* current backend encoding */ + + long int closed; /* 1 means connection has been closed; + 2 that something horrible happened */ + long int mark; /* number of commits/rollbacks done so far */ + int status; /* status of the connection */ + xidObject *tpc_xid; /* Transaction ID in two-phase commit */ + + long int async; /* 1 means the connection is async */ + int protocol; /* protocol version */ + int server_version; /* server version */ + + PGconn *pgconn; /* the postgresql connection */ + PGcancel *cancel; /* the cancellation structure */ + + /* Weakref to the object executing an asynchronous query. The object + * is a cursor for async connections, but it may be something else + * for a green connection. If NULL, the connection is idle. 
*/ + PyObject *async_cursor; + int async_status; /* asynchronous execution status */ + PGresult *pgres; /* temporary result across async calls */ + + /* notice processing */ + PyObject *notice_list; + struct connectionObject_notice *notice_pending; + struct connectionObject_notice *last_notice; + + /* notifies */ + PyObject *notifies; + + /* per-connection typecasters */ + PyObject *string_types; /* a set of typecasters for string types */ + PyObject *binary_types; /* a set of typecasters for binary types */ + + int equote; /* use E''-style quotes for escaped strings */ + PyObject *weakreflist; /* list of weak references */ + + int autocommit; + + PyObject *cursor_factory; /* default cursor factory from cursor() */ + + /* Optional pointer to a decoding C function, e.g. PyUnicode_DecodeUTF8 */ + PyObject *(*cdecoder)(const char *, Py_ssize_t, const char *); + + /* Pointers to python encoding/decoding functions, e.g. + * codecs.getdecoder('utf8') */ + PyObject *pyencoder; /* python codec encoding function */ + PyObject *pydecoder; /* python codec decoding function */ + + /* Values for the transactions characteristics */ + int isolevel; + int readonly; + int deferrable; + + /* the pid this connection was created into */ + pid_t procpid; + + /* inside a with block */ + int entered; +}; + +/* map isolation level values into a numeric const */ +typedef struct { + char *name; + int value; +} IsolationLevel; + +/* C-callable functions in connection_int.c and connection_ext.c */ +HIDDEN PyObject *conn_text_from_chars(connectionObject *pgconn, const char *str); +HIDDEN PyObject *conn_encode(connectionObject *self, PyObject *b); +HIDDEN PyObject *conn_decode(connectionObject *self, const char *str, Py_ssize_t len); +HIDDEN int conn_get_standard_conforming_strings(PGconn *pgconn); +HIDDEN PyObject *conn_pgenc_to_pyenc(const char *encoding, char **clean_encoding); +HIDDEN int conn_get_protocol_version(PGconn *pgconn); +HIDDEN int conn_get_server_version(PGconn *pgconn); +HIDDEN 
void conn_notice_process(connectionObject *self); +HIDDEN void conn_notice_clean(connectionObject *self); +HIDDEN void conn_notifies_process(connectionObject *self); +RAISES_NEG HIDDEN int conn_setup(connectionObject *self); +HIDDEN int conn_connect(connectionObject *self, const char *dsn, long int async); +HIDDEN char *conn_obscure_password(const char *dsn); +HIDDEN void conn_close(connectionObject *self); +HIDDEN void conn_close_locked(connectionObject *self); +RAISES_NEG HIDDEN int conn_commit(connectionObject *self); +RAISES_NEG HIDDEN int conn_rollback(connectionObject *self); +RAISES_NEG HIDDEN int conn_set_session(connectionObject *self, int autocommit, + int isolevel, int readonly, int deferrable); +RAISES_NEG HIDDEN int conn_set_client_encoding(connectionObject *self, const char *enc); +HIDDEN int conn_poll(connectionObject *self); +RAISES_NEG HIDDEN int conn_tpc_begin(connectionObject *self, xidObject *xid); +RAISES_NEG HIDDEN int conn_tpc_command(connectionObject *self, + const char *cmd, xidObject *xid); +HIDDEN PyObject *conn_tpc_recover(connectionObject *self); +HIDDEN void conn_set_result(connectionObject *self, PGresult *pgres); +HIDDEN void conn_set_error(connectionObject *self, const char *msg); + +/* exception-raising macros */ +#define EXC_IF_CONN_CLOSED(self) if ((self)->closed > 0) { \ + PyErr_SetString(InterfaceError, "connection already closed"); \ + return NULL; } + +#define EXC_IF_CONN_ASYNC(self, cmd) if ((self)->async == 1) { \ + PyErr_SetString(ProgrammingError, #cmd " cannot be used " \ + "in asynchronous mode"); \ + return NULL; } + +#define EXC_IF_IN_TRANSACTION(self, cmd) \ + if (self->status != CONN_STATUS_READY) { \ + PyErr_Format(ProgrammingError, \ + "%s cannot be used inside a transaction", #cmd); \ + return NULL; \ + } + +#define EXC_IF_TPC_NOT_SUPPORTED(self) \ + if ((self)->server_version < 80100) { \ + PyErr_Format(NotSupportedError, \ + "server version %d: " \ + "two-phase transactions not supported", \ + 
(self)->server_version); \ + return NULL; \ + } + +#define EXC_IF_TPC_BEGIN(self, cmd) if ((self)->tpc_xid) { \ + PyErr_Format(ProgrammingError, "%s cannot be used " \ + "during a two-phase transaction", #cmd); \ + return NULL; } + +#define EXC_IF_TPC_PREPARED(self, cmd) \ + if ((self)->status == CONN_STATUS_PREPARED) { \ + PyErr_Format(ProgrammingError, "%s cannot be used " \ + "with a prepared two-phase transaction", #cmd); \ + return NULL; } + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_CONNECTION_H) */ diff --git a/psycopg/connection_int.c b/psycopg/connection_int.c new file mode 100644 index 0000000000000000000000000000000000000000..34c695747443cf47d3f0e9e4e28ac947b0f974d2 --- /dev/null +++ b/psycopg/connection_int.c @@ -0,0 +1,1553 @@ +/* connection_int.c - code used by the connection object + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/connection.h" +#include "psycopg/cursor.h" +#include "psycopg/pqpath.h" +#include "psycopg/green.h" +#include "psycopg/notify.h" + +#include + +/* String indexes match the ISOLATION_LEVEL_* consts */ +const char *srv_isolevels[] = { + NULL, /* autocommit */ + "READ COMMITTED", + "REPEATABLE READ", + "SERIALIZABLE", + "READ UNCOMMITTED", + "default" /* only to set GUC, not for BEGIN */ +}; + +/* Read only false, true */ +const char *srv_readonly[] = { + " READ WRITE", + " READ ONLY", + "" /* default */ +}; + +/* Deferrable false, true */ +const char *srv_deferrable[] = { + " NOT DEFERRABLE", + " DEFERRABLE", + "" /* default */ +}; + +/* On/Off/Default GUC states + */ +const char *srv_state_guc[] = { + "off", + "on", + "default" +}; + + +const int SRV_STATE_UNCHANGED = -1; + + +/* Return a new "string" from a char* from the database. + * + * On Py2 just get a string, on Py3 decode it in the connection codec. + * + * Use a fallback if the connection is NULL. + */ +PyObject * +conn_text_from_chars(connectionObject *self, const char *str) +{ + return psyco_text_from_chars_safe(str, -1, self ? self->pydecoder : NULL); +} + + +/* Encode an unicode object into a bytes object in the connection encoding. 
+ * + * If no connection or encoding is available, default to utf8 + */ +PyObject * +conn_encode(connectionObject *self, PyObject *u) +{ + PyObject *t = NULL; + PyObject *rv = NULL; + + if (!(self && self->pyencoder)) { + rv = PyUnicode_AsUTF8String(u); + goto exit; + } + + if (!(t = PyObject_CallFunctionObjArgs(self->pyencoder, u, NULL))) { + goto exit; + } + + if (!(rv = PyTuple_GetItem(t, 0))) { goto exit; } + Py_INCREF(rv); + +exit: + Py_XDECREF(t); + + return rv; +} + + +/* decode a c string into a Python unicode in the connection encoding + * + * len can be < 0: in this case it will be calculated + * + * If no connection or encoding is available, default to utf8 + */ +PyObject * +conn_decode(connectionObject *self, const char *str, Py_ssize_t len) +{ + if (len < 0) { len = strlen(str); } + + if (self) { + if (self->cdecoder) { + return self->cdecoder(str, len, NULL); + } + else if (self->pydecoder) { + PyObject *b = NULL; + PyObject *t = NULL; + PyObject *rv = NULL; + + if (!(b = Bytes_FromStringAndSize(str, len))) { goto error; } + if (!(t = PyObject_CallFunctionObjArgs(self->pydecoder, b, NULL))) { + goto error; + } + if (!(rv = PyTuple_GetItem(t, 0))) { goto error; } + Py_INCREF(rv); /* PyTuple_GetItem gives a borrowes one */ +error: + Py_XDECREF(t); + Py_XDECREF(b); + return rv; + } + else { + return PyUnicode_FromStringAndSize(str, len); + } + } + else { + return PyUnicode_FromStringAndSize(str, len); + } +} + +/* conn_notice_callback - process notices */ + +static void +conn_notice_callback(void *args, const char *message) +{ + struct connectionObject_notice *notice; + connectionObject *self = (connectionObject *)args; + + Dprintf("conn_notice_callback: %s", message); + + /* NOTE: if we get here and the connection is unlocked then there is a + problem but this should happen because the notice callback is only + called from libpq and when we're inside libpq the connection is usually + locked. 
+ */ + notice = (struct connectionObject_notice *) + malloc(sizeof(struct connectionObject_notice)); + if (NULL == notice) { + /* Discard the notice in case of failed allocation. */ + return; + } + notice->next = NULL; + notice->message = strdup(message); + if (NULL == notice->message) { + free(notice); + return; + } + + if (NULL == self->last_notice) { + self->notice_pending = self->last_notice = notice; + } + else { + self->last_notice->next = notice; + self->last_notice = notice; + } +} + +/* Expose the notices received as Python objects. + * + * The function should be called with the connection lock and the GIL. + */ +void +conn_notice_process(connectionObject *self) +{ + struct connectionObject_notice *notice; + PyObject *msg = NULL; + PyObject *tmp = NULL; + static PyObject *append; + + if (NULL == self->notice_pending) { + return; + } + + if (!append) { + if (!(append = Text_FromUTF8("append"))) { + goto error; + } + } + + notice = self->notice_pending; + while (notice != NULL) { + Dprintf("conn_notice_process: %s", notice->message); + + if (!(msg = conn_text_from_chars(self, notice->message))) { goto error; } + + if (!(tmp = PyObject_CallMethodObjArgs( + self->notice_list, append, msg, NULL))) { + + goto error; + } + + Py_DECREF(tmp); tmp = NULL; + Py_DECREF(msg); msg = NULL; + + notice = notice->next; + } + + /* Remove the oldest item if the queue is getting too long. 
*/ + if (PyList_Check(self->notice_list)) { + Py_ssize_t nnotices; + nnotices = PyList_GET_SIZE(self->notice_list); + if (nnotices > CONN_NOTICES_LIMIT) { + if (-1 == PySequence_DelSlice(self->notice_list, + 0, nnotices - CONN_NOTICES_LIMIT)) { + PyErr_Clear(); + } + } + } + + conn_notice_clean(self); + return; + +error: + Py_XDECREF(tmp); + Py_XDECREF(msg); + conn_notice_clean(self); + + /* TODO: the caller doesn't expects errors from us */ + PyErr_Clear(); +} + +void +conn_notice_clean(connectionObject *self) +{ + struct connectionObject_notice *tmp, *notice; + + notice = self->notice_pending; + + while (notice != NULL) { + tmp = notice; + notice = notice->next; + free(tmp->message); + free(tmp); + } + + self->last_notice = self->notice_pending = NULL; +} + + +/* conn_notifies_process - make received notification available + * + * The function should be called with the connection lock and holding the GIL. + */ + +void +conn_notifies_process(connectionObject *self) +{ + PGnotify *pgn = NULL; + PyObject *notify = NULL; + PyObject *pid = NULL, *channel = NULL, *payload = NULL; + PyObject *tmp = NULL; + + static PyObject *append; + + if (!append) { + if (!(append = Text_FromUTF8("append"))) { + goto error; + } + } + + while ((pgn = PQnotifies(self->pgconn)) != NULL) { + + Dprintf("conn_notifies_process: got NOTIFY from pid %d, msg = %s", + (int) pgn->be_pid, pgn->relname); + + if (!(pid = PyInt_FromLong((long)pgn->be_pid))) { goto error; } + if (!(channel = conn_text_from_chars(self, pgn->relname))) { goto error; } + if (!(payload = conn_text_from_chars(self, pgn->extra))) { goto error; } + + if (!(notify = PyObject_CallFunctionObjArgs((PyObject *)¬ifyType, + pid, channel, payload, NULL))) { + goto error; + } + + Py_DECREF(pid); pid = NULL; + Py_DECREF(channel); channel = NULL; + Py_DECREF(payload); payload = NULL; + + if (!(tmp = PyObject_CallMethodObjArgs( + self->notifies, append, notify, NULL))) { + goto error; + } + Py_DECREF(tmp); tmp = NULL; + + 
Py_DECREF(notify); notify = NULL; + PQfreemem(pgn); pgn = NULL; + } + return; /* no error */ + +error: + if (pgn) { PQfreemem(pgn); } + Py_XDECREF(tmp); + Py_XDECREF(notify); + Py_XDECREF(pid); + Py_XDECREF(channel); + Py_XDECREF(payload); + + /* TODO: callers currently don't expect an error from us */ + PyErr_Clear(); + +} + + +/* + * the conn_get_* family of functions makes it easier to obtain the connection + * parameters from query results or by interrogating the connection itself +*/ + +int +conn_get_standard_conforming_strings(PGconn *pgconn) +{ + int equote; + const char *scs; + /* + * The presence of the 'standard_conforming_strings' parameter + * means that the server _accepts_ the E'' quote. + * + * If the parameter is off, the PQescapeByteaConn returns + * backslash escaped strings (e.g. '\001' -> "\\001"), + * so the E'' quotes are required to avoid warnings + * if 'escape_string_warning' is set. + * + * If the parameter is on, the PQescapeByteaConn returns + * not escaped strings (e.g. '\001' -> "\001"), relying on the + * fact that the '\' will pass untouched the string parser. + * In this case the E'' quotes are NOT to be used. + */ + scs = PQparameterStatus(pgconn, "standard_conforming_strings"); + Dprintf("conn_connect: server standard_conforming_strings parameter: %s", + scs ? scs : "unavailable"); + + equote = (scs && (0 == strcmp("off", scs))); + Dprintf("conn_connect: server requires E'' quotes: %s", + equote ? "YES" : "NO"); + + return equote; +} + + +/* Remove irrelevant chars from encoding name and turn it uppercase. + * + * Return a buffer allocated on Python heap into 'clean' and return 0 on + * success, otherwise return -1 and set an exception. 
+ */ +RAISES_NEG static int +clear_encoding_name(const char *enc, char **clean) +{ + const char *i = enc; + char *j, *buf; + int rv = -1; + + /* convert to upper case and remove '-' and '_' from string */ + if (!(j = buf = PyMem_Malloc(strlen(enc) + 1))) { + PyErr_NoMemory(); + goto exit; + } + + while (*i) { + if (!isalnum(*i)) { + ++i; + } + else { + *j++ = toupper(*i++); + } + } + *j = '\0'; + + Dprintf("clear_encoding_name: %s -> %s", enc, buf); + *clean = buf; + rv = 0; + +exit: + return rv; +} + +/* set fast access functions according to the currently selected encoding + */ +static void +conn_set_fast_codec(connectionObject *self) +{ + Dprintf("conn_set_fast_codec: encoding=%s", self->encoding); + + if (0 == strcmp(self->encoding, "UTF8")) { + Dprintf("conn_set_fast_codec: PyUnicode_DecodeUTF8"); + self->cdecoder = PyUnicode_DecodeUTF8; + return; + } + + if (0 == strcmp(self->encoding, "LATIN1")) { + Dprintf("conn_set_fast_codec: PyUnicode_DecodeLatin1"); + self->cdecoder = PyUnicode_DecodeLatin1; + return; + } + + Dprintf("conn_set_fast_codec: no fast codec"); + self->cdecoder = NULL; +} + + +/* Return the Python encoding from a PostgreSQL encoding. + * + * Optionally return the clean version of the postgres encoding too + */ +PyObject * +conn_pgenc_to_pyenc(const char *encoding, char **clean_encoding) +{ + char *pgenc = NULL; + PyObject *rv = NULL; + + if (0 > clear_encoding_name(encoding, &pgenc)) { goto exit; } + if (!(rv = PyDict_GetItemString(psycoEncodings, pgenc))) { + PyErr_Format(OperationalError, + "no Python encoding for PostgreSQL encoding '%s'", pgenc); + goto exit; + } + Py_INCREF(rv); + + if (clean_encoding) { + *clean_encoding = pgenc; + } + else { + PyMem_Free(pgenc); + } + +exit: + return rv; +} + +/* Convert a Postgres encoding into Python encoding and decoding functions. + * + * Set clean_encoding to a clean version of the Postgres encoding name + * and pyenc and pydec to python codec functions. 
+ * + * Return 0 on success, else -1 and set an exception. + */ +RAISES_NEG static int +conn_get_python_codec(const char *encoding, + char **clean_encoding, PyObject **pyenc, PyObject **pydec) +{ + int rv = -1; + char *pgenc = NULL; + PyObject *encname = NULL; + PyObject *enc_tmp = NULL, *dec_tmp = NULL; + + /* get the Python name of the encoding as a C string */ + if (!(encname = conn_pgenc_to_pyenc(encoding, &pgenc))) { goto exit; } + if (!(encname = psyco_ensure_bytes(encname))) { goto exit; } + + /* Look up the codec functions */ + if (!(enc_tmp = PyCodec_Encoder(Bytes_AS_STRING(encname)))) { goto exit; } + if (!(dec_tmp = PyCodec_Decoder(Bytes_AS_STRING(encname)))) { goto exit; } + + /* success */ + *pyenc = enc_tmp; enc_tmp = NULL; + *pydec = dec_tmp; dec_tmp = NULL; + *clean_encoding = pgenc; pgenc = NULL; + rv = 0; + +exit: + Py_XDECREF(enc_tmp); + Py_XDECREF(dec_tmp); + Py_XDECREF(encname); + PyMem_Free(pgenc); + + return rv; +} + + +/* Store the encoding in the pgconn->encoding field and set the other related + * encoding fields in the connection structure. + * + * Return 0 on success, else -1 and set an exception. + */ +RAISES_NEG static int +conn_store_encoding(connectionObject *self, const char *encoding) +{ + int rv = -1; + char *pgenc = NULL; + PyObject *enc_tmp = NULL, *dec_tmp = NULL; + + if (0 > conn_get_python_codec(encoding, &pgenc, &enc_tmp, &dec_tmp)) { + goto exit; + } + + /* Good, success: store the encoding/codec in the connection. */ + { + char *tmp = self->encoding; + self->encoding = pgenc; + PyMem_Free(tmp); + pgenc = NULL; + } + + Py_CLEAR(self->pyencoder); + self->pyencoder = enc_tmp; + enc_tmp = NULL; + + Py_CLEAR(self->pydecoder); + self->pydecoder = dec_tmp; + dec_tmp = NULL; + + conn_set_fast_codec(self); + + rv = 0; + +exit: + Py_XDECREF(enc_tmp); + Py_XDECREF(dec_tmp); + PyMem_Free(pgenc); + return rv; +} + + +/* Read the client encoding from the backend and store it in the connection. + * + * Return 0 on success, else -1. 
+ */ +RAISES_NEG static int +conn_read_encoding(connectionObject *self, PGconn *pgconn) +{ + const char *encoding; + int rv = -1; + + encoding = PQparameterStatus(pgconn, "client_encoding"); + Dprintf("conn_connect: client encoding: %s", encoding ? encoding : "(none)"); + if (!encoding) { + PyErr_SetString(OperationalError, + "server didn't return client encoding"); + goto exit; + } + + if (0 > conn_store_encoding(self, encoding)) { + goto exit; + } + + rv = 0; + +exit: + return rv; +} + + +int +conn_get_protocol_version(PGconn *pgconn) +{ + int ret; + ret = PQprotocolVersion(pgconn); + Dprintf("conn_connect: using protocol %d", ret); + return ret; +} + +int +conn_get_server_version(PGconn *pgconn) +{ + return (int)PQserverVersion(pgconn); +} + +/* set up the cancel key of the connection. + * On success return 0, else set an exception and return -1 + */ +RAISES_NEG static int +conn_setup_cancel(connectionObject *self, PGconn *pgconn) +{ + if (self->cancel) { + PQfreeCancel(self->cancel); + } + + if (!(self->cancel = PQgetCancel(self->pgconn))) { + PyErr_SetString(OperationalError, "can't get cancellation key"); + return -1; + } + + return 0; +} + +/* Return 1 if the "replication" keyword is set in the DSN, 0 otherwise */ +static int +dsn_has_replication(char *pgdsn) +{ + int ret = 0; + PQconninfoOption *connopts, *ptr; + + connopts = PQconninfoParse(pgdsn, NULL); + + for(ptr = connopts; ptr->keyword != NULL; ptr++) { + if(strcmp(ptr->keyword, "replication") == 0 && ptr->val != NULL) + ret = 1; + } + + PQconninfoFree(connopts); + + return ret; +} + + +/* Return 1 if the server datestyle allows us to work without problems, + 0 if it needs to be set to something better, e.g. ISO. 
*/ +static int +conn_is_datestyle_ok(PGconn *pgconn) +{ + const char *ds; + + ds = PQparameterStatus(pgconn, "DateStyle"); + Dprintf("conn_connect: DateStyle %s", ds); + + /* pgbouncer does not pass on DateStyle */ + if (ds == NULL) + return 0; + + /* Return true if ds starts with "ISO" + * e.g. "ISO, DMY" is fine, "German" not. */ + return (ds[0] == 'I' && ds[1] == 'S' && ds[2] == 'O'); +} + + +/* conn_setup - setup and read basic information about the connection */ + +RAISES_NEG int +conn_setup(connectionObject *self) +{ + int rv = -1; + + self->equote = conn_get_standard_conforming_strings(self->pgconn); + self->server_version = conn_get_server_version(self->pgconn); + self->protocol = conn_get_protocol_version(self->pgconn); + if (3 != self->protocol) { + PyErr_SetString(InterfaceError, "only protocol 3 supported"); + goto exit; + } + + if (0 > conn_read_encoding(self, self->pgconn)) { + goto exit; + } + + if (0 > conn_setup_cancel(self, self->pgconn)) { + goto exit; + } + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&self->lock); + Py_BLOCK_THREADS; + + if (!dsn_has_replication(self->dsn) && !conn_is_datestyle_ok(self->pgconn)) { + int res; + Py_UNBLOCK_THREADS; + res = pq_set_guc_locked(self, "datestyle", "ISO", &_save); + Py_BLOCK_THREADS; + if (res < 0) { + pq_complete_error(self); + goto unlock; + } + } + + /* for reset */ + self->autocommit = 0; + self->isolevel = ISOLATION_LEVEL_DEFAULT; + self->readonly = STATE_DEFAULT; + self->deferrable = STATE_DEFAULT; + + /* success */ + rv = 0; + +unlock: + Py_UNBLOCK_THREADS; + pthread_mutex_unlock(&self->lock); + Py_END_ALLOW_THREADS; + +exit: + return rv; +} + +/* conn_connect - execute a connection to the database */ + +static int +_conn_sync_connect(connectionObject *self, const char *dsn) +{ + int green; + + /* store this value to prevent inconsistencies due to a change + * in the middle of the function. 
*/ + green = psyco_green(); + if (!green) { + Py_BEGIN_ALLOW_THREADS; + self->pgconn = PQconnectdb(dsn); + Py_END_ALLOW_THREADS; + Dprintf("conn_connect: new PG connection at %p", self->pgconn); + } + else { + Py_BEGIN_ALLOW_THREADS; + self->pgconn = PQconnectStart(dsn); + Py_END_ALLOW_THREADS; + Dprintf("conn_connect: new green PG connection at %p", self->pgconn); + } + + if (!self->pgconn) + { + Dprintf("conn_connect: PQconnectdb(%s) FAILED", dsn); + PyErr_SetString(OperationalError, "PQconnectdb() failed"); + return -1; + } + else if (PQstatus(self->pgconn) == CONNECTION_BAD) + { + Dprintf("conn_connect: PQconnectdb(%s) returned BAD", dsn); + PyErr_SetString(OperationalError, PQerrorMessage(self->pgconn)); + return -1; + } + + PQsetNoticeProcessor(self->pgconn, conn_notice_callback, (void*)self); + + /* if the connection is green, wait to finish connection */ + if (green) { + if (0 > pq_set_non_blocking(self, 1)) { + return -1; + } + if (0 != psyco_wait(self)) { + return -1; + } + } + + /* From here the connection is considered ready: with the new status, + * poll() will use PQisBusy instead of PQconnectPoll. + */ + self->status = CONN_STATUS_READY; + + if (conn_setup(self) == -1) { + return -1; + } + + return 0; +} + +static int +_conn_async_connect(connectionObject *self, const char *dsn) +{ + PGconn *pgconn; + + self->pgconn = pgconn = PQconnectStart(dsn); + + Dprintf("conn_connect: new postgresql connection at %p", pgconn); + + if (pgconn == NULL) + { + Dprintf("conn_connect: PQconnectStart(%s) FAILED", dsn); + PyErr_SetString(OperationalError, "PQconnectStart() failed"); + return -1; + } + else if (PQstatus(pgconn) == CONNECTION_BAD) + { + Dprintf("conn_connect: PQconnectdb(%s) returned BAD", dsn); + PyErr_SetString(OperationalError, PQerrorMessage(pgconn)); + return -1; + } + + PQsetNoticeProcessor(pgconn, conn_notice_callback, (void*)self); + + /* Set the connection to nonblocking now. 
*/ + if (pq_set_non_blocking(self, 1) != 0) { + return -1; + } + + /* The connection will be completed banging on poll(): + * First with _conn_poll_connecting() that will finish connection, + * then with _conn_poll_setup_async() that will do the same job + * of setup_async(). */ + + return 0; +} + +int +conn_connect(connectionObject *self, const char *dsn, long int async) +{ + int rv; + + if (async == 1) { + Dprintf("con_connect: connecting in ASYNC mode"); + rv = _conn_async_connect(self, dsn); + } + else { + Dprintf("con_connect: connecting in SYNC mode"); + rv = _conn_sync_connect(self, dsn); + } + + if (rv != 0) { + /* connection failed, so let's close ourselves */ + self->closed = 2; + } + + return rv; +} + + +/* poll during a connection attempt until the connection has established. */ + +static int +_conn_poll_connecting(connectionObject *self) +{ + int res = PSYCO_POLL_ERROR; + const char *msg; + + Dprintf("conn_poll: poll connecting"); + switch (PQconnectPoll(self->pgconn)) { + case PGRES_POLLING_OK: + res = PSYCO_POLL_OK; + break; + case PGRES_POLLING_READING: + res = PSYCO_POLL_READ; + break; + case PGRES_POLLING_WRITING: + res = PSYCO_POLL_WRITE; + break; + case PGRES_POLLING_FAILED: + case PGRES_POLLING_ACTIVE: + msg = PQerrorMessage(self->pgconn); + if (!(msg && *msg)) { + msg = "asynchronous connection failed"; + } + PyErr_SetString(OperationalError, msg); + res = PSYCO_POLL_ERROR; + break; + } + + return res; +} + + +/* Advance to the next state after an attempt of flushing output */ + +static int +_conn_poll_advance_write(connectionObject *self) +{ + int res; + int flush; + + Dprintf("conn_poll: poll writing"); + + flush = PQflush(self->pgconn); + Dprintf("conn_poll: PQflush() = %i", flush); + + switch (flush) { + case 0: /* success */ + /* we've finished pushing the query to the server. Let's start + reading the results. 
*/ + Dprintf("conn_poll: async_status -> ASYNC_READ"); + self->async_status = ASYNC_READ; + res = PSYCO_POLL_READ; + break; + case 1: /* would block */ + res = PSYCO_POLL_WRITE; + break; + case -1: /* error */ + PyErr_SetString(OperationalError, PQerrorMessage(self->pgconn)); + res = PSYCO_POLL_ERROR; + break; + default: + Dprintf("conn_poll: unexpected result from flush: %d", flush); + res = PSYCO_POLL_ERROR; + break; + } + return res; +} + + +/* Advance to the next state after reading results */ + +static int +_conn_poll_advance_read(connectionObject *self) +{ + int res; + int busy; + + Dprintf("conn_poll: poll reading"); + + busy = pq_get_result_async(self); + + switch (busy) { + case 0: /* result is ready */ + Dprintf("conn_poll: async_status -> ASYNC_DONE"); + self->async_status = ASYNC_DONE; + res = PSYCO_POLL_OK; + break; + case 1: /* result not ready: fd would block */ + res = PSYCO_POLL_READ; + break; + case -1: /* ouch, error */ + res = PSYCO_POLL_ERROR; + break; + default: + Dprintf("conn_poll: unexpected result from pq_get_result_async: %d", + busy); + res = PSYCO_POLL_ERROR; + break; + } + return res; +} + + +/* Poll the connection for the send query/retrieve result phase + + Advance the async_status (usually going WRITE -> READ -> DONE) but don't + mess with the connection status. */ + +static int +_conn_poll_query(connectionObject *self) +{ + int res = PSYCO_POLL_ERROR; + + switch (self->async_status) { + case ASYNC_WRITE: + Dprintf("conn_poll: async_status = ASYNC_WRITE"); + res = _conn_poll_advance_write(self); + break; + + case ASYNC_READ: + Dprintf("conn_poll: async_status = ASYNC_READ"); + res = _conn_poll_advance_read(self); + break; + + case ASYNC_DONE: + Dprintf("conn_poll: async_status = ASYNC_DONE"); + /* We haven't asked anything: just check for notifications. 
*/ + res = _conn_poll_advance_read(self); + break; + + default: + Dprintf("conn_poll: in unexpected async status: %d", + self->async_status); + res = PSYCO_POLL_ERROR; + break; + } + + return res; +} + +/* Advance to the next state during an async connection setup + * + * If the connection is green, this is performed by the regular + * sync code so the queries are sent by conn_setup() while in + * CONN_STATUS_READY state. + */ +static int +_conn_poll_setup_async(connectionObject *self) +{ + int res = PSYCO_POLL_ERROR; + + switch (self->status) { + case CONN_STATUS_CONNECTING: + self->equote = conn_get_standard_conforming_strings(self->pgconn); + self->protocol = conn_get_protocol_version(self->pgconn); + self->server_version = conn_get_server_version(self->pgconn); + if (3 != self->protocol) { + PyErr_SetString(InterfaceError, "only protocol 3 supported"); + break; + } + if (0 > conn_read_encoding(self, self->pgconn)) { + break; + } + if (0 > conn_setup_cancel(self, self->pgconn)) { + return -1; + } + + /* asynchronous connections always use isolation level 0, the user is + * expected to manage the transactions himself, by sending + * (asynchronously) BEGIN and COMMIT statements. + */ + self->autocommit = 1; + + /* If the datestyle is ISO or anything else good, + * we can skip the CONN_STATUS_DATESTYLE step. + * Note that we cannot change the datestyle on a replication + * connection. 
+ */ + if (!dsn_has_replication(self->dsn) && !conn_is_datestyle_ok(self->pgconn)) { + Dprintf("conn_poll: status -> CONN_STATUS_DATESTYLE"); + self->status = CONN_STATUS_DATESTYLE; + if (0 == pq_send_query(self, psyco_datestyle)) { + PyErr_SetString(OperationalError, PQerrorMessage(self->pgconn)); + break; + } + Dprintf("conn_poll: async_status -> ASYNC_WRITE"); + self->async_status = ASYNC_WRITE; + res = PSYCO_POLL_WRITE; + } + else { + Dprintf("conn_poll: status -> CONN_STATUS_READY"); + self->status = CONN_STATUS_READY; + res = PSYCO_POLL_OK; + } + break; + + case CONN_STATUS_DATESTYLE: + res = _conn_poll_query(self); + if (res == PSYCO_POLL_OK) { + res = PSYCO_POLL_ERROR; + if (self->pgres == NULL + || PQresultStatus(self->pgres) != PGRES_COMMAND_OK ) { + PyErr_SetString(OperationalError, "can't set datestyle to ISO"); + break; + } + CLEARPGRES(self->pgres); + + Dprintf("conn_poll: status -> CONN_STATUS_READY"); + self->status = CONN_STATUS_READY; + res = PSYCO_POLL_OK; + } + break; + } + return res; +} + + +static cursorObject * +_conn_get_async_cursor(connectionObject *self) { + PyObject *py_curs; + + if (!(py_curs = PyWeakref_GetObject(self->async_cursor))) { + PyErr_SetString(PyExc_SystemError, + "got null dereferencing cursor weakref"); + goto error; + } + if (Py_None == py_curs) { + PyErr_SetString(InterfaceError, + "the asynchronous cursor has disappeared"); + goto error; + } + + Py_INCREF(py_curs); + return (cursorObject *)py_curs; + +error: + pq_clear_async(self); + return NULL; +} + +/* conn_poll - Main polling switch + * + * The function is called in all the states and connection types and invokes + * the right "next step". 
+ */ + +int +conn_poll(connectionObject *self) +{ + int res = PSYCO_POLL_ERROR; + Dprintf("conn_poll: status = %d", self->status); + + switch (self->status) { + case CONN_STATUS_SETUP: + Dprintf("conn_poll: status -> CONN_STATUS_SETUP"); + self->status = CONN_STATUS_CONNECTING; + res = PSYCO_POLL_WRITE; + break; + + case CONN_STATUS_CONNECTING: + Dprintf("conn_poll: status -> CONN_STATUS_CONNECTING"); + res = _conn_poll_connecting(self); + if (res == PSYCO_POLL_OK && self->async) { + res = _conn_poll_setup_async(self); + } + break; + + case CONN_STATUS_DATESTYLE: + Dprintf("conn_poll: status -> CONN_STATUS_DATESTYLE"); + res = _conn_poll_setup_async(self); + break; + + case CONN_STATUS_READY: + case CONN_STATUS_BEGIN: + case CONN_STATUS_PREPARED: + Dprintf("conn_poll: status -> CONN_STATUS_*"); + res = _conn_poll_query(self); + + if (res == PSYCO_POLL_OK && self->async && self->async_cursor) { + cursorObject *curs; + + /* An async query has just finished: parse the tuple in the + * target cursor. */ + if (!(curs = _conn_get_async_cursor(self))) { + res = PSYCO_POLL_ERROR; + break; + } + + curs_set_result(curs, self->pgres); + self->pgres = NULL; + + /* fetch the tuples (if there are any) and build the result. We + * don't care if pq_fetch return 0 or 1, but if there was an error, + * we want to signal it to the caller. 
*/ + if (pq_fetch(curs, 0) == -1) { + res = PSYCO_POLL_ERROR; + } + + /* We have finished with our async_cursor */ + Py_DECREF(curs); + Py_CLEAR(self->async_cursor); + } + break; + + default: + Dprintf("conn_poll: in unexpected state"); + res = PSYCO_POLL_ERROR; + } + + Dprintf("conn_poll: returning %d", res); + return res; +} + +/* conn_close - do anything needed to shut down the connection */ + +void +conn_close(connectionObject *self) +{ + /* a connection with closed == 2 still requires cleanup */ + if (self->closed == 1) { + return; + } + + /* sets this connection as closed even for other threads; */ + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&self->lock); + + conn_close_locked(self); + + pthread_mutex_unlock(&self->lock); + Py_END_ALLOW_THREADS; +} + + +/* Return a copy of the 'dsn' string with the password scrubbed. + * + * The string returned is allocated on the Python heap. + * + * In case of error return NULL and raise an exception. + */ +char * +conn_obscure_password(const char *dsn) +{ + PQconninfoOption *options = NULL; + PyObject *d = NULL, *v = NULL, *pydsn = NULL; + char *rv = NULL; + + if (!dsn) { + PyErr_SetString(InternalError, "unexpected null string"); + goto exit; + } + + if (!(options = PQconninfoParse(dsn, NULL))) { + /* unlikely: the dsn was already tested valid */ + PyErr_SetString(InternalError, "the connection string is not valid"); + goto exit; + } + + if (!(d = psyco_dict_from_conninfo_options( + options, /* include_password = */ 1))) { + goto exit; + } + if (NULL == PyDict_GetItemString(d, "password")) { + /* the dsn doesn't have a password */ + psyco_strdup(&rv, dsn, -1); + goto exit; + } + + /* scrub the password and put back the connection string together */ + if (!(v = Text_FromUTF8("xxx"))) { goto exit; } + if (0 > PyDict_SetItemString(d, "password", v)) { goto exit; } + if (!(pydsn = psyco_make_dsn(Py_None, d))) { goto exit; } + if (!(pydsn = psyco_ensure_bytes(pydsn))) { goto exit; } + + /* Return the connection string with 
the password replaced */ + psyco_strdup(&rv, Bytes_AS_STRING(pydsn), -1); + +exit: + PQconninfoFree(options); + Py_XDECREF(v); + Py_XDECREF(d); + Py_XDECREF(pydsn); + + return rv; +} + + +/* conn_close_locked - shut down the connection with the lock already taken */ + +void conn_close_locked(connectionObject *self) +{ + if (self->closed == 1) { + return; + } + + /* We used to call pq_abort_locked here, but the idea of issuing a + * rollback on close/GC has been considered inappropriate. + * + * Dropping the connection on the server has the same effect as the + * transaction is automatically rolled back. Some middleware, such as + * PgBouncer, have problem with connections closed in the middle of the + * transaction though: to avoid these problems the transaction should be + * closed only in status CONN_STATUS_READY. + */ + self->closed = 1; + + /* we need to check the value of pgconn, because we get called even when + * the connection fails! */ + if (self->pgconn) { + PQfinish(self->pgconn); + self->pgconn = NULL; + Dprintf("conn_close: PQfinish called"); + } +} + +/* conn_commit - commit on a connection */ + +RAISES_NEG int +conn_commit(connectionObject *self) +{ + int res; + + res = pq_commit(self); + return res; +} + +/* conn_rollback - rollback a connection */ + +RAISES_NEG int +conn_rollback(connectionObject *self) +{ + int res; + + res = pq_abort(self); + return res; +} + + +/* Change the state of the session */ +RAISES_NEG int +conn_set_session(connectionObject *self, int autocommit, + int isolevel, int readonly, int deferrable) +{ + int rv = -1; + int want_autocommit = autocommit == SRV_STATE_UNCHANGED ? 
+ self->autocommit : autocommit; + + if (deferrable != SRV_STATE_UNCHANGED && self->server_version < 90100) { + PyErr_SetString(ProgrammingError, + "the 'deferrable' setting is only available" + " from PostgreSQL 9.1"); + goto exit; + } + + /* Promote an isolation level to one of the levels supported by the server */ + if (self->server_version < 80000) { + if (isolevel == ISOLATION_LEVEL_READ_UNCOMMITTED) { + isolevel = ISOLATION_LEVEL_READ_COMMITTED; + } + else if (isolevel == ISOLATION_LEVEL_REPEATABLE_READ) { + isolevel = ISOLATION_LEVEL_SERIALIZABLE; + } + } + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&self->lock); + + if (want_autocommit) { + /* we are or are going in autocommit state, so no BEGIN will be issued: + * configure the session with the characteristics requested */ + if (isolevel != SRV_STATE_UNCHANGED) { + if (0 > pq_set_guc_locked(self, + "default_transaction_isolation", srv_isolevels[isolevel], + &_save)) { + goto endlock; + } + } + if (readonly != SRV_STATE_UNCHANGED) { + if (0 > pq_set_guc_locked(self, + "default_transaction_read_only", srv_state_guc[readonly], + &_save)) { + goto endlock; + } + } + if (deferrable != SRV_STATE_UNCHANGED) { + if (0 > pq_set_guc_locked(self, + "default_transaction_deferrable", srv_state_guc[deferrable], + &_save)) { + goto endlock; + } + } + } + else if (self->autocommit) { + /* we are moving from autocommit to not autocommit, so revert the + * characteristics to defaults to let BEGIN do its work */ + if (self->isolevel != ISOLATION_LEVEL_DEFAULT) { + if (0 > pq_set_guc_locked(self, + "default_transaction_isolation", "default", + &_save)) { + goto endlock; + } + } + if (self->readonly != STATE_DEFAULT) { + if (0 > pq_set_guc_locked(self, + "default_transaction_read_only", "default", + &_save)) { + goto endlock; + } + } + if (self->server_version >= 90100 && self->deferrable != STATE_DEFAULT) { + if (0 > pq_set_guc_locked(self, + "default_transaction_deferrable", "default", + &_save)) { + goto endlock; + } + 
} + } + + if (autocommit != SRV_STATE_UNCHANGED) { + self->autocommit = autocommit; + } + if (isolevel != SRV_STATE_UNCHANGED) { + self->isolevel = isolevel; + } + if (readonly != SRV_STATE_UNCHANGED) { + self->readonly = readonly; + } + if (deferrable != SRV_STATE_UNCHANGED) { + self->deferrable = deferrable; + } + rv = 0; + +endlock: + pthread_mutex_unlock(&self->lock); + Py_END_ALLOW_THREADS; + + if (rv < 0) { + pq_complete_error(self); + goto exit; + } + + Dprintf( + "conn_set_session: autocommit %d, isolevel %d, readonly %d, deferrable %d", + autocommit, isolevel, readonly, deferrable); + + +exit: + return rv; +} + + +/* conn_set_client_encoding - switch client encoding on connection */ + +RAISES_NEG int +conn_set_client_encoding(connectionObject *self, const char *pgenc) +{ + int res = -1; + char *clean_enc = NULL; + + /* We must know what python encoding this encoding is. */ + if (0 > clear_encoding_name(pgenc, &clean_enc)) { goto exit; } + + /* If the current encoding is equal to the requested one we don't + issue any query to the backend */ + if (strcmp(self->encoding, clean_enc) == 0) { + res = 0; + goto exit; + } + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&self->lock); + + /* abort the current transaction, to set the encoding ouside of + transactions */ + if ((res = pq_abort_locked(self, &_save))) { + goto endlock; + } + + if ((res = pq_set_guc_locked(self, "client_encoding", clean_enc, &_save))) { + goto endlock; + } + +endlock: + pthread_mutex_unlock(&self->lock); + Py_END_ALLOW_THREADS; + + if (res < 0) { + pq_complete_error(self); + goto exit; + } + + res = conn_store_encoding(self, pgenc); + + Dprintf("conn_set_client_encoding: encoding set to %s", self->encoding); + +exit: + PyMem_Free(clean_enc); + + return res; +} + + +/* conn_tpc_begin -- begin a two-phase commit. 
+ * + * The state of a connection in the middle of a TPC is exactly the same + * of a normal transaction, in CONN_STATUS_BEGIN, but with the tpc_xid + * member set to the xid used. This allows to reuse all the code paths used + * in regular transactions, as PostgreSQL won't even know we are in a TPC + * until PREPARE. */ + +RAISES_NEG int +conn_tpc_begin(connectionObject *self, xidObject *xid) +{ + Dprintf("conn_tpc_begin: starting transaction"); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&self->lock); + + if (pq_begin_locked(self, &_save) < 0) { + pthread_mutex_unlock(&(self->lock)); + Py_BLOCK_THREADS; + pq_complete_error(self); + return -1; + } + + pthread_mutex_unlock(&self->lock); + Py_END_ALLOW_THREADS; + + /* The transaction started ok, let's store this xid. */ + Py_INCREF(xid); + self->tpc_xid = xid; + + return 0; +} + + +/* conn_tpc_command -- run one of the TPC-related PostgreSQL commands. + * + * The function doesn't change the connection state as it can be used + * for many commands and for recovered transactions. 
*/ + +RAISES_NEG int +conn_tpc_command(connectionObject *self, const char *cmd, xidObject *xid) +{ + PyObject *tid = NULL; + const char *ctid; + int rv = -1; + + Dprintf("conn_tpc_command: %s", cmd); + + /* convert the xid into PostgreSQL transaction id while keeping the GIL */ + if (!(tid = psyco_ensure_bytes(xid_get_tid(xid)))) { goto exit; } + if (!(ctid = Bytes_AsString(tid))) { goto exit; } + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&self->lock); + + if (0 > (rv = pq_tpc_command_locked(self, cmd, ctid, &_save))) { + pthread_mutex_unlock(&self->lock); + Py_BLOCK_THREADS; + pq_complete_error(self); + goto exit; + } + + pthread_mutex_unlock(&self->lock); + Py_END_ALLOW_THREADS; + +exit: + Py_XDECREF(tid); + return rv; +} + +/* conn_tpc_recover -- return a list of pending TPC Xid */ + +PyObject * +conn_tpc_recover(connectionObject *self) +{ + int status; + PyObject *xids = NULL; + PyObject *rv = NULL; + PyObject *tmp; + + /* store the status to restore it. */ + status = self->status; + + if (!(xids = xid_recover((PyObject *)self))) { goto exit; } + + if (status == CONN_STATUS_READY && self->status == CONN_STATUS_BEGIN) { + /* recover began a transaction: let's abort it. 
*/ + if (!(tmp = PyObject_CallMethod((PyObject *)self, "rollback", NULL))) { + goto exit; + } + Py_DECREF(tmp); + } + + /* all fine */ + rv = xids; + xids = NULL; + +exit: + Py_XDECREF(xids); + + return rv; + +} + + +void +conn_set_result(connectionObject *self, PGresult *pgres) +{ + PQclear(self->pgres); + self->pgres = pgres; +} + + +void +conn_set_error(connectionObject *self, const char *msg) +{ + if (self->error) { + free(self->error); + self->error = NULL; + } + if (msg && *msg) { + self->error = strdup(msg); + } +} diff --git a/psycopg/connection_type.c b/psycopg/connection_type.c new file mode 100644 index 0000000000000000000000000000000000000000..92d5a86169050140ba376ece00b2b505b5261826 --- /dev/null +++ b/psycopg/connection_type.c @@ -0,0 +1,1517 @@ +/* connection_type.c - python interface to connection objects + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
#define PSYCOPG_MODULE
#include "psycopg/psycopg.h"

#include "psycopg/connection.h"
#include "psycopg/cursor.h"
#include "psycopg/pqpath.h"
#include "psycopg/conninfo.h"
#include "psycopg/lobject.h"
#include "psycopg/green.h"
#include "psycopg/xid.h"

/* NOTE(review): the system header names were lost in extraction (the
 * angle-bracketed text was stripped) -- restore from upstream before use. */
#include
#include

/* tables of server-side keywords, defined in connection_int.c */
extern HIDDEN const char *srv_isolevels[];
extern HIDDEN const char *srv_readonly[];
extern HIDDEN const char *srv_deferrable[];
extern HIDDEN const int SRV_STATE_UNCHANGED;

/** DBAPI methods **/

/* cursor method - allocate a new cursor */

#define psyco_conn_cursor_doc \
"cursor(name=None, cursor_factory=extensions.cursor, withhold=False) -- new cursor\n\n" \
"Return a new cursor.\n\nThe ``cursor_factory`` argument can be used to\n" \
"create non-standard cursors by passing a class different from the\n" \
"default. Note that the new class *should* be a sub-class of\n" \
"`extensions.cursor`.\n\n" \
":rtype: `extensions.cursor`"

static PyObject *
psyco_conn_cursor(connectionObject *self, PyObject *args, PyObject *kwargs)
{
    PyObject *obj = NULL;
    PyObject *rv = NULL;
    PyObject *name = Py_None;
    PyObject *factory = Py_None;
    PyObject *withhold = Py_False;
    PyObject *scrollable = Py_None;

    static char *kwlist[] = {
        "name", "cursor_factory", "withhold", "scrollable", NULL};

    EXC_IF_CONN_CLOSED(self);

    if (!PyArg_ParseTupleAndKeywords(
            args, kwargs, "|OOOO", kwlist,
            &name, &factory, &withhold, &scrollable)) {
        goto exit;
    }

    /* no explicit factory: fall back to the connection's default factory,
     * then to the plain extension cursor type */
    if (factory == Py_None) {
        if (self->cursor_factory && self->cursor_factory != Py_None) {
            factory = self->cursor_factory;
        }
        else {
            factory = (PyObject *)&cursorType;
        }
    }

    if (self->status != CONN_STATUS_READY &&
        self->status != CONN_STATUS_BEGIN &&
        self->status != CONN_STATUS_PREPARED) {
        PyErr_SetString(OperationalError,
                        "asynchronous connection attempt underway");
        goto exit;
    }

    /* named (server-side) cursors need a sync roundtrip to be declared */
    if (name != Py_None && self->async == 1) {
        PyErr_SetString(ProgrammingError,
                        "asynchronous connections "
                        "cannot produce named cursors");
        goto exit;
    }

    Dprintf("psyco_conn_cursor: new %s cursor for connection at %p",
        (name == Py_None ? "unnamed" : "named"), self);

    if (!(obj = PyObject_CallFunctionObjArgs(factory, self, name, NULL))) {
        goto exit;
    }

    if (PyObject_IsInstance(obj, (PyObject *)&cursorType) == 0) {
        PyErr_SetString(PyExc_TypeError,
            "cursor factory must be subclass of psycopg2.extensions.cursor");
        goto exit;
    }

    if (0 > curs_withhold_set((cursorObject *)obj, withhold)) {
        goto exit;
    }
    if (0 > curs_scrollable_set((cursorObject *)obj, scrollable)) {
        goto exit;
    }

    Dprintf("psyco_conn_cursor: new cursor at %p: refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        obj, Py_REFCNT(obj)
      );

    rv = obj;
    obj = NULL;

exit:
    Py_XDECREF(obj);
    return rv;
}


/* close method - close the connection and all related cursors */

#define psyco_conn_close_doc "close() -- Close the connection."

static PyObject *
psyco_conn_close(connectionObject *self, PyObject *dummy)
{
    /* note: closing an already-closed connection is a no-op, not an error */
    Dprintf("psyco_conn_close: closing connection at %p", self);
    conn_close(self);
    Dprintf("psyco_conn_close: connection at %p closed", self);

    Py_RETURN_NONE;
}


/* commit method - commit all changes to the database */

#define psyco_conn_commit_doc "commit() -- Commit all changes to database."

static PyObject *
psyco_conn_commit(connectionObject *self, PyObject *dummy)
{
    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, commit);
    EXC_IF_TPC_BEGIN(self, commit);

    if (conn_commit(self) < 0)
        return NULL;

    Py_RETURN_NONE;
}


/* rollback method - roll back all changes done to the database */

#define psyco_conn_rollback_doc \
"rollback() -- Roll back all changes done to database."

static PyObject *
psyco_conn_rollback(connectionObject *self, PyObject *dummy)
{
    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, rollback);
    EXC_IF_TPC_BEGIN(self, rollback);

    if (conn_rollback(self) < 0)
        return NULL;

    Py_RETURN_NONE;
}


#define psyco_conn_xid_doc \
"xid(format_id, gtrid, bqual) -- create a transaction identifier."

static PyObject *
psyco_conn_xid(connectionObject *self, PyObject *args, PyObject *kwargs)
{
    EXC_IF_CONN_CLOSED(self);
    EXC_IF_TPC_NOT_SUPPORTED(self);

    /* delegate directly to the Xid type constructor */
    return PyObject_Call((PyObject *)&xidType, args, kwargs);
}


#define psyco_conn_tpc_begin_doc \
"tpc_begin(xid) -- begin a TPC transaction with given transaction ID xid."

static PyObject *
psyco_conn_tpc_begin(connectionObject *self, PyObject *args)
{
    PyObject *rv = NULL;
    xidObject *xid = NULL;
    PyObject *oxid;

    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, tpc_begin);
    EXC_IF_TPC_NOT_SUPPORTED(self);
    EXC_IF_IN_TRANSACTION(self, tpc_begin);

    if (!PyArg_ParseTuple(args, "O", &oxid)) {
        goto exit;
    }

    /* accept either a Xid instance or anything xid_ensure can adapt */
    if (NULL == (xid = xid_ensure(oxid))) {
        goto exit;
    }

    /* two phase commit and autocommit make no point */
    if (self->autocommit) {
        PyErr_SetString(ProgrammingError,
            "tpc_begin can't be called in autocommit mode");
        goto exit;
    }

    if (conn_tpc_begin(self, xid) < 0) {
        goto exit;
    }

    Py_INCREF(Py_None);
    rv = Py_None;

exit:
    Py_XDECREF(xid);
    return rv;
}


#define psyco_conn_tpc_prepare_doc \
"tpc_prepare() -- perform the first phase of a two-phase transaction."

static PyObject *
psyco_conn_tpc_prepare(connectionObject *self, PyObject *dummy)
{
    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, tpc_prepare);
    EXC_IF_TPC_PREPARED(self, tpc_prepare);

    if (NULL == self->tpc_xid) {
        PyErr_SetString(ProgrammingError,
            "prepare must be called inside a two-phase transaction");
        return NULL;
    }

    if (0 > conn_tpc_command(self, "PREPARE TRANSACTION", self->tpc_xid)) {
        return NULL;
    }

    /* transaction prepared: set the state so that no operation
     * can be performed until commit. */
    self->status = CONN_STATUS_PREPARED;

    Py_RETURN_NONE;
}


/* the type of conn_commit/conn_rollback */
typedef int (*_finish_f)(connectionObject *self);

/* Implement tpc_commit/tpc_rollback.
 *
 * This is a common framework performing the checks and state manipulation
 * common to the two functions.
 *
 * Parameters are:
 * - self, args: passed by Python
 * - opc_f: the function to call in case of one-phase commit/rollback
 *          one of conn_commit/conn_rollback
 * - tpc_cmd: the command to execute for a two-phase commit/rollback
 *
 * The function can be called in three cases:
 * - If xid is specified, the status must be "ready";
 *   issue the commit/rollback prepared.
 * - if xid is not specified and status is "begin" with a xid,
 *   issue a normal commit/rollback.
 * - if xid is not specified and status is "prepared",
 *   issue the commit/rollback prepared.
 */
static PyObject *
_psyco_conn_tpc_finish(connectionObject *self, PyObject *args,
    _finish_f opc_f, char *tpc_cmd)
{
    PyObject *oxid = NULL;
    xidObject *xid = NULL;
    PyObject *rv = NULL;

    if (!PyArg_ParseTuple(args, "|O", &oxid)) { goto exit; }

    if (oxid) {
        if (!(xid = xid_ensure(oxid))) { goto exit; }
    }

    if (xid) {
        /* committing/aborting a recovered transaction.
 */
        if (self->status != CONN_STATUS_READY) {
            PyErr_SetString(ProgrammingError,
                "tpc_commit/tpc_rollback with a xid "
                "must be called outside a transaction");
            goto exit;
        }
        if (0 > conn_tpc_command(self, tpc_cmd, xid)) {
            goto exit;
        }
    } else {
        /* committing/aborting our own transaction. */
        if (!self->tpc_xid) {
            PyErr_SetString(ProgrammingError,
                "tpc_commit/tpc_rollback with no parameter "
                "must be called in a two-phase transaction");
            goto exit;
        }

        switch (self->status) {
          case CONN_STATUS_BEGIN:
            /* first phase not reached yet: plain one-phase finish */
            if (0 > opc_f(self)) { goto exit; }
            break;

          case CONN_STATUS_PREPARED:
            if (0 > conn_tpc_command(self, tpc_cmd, self->tpc_xid)) {
                goto exit;
            }
            break;

          default:
            PyErr_SetString(InterfaceError,
                "unexpected state in tpc_commit/tpc_rollback");
            goto exit;
        }

        Py_CLEAR(self->tpc_xid);

        /* connection goes ready */
        self->status = CONN_STATUS_READY;
    }

    Py_INCREF(Py_None);
    rv = Py_None;

exit:
    Py_XDECREF(xid);
    return rv;
}

#define psyco_conn_tpc_commit_doc \
"tpc_commit([xid]) -- commit a transaction previously prepared."

static PyObject *
psyco_conn_tpc_commit(connectionObject *self, PyObject *args)
{
    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, tpc_commit);
    EXC_IF_TPC_NOT_SUPPORTED(self);

    return _psyco_conn_tpc_finish(self, args,
                                  conn_commit, "COMMIT PREPARED");
}

#define psyco_conn_tpc_rollback_doc \
"tpc_rollback([xid]) -- abort a transaction previously prepared."

static PyObject *
psyco_conn_tpc_rollback(connectionObject *self, PyObject *args)
{
    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, tpc_rollback);
    EXC_IF_TPC_NOT_SUPPORTED(self);

    return _psyco_conn_tpc_finish(self, args,
                                  conn_rollback, "ROLLBACK PREPARED");
}

#define psyco_conn_tpc_recover_doc \
"tpc_recover() -- returns a list of pending transaction IDs."

static PyObject *
psyco_conn_tpc_recover(connectionObject *self, PyObject *dummy)
{
    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, tpc_recover);
    EXC_IF_TPC_PREPARED(self, tpc_recover);
    EXC_IF_TPC_NOT_SUPPORTED(self);

    return conn_tpc_recover(self);
}


#define psyco_conn_enter_doc \
"__enter__ -> self"

static PyObject *
psyco_conn_enter(connectionObject *self, PyObject *dummy)
{
    PyObject *rv = NULL;

    EXC_IF_CONN_CLOSED(self);

    if (self->entered) {
        PyErr_SetString(ProgrammingError,
            "the connection cannot be re-entered recursively");
        goto exit;
    }

    self->entered = 1;
    Py_INCREF(self);
    rv = (PyObject *)self;

exit:
    return rv;
}


#define psyco_conn_exit_doc \
"__exit__ -- commit if no exception, else roll back"

static PyObject *
psyco_conn_exit(connectionObject *self, PyObject *args)
{
    PyObject *type, *name, *tb;
    PyObject *tmp = NULL;
    PyObject *rv = NULL;

    if (!PyArg_ParseTuple(args, "OOO", &type, &name, &tb)) {
        goto exit;
    }

    /* even if there will be an error, consider ourselves out */
    self->entered = 0;

    if (type == Py_None) {
        if (!(tmp = PyObject_CallMethod((PyObject *)self, "commit", NULL))) {
            goto exit;
        }
    } else {
        if (!(tmp = PyObject_CallMethod((PyObject *)self, "rollback", NULL))) {
            goto exit;
        }
    }

    /* success (of the commit or rollback, there may have been an exception in
     * the block). Return None to avoid swallowing the exception */
    rv = Py_None;
    Py_INCREF(rv);

exit:
    Py_XDECREF(tmp);
    return rv;
}


/* parse a python object into one of the possible isolation level values */

RAISES_NEG static int
_psyco_conn_parse_isolevel(PyObject *pyval)
{
    int rv = -1;
    long level;

    Py_INCREF(pyval);   /* for ensure_bytes */

    /* None is default.
 This is only used when setting the property, because
     * set_session() has None used as "don't change" */
    if (pyval == Py_None) {
        rv = ISOLATION_LEVEL_DEFAULT;
    }

    /* parse from one of the level constants */
    else if (PyInt_Check(pyval)) {
        level = PyInt_AsLong(pyval);
        if (level == -1 && PyErr_Occurred()) { goto exit; }
        if (level < 1 || level > 4) {
            PyErr_SetString(PyExc_ValueError,
                "isolation_level must be between 1 and 4");
            goto exit;
        }

        rv = level;
    }

    /* parse from the string -- this includes "default" */
    else {
        /* note: pyval is rebound to a new reference here; the matching
         * decref happens at exit */
        if (!(pyval = psyco_ensure_bytes(pyval))) {
            goto exit;
        }
        for (level = 1; level <= 4; level++) {
            if (0 == strcasecmp(srv_isolevels[level], Bytes_AS_STRING(pyval))) {
                rv = level;
                break;
            }
        }
        if (rv < 0 && 0 == strcasecmp("default", Bytes_AS_STRING(pyval))) {
            rv = ISOLATION_LEVEL_DEFAULT;
        }
        if (rv < 0) {
            PyErr_Format(PyExc_ValueError,
                "bad value for isolation_level: '%s'", Bytes_AS_STRING(pyval));
            goto exit;
        }
    }

exit:
    Py_XDECREF(pyval);

    return rv;
}

/* convert False/True/"default" -> 0/1/2 */

RAISES_NEG static int
_psyco_conn_parse_onoff(PyObject *pyval)
{
    int rv = -1;

    Py_INCREF(pyval);   /* for ensure_bytes */

    if (pyval == Py_None) {
        rv = STATE_DEFAULT;
    }
    else if (PyUnicode_CheckExact(pyval) || Bytes_CheckExact(pyval)) {
        if (!(pyval = psyco_ensure_bytes(pyval))) {
            goto exit;
        }
        if (0 == strcasecmp("default", Bytes_AS_STRING(pyval))) {
            rv = STATE_DEFAULT;
        }
        else {
            PyErr_Format(PyExc_ValueError,
                "the only string accepted is 'default'; got %s",
                Bytes_AS_STRING(pyval));
            goto exit;
        }
    }
    else {
        /* any other object: use its truth value */
        int istrue;
        if (0 > (istrue = PyObject_IsTrue(pyval))) { goto exit; }
        rv = istrue ? STATE_ON : STATE_OFF;
    }

exit:
    Py_XDECREF(pyval);

    return rv;
}

/* common preconditions for changing session characteristics: connection
 * usable, synchronous, outside a transaction, not TPC-prepared */
#define _set_session_checks(self,what) \
do { \
    EXC_IF_CONN_CLOSED(self); \
    EXC_IF_CONN_ASYNC(self, what); \
    EXC_IF_IN_TRANSACTION(self, what); \
    EXC_IF_TPC_PREPARED(self, what); \
} while(0)

/* set_session - set default transaction characteristics */

#define psyco_conn_set_session_doc \
"set_session(...) -- Set one or more parameters for the next transactions.\n\n" \
"Accepted arguments are 'isolation_level', 'readonly', 'deferrable', 'autocommit'."

static PyObject *
psyco_conn_set_session(connectionObject *self, PyObject *args, PyObject *kwargs)
{
    PyObject *isolevel = Py_None;
    PyObject *readonly = Py_None;
    PyObject *deferrable = Py_None;
    PyObject *autocommit = Py_None;

    /* None means "leave unchanged" for every parameter */
    int c_isolevel = SRV_STATE_UNCHANGED;
    int c_readonly = SRV_STATE_UNCHANGED;
    int c_deferrable = SRV_STATE_UNCHANGED;
    int c_autocommit = SRV_STATE_UNCHANGED;

    static char *kwlist[] =
        {"isolation_level", "readonly", "deferrable", "autocommit", NULL};

    _set_session_checks(self, set_session);

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOO", kwlist,
            &isolevel, &readonly, &deferrable, &autocommit)) {
        return NULL;
    }

    if (Py_None != isolevel) {
        if (0 > (c_isolevel = _psyco_conn_parse_isolevel(isolevel))) {
            return NULL;
        }
    }

    if (Py_None != readonly) {
        if (0 > (c_readonly = _psyco_conn_parse_onoff(readonly))) {
            return NULL;
        }
    }
    if (Py_None != deferrable) {
        if (0 > (c_deferrable = _psyco_conn_parse_onoff(deferrable))) {
            return NULL;
        }
    }

    if (Py_None != autocommit) {
        if (-1 == (c_autocommit = PyObject_IsTrue(autocommit))) { return NULL; }
    }

    if (0 > conn_set_session(
            self, c_autocommit, c_isolevel, c_readonly, c_deferrable)) {
        return NULL;
    }

    Py_RETURN_NONE;
}


/* autocommit - return or set the current autocommit status */

#define psyco_conn_autocommit_doc \
"Set or return the autocommit status."

static PyObject *
psyco_conn_autocommit_get(connectionObject *self)
{
    return PyBool_FromLong(self->autocommit);
}

BORROWED static PyObject *
_psyco_set_session_check_setter_wrapper(connectionObject *self)
{
    /* wrapper to use the EXC_IF macros.
     * return NULL in case of error, else whatever */
    _set_session_checks(self, set_session);
    return Py_None;     /* borrowed */
}

static int
psyco_conn_autocommit_set(connectionObject *self, PyObject *pyvalue)
{
    int value;

    if (!_psyco_set_session_check_setter_wrapper(self)) { return -1; }
    if (-1 == (value = PyObject_IsTrue(pyvalue))) { return -1; }
    /* only the autocommit flag changes; the other session settings stay */
    if (0 > conn_set_session(self, value,
            SRV_STATE_UNCHANGED, SRV_STATE_UNCHANGED, SRV_STATE_UNCHANGED)) {
        return -1;
    }

    return 0;
}


/* isolation_level - return or set the current isolation level */

#define psyco_conn_isolation_level_doc \
"Set or return the connection transaction isolation level."

static PyObject *
psyco_conn_isolation_level_get(connectionObject *self)
{
    /* the default level is reported as None, not as a number */
    if (self->isolevel == ISOLATION_LEVEL_DEFAULT) {
        Py_RETURN_NONE;
    } else {
        return PyInt_FromLong((long)self->isolevel);
    }
}


static int
psyco_conn_isolation_level_set(connectionObject *self, PyObject *pyvalue)
{
    int value;

    if (!_psyco_set_session_check_setter_wrapper(self)) { return -1; }
    if (0 > (value = _psyco_conn_parse_isolevel(pyvalue))) { return -1; }
    if (0 > conn_set_session(self, SRV_STATE_UNCHANGED,
            value, SRV_STATE_UNCHANGED, SRV_STATE_UNCHANGED)) {
        return -1;
    }

    return 0;
}


/* set_isolation_level method - switch connection isolation level */

#define psyco_conn_set_isolation_level_doc \
"set_isolation_level(level) -- Switch isolation level to ``level``."

/* Legacy pre-2.4.2 API: level 0 means autocommit, 1-4 map to the server
 * isolation levels.  Implicitly rolls back any open transaction. */
static PyObject *
psyco_conn_set_isolation_level(connectionObject *self, PyObject *args)
{
    int level = 1;
    PyObject *pyval = NULL;

    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, "isolation_level");
    EXC_IF_TPC_PREPARED(self, "isolation_level");

    if (!PyArg_ParseTuple(args, "O", &pyval)) return NULL;

    if (pyval == Py_None) {
        level = ISOLATION_LEVEL_DEFAULT;
    }

    /* parse from one of the level constants */
    else if (PyInt_Check(pyval)) {
        /* NOTE(review): PyInt_AsLong error return (-1) is not checked here,
         * and a non-int, non-None value silently falls through with the
         * initial level = 1 -- confirm against upstream before relying on
         * this for validation */
        level = PyInt_AsLong(pyval);

        if (level < 0 || level > 4) {
            PyErr_SetString(PyExc_ValueError,
                "isolation level must be between 0 and 4");
            return NULL;
        }
    }

    /* the level change only applies to the next transaction: close the
     * current one first */
    if (0 > conn_rollback(self)) {
        return NULL;
    }

    if (level == 0) {
        /* level 0 historically meant "autocommit on" */
        if (0 > conn_set_session(self, 1,
                SRV_STATE_UNCHANGED, SRV_STATE_UNCHANGED, SRV_STATE_UNCHANGED)) {
            return NULL;
        }
    }
    else {
        if (0 > conn_set_session(self, 0,
                level, SRV_STATE_UNCHANGED, SRV_STATE_UNCHANGED)) {
            return NULL;
        }
    }

    Py_RETURN_NONE;
}


/* readonly - return or set the current read-only status */

#define psyco_conn_readonly_doc \
"Set or return the connection read-only status."

static PyObject *
psyco_conn_readonly_get(connectionObject *self)
{
    PyObject *rv = NULL;

    /* tri-state: False / True / None (server default) */
    switch (self->readonly) {
        case STATE_OFF:
            rv = Py_False;
            break;
        case STATE_ON:
            rv = Py_True;
            break;
        case STATE_DEFAULT:
            rv = Py_None;
            break;
        default:
            PyErr_Format(InternalError,
                "bad internal value for readonly: %d", self->readonly);
            break;
    }

    Py_XINCREF(rv);
    return rv;
}


static int
psyco_conn_readonly_set(connectionObject *self, PyObject *pyvalue)
{
    int value;

    if (!_psyco_set_session_check_setter_wrapper(self)) { return -1; }
    if (0 > (value = _psyco_conn_parse_onoff(pyvalue))) { return -1; }
    if (0 > conn_set_session(self, SRV_STATE_UNCHANGED,
            SRV_STATE_UNCHANGED, value, SRV_STATE_UNCHANGED)) {
        return -1;
    }

    return 0;
}


/* deferrable - return or set the current deferrable status */

#define psyco_conn_deferrable_doc \
"Set or return the connection deferrable status."

static PyObject *
psyco_conn_deferrable_get(connectionObject *self)
{
    PyObject *rv = NULL;

    /* tri-state: False / True / None (server default) */
    switch (self->deferrable) {
        case STATE_OFF:
            rv = Py_False;
            break;
        case STATE_ON:
            rv = Py_True;
            break;
        case STATE_DEFAULT:
            rv = Py_None;
            break;
        default:
            PyErr_Format(InternalError,
                "bad internal value for deferrable: %d", self->deferrable);
            break;
    }

    Py_XINCREF(rv);
    return rv;
}


static int
psyco_conn_deferrable_set(connectionObject *self, PyObject *pyvalue)
{
    int value;

    if (!_psyco_set_session_check_setter_wrapper(self)) { return -1; }
    if (0 > (value = _psyco_conn_parse_onoff(pyvalue))) { return -1; }
    if (0 > conn_set_session(self, SRV_STATE_UNCHANGED,
            SRV_STATE_UNCHANGED, SRV_STATE_UNCHANGED, value)) {
        return -1;
    }

    return 0;
}

/* psyco_get_native_connection - expose PGconn* as a Python capsule */

#define psyco_get_native_connection_doc \
"get_native_connection() -- Return the internal PGconn* as a Python Capsule."

static PyObject *
psyco_get_native_connection(connectionObject *self, PyObject *dummy)
{
    EXC_IF_CONN_CLOSED(self);

    /* no destructor: the capsule borrows the pointer, the connection
     * object keeps ownership */
    return PyCapsule_New(self->pgconn, "psycopg2.connection.native_connection", NULL);
}


/* set_client_encoding method - set client encoding */

#define psyco_conn_set_client_encoding_doc \
"set_client_encoding(encoding) -- Set client encoding to ``encoding``."

static PyObject *
psyco_conn_set_client_encoding(connectionObject *self, PyObject *args)
{
    const char *enc;
    PyObject *rv = NULL;

    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, set_client_encoding);
    EXC_IF_TPC_PREPARED(self, set_client_encoding);

    if (!PyArg_ParseTuple(args, "s", &enc)) return NULL;

    if (conn_set_client_encoding(self, enc) >= 0) {
        Py_INCREF(Py_None);
        rv = Py_None;
    }
    return rv;
}

/* get_transaction_status method - Get backend transaction status */

#define psyco_conn_get_transaction_status_doc \
"get_transaction_status() -- Get backend transaction status."

static PyObject *
psyco_conn_get_transaction_status(connectionObject *self, PyObject *dummy)
{
    /* note: no closed-connection check -- PQtransactionStatus handles a
     * bad connection by returning PQTRANS_UNKNOWN */
    return PyInt_FromLong((long)PQtransactionStatus(self->pgconn));
}

/* get_parameter_status method - Get server parameter status */

#define psyco_conn_get_parameter_status_doc \
"get_parameter_status(parameter) -- Get backend parameter status.\n\n" \
"Potential values for ``parameter``:\n" \
"  server_version, server_encoding, client_encoding, is_superuser,\n" \
"  session_authorization, DateStyle, TimeZone, integer_datetimes,\n" \
"  and standard_conforming_strings\n" \
"If server did not report requested parameter, None is returned.\n\n" \
"See libpq docs for PQparameterStatus() for further details."
+ +static PyObject * +psyco_conn_get_parameter_status(connectionObject *self, PyObject *args) +{ + const char *param = NULL; + const char *val = NULL; + + EXC_IF_CONN_CLOSED(self); + + if (!PyArg_ParseTuple(args, "s", ¶m)) return NULL; + + val = PQparameterStatus(self->pgconn, param); + if (!val) { + Py_RETURN_NONE; + } + return conn_text_from_chars(self, val); +} + +/* get_dsn_parameters method - Get connection parameters */ + +#define psyco_conn_get_dsn_parameters_doc \ +"get_dsn_parameters() -- Get effective connection parameters.\n\n" + +static PyObject * +psyco_conn_get_dsn_parameters(connectionObject *self, PyObject *dummy) +{ +#if PG_VERSION_NUM >= 90300 + PyObject *res = NULL; + PQconninfoOption *options = NULL; + + EXC_IF_CONN_CLOSED(self); + + if (!(options = PQconninfo(self->pgconn))) { + PyErr_NoMemory(); + goto exit; + } + + res = psyco_dict_from_conninfo_options(options, /* include_password = */ 0); + +exit: + PQconninfoFree(options); + + return res; +#else + PyErr_SetString(NotSupportedError, "PQconninfo not available in libpq < 9.3"); + return NULL; +#endif +} + + +/* lobject method - allocate a new lobject */ + +#define psyco_conn_lobject_doc \ +"lobject(oid=0, mode=0, new_oid=0, new_file=None,\n" \ +" lobject_factory=extensions.lobject) -- new lobject\n\n" \ +"Return a new lobject.\n\nThe ``lobject_factory`` argument can be used\n" \ +"to create non-standard lobjects by passing a class different from the\n" \ +"default. 
Note that the new class *should* be a sub-class of\n" \ +"`extensions.lobject`.\n\n" \ +":rtype: `extensions.lobject`" + +static PyObject * +psyco_conn_lobject(connectionObject *self, PyObject *args, PyObject *keywds) +{ + Oid oid = InvalidOid, new_oid = InvalidOid; + const char *new_file = NULL; + const char *smode = ""; + PyObject *factory = (PyObject *)&lobjectType; + PyObject *obj; + + static char *kwlist[] = {"oid", "mode", "new_oid", "new_file", + "lobject_factory", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, keywds, "|IzIzO", kwlist, + &oid, &smode, &new_oid, &new_file, + &factory)) { + return NULL; + } + + EXC_IF_CONN_CLOSED(self); + EXC_IF_CONN_ASYNC(self, lobject); + EXC_IF_GREEN(lobject); + EXC_IF_TPC_PREPARED(self, lobject); + + Dprintf("psyco_conn_lobject: new lobject for connection at %p", self); + Dprintf("psyco_conn_lobject: parameters: oid = %u, mode = %s", + oid, smode); + Dprintf("psyco_conn_lobject: parameters: new_oid = %u, new_file = %s", + new_oid, new_file); + + if (new_file) + obj = PyObject_CallFunction(factory, "OIsIs", + self, oid, smode, new_oid, new_file); + else + obj = PyObject_CallFunction(factory, "OIsI", + self, oid, smode, new_oid); + + if (obj == NULL) return NULL; + if (PyObject_IsInstance(obj, (PyObject *)&lobjectType) == 0) { + PyErr_SetString(PyExc_TypeError, + "lobject factory must be subclass of psycopg2.extensions.lobject"); + Py_DECREF(obj); + return NULL; + } + + Dprintf("psyco_conn_lobject: new lobject at %p: refcnt = " + FORMAT_CODE_PY_SSIZE_T, + obj, Py_REFCNT(obj)); + return obj; +} + +/* get the current backend pid */ + +#define psyco_conn_get_backend_pid_doc \ +"get_backend_pid() -- Get backend process id." + +static PyObject * +psyco_conn_get_backend_pid(connectionObject *self, PyObject *dummy) +{ + EXC_IF_CONN_CLOSED(self); + + return PyInt_FromLong((long)PQbackendPID(self->pgconn)); +} + + +/* get info about the connection */ + +#define psyco_conn_info_doc \ +"info -- Get connection info." 

static PyObject *
psyco_conn_info_get(connectionObject *self)
{
    /* build a fresh ConnectionInfo wrapper around this connection */
    return PyObject_CallFunctionObjArgs(
        (PyObject *)&connInfoType, (PyObject *)self, NULL);
}


/* return the pointer to the PGconn structure */

#define psyco_conn_pgconn_ptr_doc \
"pgconn_ptr -- Get the PGconn structure pointer."

static PyObject *
psyco_conn_pgconn_ptr_get(connectionObject *self)
{
    if (self->pgconn) {
        return PyLong_FromVoidPtr((void *)self->pgconn);
    }
    else {
        Py_RETURN_NONE;
    }
}


/* reset the currect connection */

#define psyco_conn_reset_doc \
"reset() -- Reset current connection to defaults."

static PyObject *
psyco_conn_reset(connectionObject *self, PyObject *dummy)
{
    int res;

    EXC_IF_CONN_CLOSED(self);
    EXC_IF_CONN_ASYNC(self, reset);

    if (pq_reset(self) < 0)
        return NULL;

    /* re-run the session setup performed on connect */
    res = conn_setup(self);
    if (res < 0)
        return NULL;

    Py_RETURN_NONE;
}

/* generic getter exposing a module-level exception via `closure` */
static PyObject *
psyco_conn_get_exception(PyObject *self, void *closure)
{
    PyObject *exception = *(PyObject **)closure;

    Py_INCREF(exception);
    return exception;
}


#define psyco_conn_poll_doc \
"poll() -> int -- Advance the connection or query process without blocking."

static PyObject *
psyco_conn_poll(connectionObject *self, PyObject *dummy)
{
    int res;

    EXC_IF_CONN_CLOSED(self);

    res = conn_poll(self);
    if (res != PSYCO_POLL_ERROR || !PyErr_Occurred()) {
        return PyInt_FromLong(res);
    } else {
        /* There is an error and an exception is already in place */
        return NULL;
    }
}


#define psyco_conn_fileno_doc \
"fileno() -> int -- Return file descriptor associated to database connection."

static PyObject *
psyco_conn_fileno(connectionObject *self, PyObject *dummy)
{
    long int socket;

    EXC_IF_CONN_CLOSED(self);

    socket = (long int)PQsocket(self->pgconn);

    return PyInt_FromLong(socket);
}


#define psyco_conn_isexecuting_doc \
"isexecuting() -> bool -- Return True if the connection is " \
    "executing an asynchronous operation."

static PyObject *
psyco_conn_isexecuting(connectionObject *self, PyObject *dummy)
{
    /* synchronous connections will always return False */
    if (self->async == 0) {
        Py_RETURN_FALSE;
    }

    /* check if the connection is still being built */
    if (self->status != CONN_STATUS_READY) {
        Py_RETURN_TRUE;
    }

    /* check if there is a query being executed */
    if (self->async_cursor != NULL) {
        Py_RETURN_TRUE;
    }

    /* otherwise it's not executing */
    Py_RETURN_FALSE;
}


#define psyco_conn_cancel_doc \
"cancel() -- cancel the current operation"

static PyObject *
psyco_conn_cancel(connectionObject *self, PyObject *dummy)
{
    char errbuf[256];

    EXC_IF_CONN_CLOSED(self);
    EXC_IF_TPC_PREPARED(self, cancel);

    /* do not allow cancellation while the connection is being built */
    Dprintf("psyco_conn_cancel: cancelling with key %p", self->cancel);
    if (self->status != CONN_STATUS_READY &&
        self->status != CONN_STATUS_BEGIN) {
        PyErr_SetString(OperationalError,
                        "asynchronous connection attempt underway");
        return NULL;
    }

    if (PQcancel(self->cancel, errbuf, sizeof(errbuf)) == 0) {
        Dprintf("psyco_conn_cancel: cancelling failed: %s", errbuf);
        PyErr_SetString(OperationalError, errbuf);
        return NULL;
    }
    Py_RETURN_NONE;
}


/** the connection object **/


/* object method list */

static struct PyMethodDef connectionObject_methods[] = {
    {"cursor", (PyCFunction)psyco_conn_cursor,
     METH_VARARGS|METH_KEYWORDS, psyco_conn_cursor_doc},
    {"close", (PyCFunction)psyco_conn_close,
     METH_NOARGS, psyco_conn_close_doc},
    {"commit", (PyCFunction)psyco_conn_commit,
     METH_NOARGS, psyco_conn_commit_doc},
    {"rollback", (PyCFunction)psyco_conn_rollback,
     METH_NOARGS, psyco_conn_rollback_doc},
    {"xid", (PyCFunction)psyco_conn_xid,
     METH_VARARGS|METH_KEYWORDS, psyco_conn_xid_doc},
    {"tpc_begin", (PyCFunction)psyco_conn_tpc_begin,
     METH_VARARGS, psyco_conn_tpc_begin_doc},
    {"tpc_prepare", (PyCFunction)psyco_conn_tpc_prepare,
     METH_NOARGS, psyco_conn_tpc_prepare_doc},
    {"tpc_commit", (PyCFunction)psyco_conn_tpc_commit,
     METH_VARARGS, psyco_conn_tpc_commit_doc},
    {"tpc_rollback", (PyCFunction)psyco_conn_tpc_rollback,
     METH_VARARGS, psyco_conn_tpc_rollback_doc},
    {"tpc_recover", (PyCFunction)psyco_conn_tpc_recover,
     METH_NOARGS, psyco_conn_tpc_recover_doc},
    {"__enter__", (PyCFunction)psyco_conn_enter,
     METH_NOARGS, psyco_conn_enter_doc},
    {"__exit__", (PyCFunction)psyco_conn_exit,
     METH_VARARGS, psyco_conn_exit_doc},
    {"set_session", (PyCFunction)psyco_conn_set_session,
     METH_VARARGS|METH_KEYWORDS, psyco_conn_set_session_doc},
    {"set_isolation_level", (PyCFunction)psyco_conn_set_isolation_level,
     METH_VARARGS, psyco_conn_set_isolation_level_doc},
    {"set_client_encoding", (PyCFunction)psyco_conn_set_client_encoding,
     METH_VARARGS, psyco_conn_set_client_encoding_doc},
    {"get_transaction_status", (PyCFunction)psyco_conn_get_transaction_status,
     METH_NOARGS, psyco_conn_get_transaction_status_doc},
    {"get_parameter_status", (PyCFunction)psyco_conn_get_parameter_status,
     METH_VARARGS, psyco_conn_get_parameter_status_doc},
    {"get_dsn_parameters", (PyCFunction)psyco_conn_get_dsn_parameters,
     METH_NOARGS, psyco_conn_get_dsn_parameters_doc},
    {"get_backend_pid", (PyCFunction)psyco_conn_get_backend_pid,
     METH_NOARGS, psyco_conn_get_backend_pid_doc},
    {"lobject", (PyCFunction)psyco_conn_lobject,
     METH_VARARGS|METH_KEYWORDS, psyco_conn_lobject_doc},
    {"reset", (PyCFunction)psyco_conn_reset,
     METH_NOARGS, psyco_conn_reset_doc},
    {"poll", (PyCFunction)psyco_conn_poll,
     METH_NOARGS, psyco_conn_poll_doc},
    {"fileno", (PyCFunction)psyco_conn_fileno,
     METH_NOARGS, psyco_conn_fileno_doc},
    {"isexecuting", (PyCFunction)psyco_conn_isexecuting,
     METH_NOARGS, psyco_conn_isexecuting_doc},
    {"cancel", (PyCFunction)psyco_conn_cancel,
     METH_NOARGS, psyco_conn_cancel_doc},
    {"get_native_connection", (PyCFunction)psyco_get_native_connection,
     METH_NOARGS, psyco_get_native_connection_doc},
    {NULL}
};

/* object member list */

static struct PyMemberDef connectionObject_members[] = {
    {"closed", T_LONG, offsetof(connectionObject, closed), READONLY,
        "True if the connection is closed."},
    {"encoding", T_STRING, offsetof(connectionObject, encoding), READONLY,
        "The current client encoding."},
    {"notices", T_OBJECT, offsetof(connectionObject, notice_list), 0},
    {"notifies", T_OBJECT, offsetof(connectionObject, notifies), 0},
    {"dsn", T_STRING, offsetof(connectionObject, dsn), READONLY,
        "The current connection string."},
    /* "async" is a keyword from Python 3.7: "async_" is the spelling usable
     * from modern Python code; both map to the same field */
    {"async", T_LONG, offsetof(connectionObject, async), READONLY,
        "True if the connection is asynchronous."},
    {"async_", T_LONG, offsetof(connectionObject, async), READONLY,
        "True if the connection is asynchronous."},
    {"status", T_INT,
        offsetof(connectionObject, status), READONLY,
        "The current transaction status."},
    {"cursor_factory", T_OBJECT, offsetof(connectionObject, cursor_factory), 0,
        "Default cursor_factory for cursor()."},
    {"string_types", T_OBJECT, offsetof(connectionObject, string_types), READONLY,
        "A set of typecasters to convert textual values."},
    {"binary_types", T_OBJECT, offsetof(connectionObject, binary_types), READONLY,
        "A set of typecasters to convert binary values."},
    {"protocol_version", T_INT,
        offsetof(connectionObject, protocol), READONLY,
        "Protocol version used for this connection. Currently always 3."},
    {"server_version", T_INT,
        offsetof(connectionObject, server_version), READONLY,
        "Server version."},
    {NULL}
};

#define EXCEPTION_GETTER(exc) \
    { #exc, psyco_conn_get_exception, NULL, exc ## _doc, &exc }

static struct PyGetSetDef connectionObject_getsets[] = {
    EXCEPTION_GETTER(Error),
    EXCEPTION_GETTER(Warning),
    EXCEPTION_GETTER(InterfaceError),
    EXCEPTION_GETTER(DatabaseError),
    EXCEPTION_GETTER(InternalError),
    EXCEPTION_GETTER(OperationalError),
    EXCEPTION_GETTER(ProgrammingError),
    EXCEPTION_GETTER(IntegrityError),
    EXCEPTION_GETTER(DataError),
    EXCEPTION_GETTER(NotSupportedError),
    { "autocommit",
        (getter)psyco_conn_autocommit_get,
        (setter)psyco_conn_autocommit_set,
        psyco_conn_autocommit_doc },
    { "isolation_level",
        (getter)psyco_conn_isolation_level_get,
        (setter)psyco_conn_isolation_level_set,
        psyco_conn_isolation_level_doc },
    { "readonly",
        (getter)psyco_conn_readonly_get,
        (setter)psyco_conn_readonly_set,
        psyco_conn_readonly_doc },
    { "deferrable",
        (getter)psyco_conn_deferrable_get,
        (setter)psyco_conn_deferrable_set,
        psyco_conn_deferrable_doc },
    { "info",
        (getter)psyco_conn_info_get, NULL,
        psyco_conn_info_doc },
    { "pgconn_ptr",
        (getter)psyco_conn_pgconn_ptr_get, NULL,
        psyco_conn_pgconn_ptr_doc },
    {NULL}
};
#undef EXCEPTION_GETTER

/* initialization and finalization methods */

/* One-time setup of a freshly allocated connection object: create the
 * Python-side containers, init the mutex and open the libpq connection.
 * Return 0 on success, -1 on error with the Python exception set. */
static int
connection_setup(connectionObject *self, const char *dsn, long int async)
{
    int rv = -1;

    Dprintf("connection_setup: init connection object at %p, "
            "async %ld, refcnt = " FORMAT_CODE_PY_SSIZE_T,
            self, async, Py_REFCNT(self)
      );

    /* store the dsn with the password scrubbed out */
    if (!(self->dsn = conn_obscure_password(dsn))) { goto exit; }
    if (!(self->notice_list = PyList_New(0))) { goto exit; }
    if (!(self->notifies = PyList_New(0))) { goto exit; }
    self->async = async;
    self->status = CONN_STATUS_SETUP;
    self->async_status = ASYNC_DONE;
    if (!(self->string_types = PyDict_New())) { goto exit; }
    if (!(self->binary_types = PyDict_New())) { goto exit; }
    self->isolevel = ISOLATION_LEVEL_DEFAULT;
    self->readonly = STATE_DEFAULT;
    self->deferrable = STATE_DEFAULT;
#ifdef CONN_CHECK_PID
    /* remember the creating process: see connection_dealloc */
    self->procpid = getpid();
#endif

    /* other fields have been zeroed by tp_alloc */

    if (0 != pthread_mutex_init(&(self->lock), NULL)) {
        PyErr_SetString(InternalError, "lock initialization failed");
        goto exit;
    }

    if (conn_connect(self, dsn, async) != 0) {
        Dprintf("connection_init: FAILED");
        goto exit;
    }

    rv = 0;

    Dprintf("connection_setup: good connection object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        self, Py_REFCNT(self));

exit:
    return rv;
}


/* drop all Python object references held by the connection (GC tp_clear) */
static int
connection_clear(connectionObject *self)
{
    Py_CLEAR(self->tpc_xid);
    Py_CLEAR(self->async_cursor);
    Py_CLEAR(self->notice_list);
    Py_CLEAR(self->notifies);
    Py_CLEAR(self->string_types);
    Py_CLEAR(self->binary_types);
    Py_CLEAR(self->cursor_factory);
    Py_CLEAR(self->pyencoder);
    Py_CLEAR(self->pydecoder);
    return 0;
}

static void
connection_dealloc(PyObject* obj)
{
    connectionObject *self = (connectionObject *)obj;

    /* Make sure to untrack the connection before calling conn_close, which may
     * allow a different thread to try and dealloc the connection again,
     * resulting in a double-free segfault (ticket #166). */
    PyObject_GC_UnTrack(self);

    /* close the connection only if this is the same process it was created
     * into, otherwise using multiprocessing we may close the connection
     * belonging to another process.
 */
#ifdef CONN_CHECK_PID
    if (self->procpid == getpid())
#endif
    {
        conn_close(self);
    }

    if (self->weakreflist) {
        PyObject_ClearWeakRefs(obj);
    }

    conn_notice_clean(self);

    PyMem_Free(self->dsn);
    PyMem_Free(self->encoding);
    if (self->error) free(self->error);
    if (self->cancel) PQfreeCancel(self->cancel);
    PQclear(self->pgres);

    connection_clear(self);

    pthread_mutex_destroy(&(self->lock));

    Dprintf("connection_dealloc: deleted connection object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        obj, Py_REFCNT(obj)
      );

    Py_TYPE(obj)->tp_free(obj);
}

static int
connection_init(PyObject *obj, PyObject *args, PyObject *kwds)
{
    const char *dsn;
    long int async = 0, async_ = 0;
    /* "async_" accepted as alias since "async" became a keyword in Py 3.7 */
    static char *kwlist[] = {"dsn", "async", "async_", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|ll", kwlist,
            &dsn, &async, &async_))
        return -1;

    if (async_) { async = async_; }
    return connection_setup((connectionObject *)obj, dsn, async);
}

static PyObject *
connection_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    return type->tp_alloc(type, 0);
}

static PyObject *
connection_repr(connectionObject *self)
{
    /* NOTE(review): the format string was emptied by extraction (its
     * angle-bracketed text was stripped); upstream reads
     * "<connection object at %p; dsn: '%s', closed: %ld>" -- confirm and
     * restore before use */
    return PyString_FromFormat(
        "",
        self, (self->dsn ? self->dsn : ""), self->closed);
}

/* GC tp_traverse: visit every owned Python object reference */
static int
connection_traverse(connectionObject *self, visitproc visit, void *arg)
{
    Py_VISIT((PyObject *)(self->tpc_xid));
    Py_VISIT(self->async_cursor);
    Py_VISIT(self->notice_list);
    Py_VISIT(self->notifies);
    Py_VISIT(self->string_types);
    Py_VISIT(self->binary_types);
    Py_VISIT(self->cursor_factory);
    Py_VISIT(self->pyencoder);
    Py_VISIT(self->pydecoder);
    return 0;
}


/* object type */

#define connectionType_doc \
"connection(dsn, ...)
-> new connection object\n\n" \ +":Groups:\n" \ +" * `DBAPI-2.0 errors`: Error, Warning, InterfaceError,\n" \ +" DatabaseError, InternalError, OperationalError,\n" \ +" ProgrammingError, IntegrityError, DataError, NotSupportedError" + +PyTypeObject connectionType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.connection", + sizeof(connectionObject), 0, + connection_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)connection_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)connection_repr, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | + Py_TPFLAGS_HAVE_WEAKREFS, + /*tp_flags*/ + connectionType_doc, /*tp_doc*/ + (traverseproc)connection_traverse, /*tp_traverse*/ + (inquiry)connection_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(connectionObject, weakreflist), /* tp_weaklistoffset */ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + connectionObject_methods, /*tp_methods*/ + connectionObject_members, /*tp_members*/ + connectionObject_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + connection_init, /*tp_init*/ + 0, /*tp_alloc*/ + connection_new, /*tp_new*/ +}; diff --git a/psycopg/conninfo.h b/psycopg/conninfo.h new file mode 100644 index 0000000000000000000000000000000000000000..6887d4b548cafcee4016a3effc1cea05c1b908aa --- /dev/null +++ b/psycopg/conninfo.h @@ -0,0 +1,41 @@ +/* connection.h - definition for the psycopg ConnectionInfo type + * + * Copyright (C) 2018-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_CONNINFO_H +#define PSYCOPG_CONNINFO_H 1 + +#include "psycopg/connection.h" + +extern HIDDEN PyTypeObject connInfoType; + +typedef struct { + PyObject_HEAD + + connectionObject *conn; + +} connInfoObject; + +#endif /* PSYCOPG_CONNINFO_H */ diff --git a/psycopg/conninfo_type.c b/psycopg/conninfo_type.c new file mode 100644 index 0000000000000000000000000000000000000000..9a10c94f28947bdc432e76986a7a24bbb61f5cbe --- /dev/null +++ b/psycopg/conninfo_type.c @@ -0,0 +1,648 @@ +/* conninfo_type.c - present information about the libpq connection + * + * Copyright (C) 2018-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/conninfo.h" + + +static const char connInfoType_doc[] = +"Details about the native PostgreSQL database connection.\n" +"\n" +"This class exposes several `informative functions`__ about the status\n" +"of the libpq connection.\n" +"\n" +"Objects of this class are exposed as the `connection.info` attribute.\n" +"\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html"; + + +static const char dbname_doc[] = +"The database name of the connection.\n" +"\n" +".. seealso:: libpq docs for `PQdb()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQDB"; + +static PyObject * +dbname_get(connInfoObject *self) +{ + const char *val; + + val = PQdb(self->conn->pgconn); + if (!val) { + Py_RETURN_NONE; + } + return conn_text_from_chars(self->conn, val); +} + + +static const char user_doc[] = +"The user name of the connection.\n" +"\n" +".. seealso:: libpq docs for `PQuser()`__ for details.\n" +".. 
__: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQUSER"; + +static PyObject * +user_get(connInfoObject *self) +{ + const char *val; + + val = PQuser(self->conn->pgconn); + if (!val) { + Py_RETURN_NONE; + } + return conn_text_from_chars(self->conn, val); +} + + +static const char password_doc[] = +"The password of the connection.\n" +"\n" +".. seealso:: libpq docs for `PQpass()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQPASS"; + +static PyObject * +password_get(connInfoObject *self) +{ + const char *val; + + val = PQpass(self->conn->pgconn); + if (!val) { + Py_RETURN_NONE; + } + return conn_text_from_chars(self->conn, val); +} + + +static const char host_doc[] = +"The server host name of the connection.\n" +"\n" +"This can be a host name, an IP address, or a directory path if the\n" +"connection is via Unix socket. (The path case can be distinguished\n" +"because it will always be an absolute path, beginning with ``/``.)\n" +"\n" +".. seealso:: libpq docs for `PQhost()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQHOST"; + +static PyObject * +host_get(connInfoObject *self) +{ + const char *val; + + val = PQhost(self->conn->pgconn); + if (!val) { + Py_RETURN_NONE; + } + return conn_text_from_chars(self->conn, val); +} + + +static const char port_doc[] = +"The port of the connection.\n" +"\n" +":type: `!int`\n" +"\n" +".. seealso:: libpq docs for `PQport()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQPORT"; + +static PyObject * +port_get(connInfoObject *self) +{ + const char *val; + + val = PQport(self->conn->pgconn); + if (!val || !val[0]) { + Py_RETURN_NONE; + } + return PyInt_FromString((char *)val, NULL, 10); +} + + +static const char options_doc[] = +"The command-line options passed in the connection request.\n" +"\n" +".. 
seealso:: libpq docs for `PQoptions()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQOPTIONS"; + +static PyObject * +options_get(connInfoObject *self) +{ + const char *val; + + val = PQoptions(self->conn->pgconn); + if (!val) { + Py_RETURN_NONE; + } + return conn_text_from_chars(self->conn, val); +} + + +static const char dsn_parameters_doc[] = +"The effective connection parameters.\n" +"\n" +":type: `!dict`\n" +"\n" +"The results include values which weren't explicitly set by the connection\n" +"string, such as defaults, environment variables, etc.\n" +"The *password* parameter is removed from the results.\n" +"\n" +".. seealso:: libpq docs for `PQconninfo()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/libpq-connect.html" + "#LIBPQ-PQCONNINFO"; + +static PyObject * +dsn_parameters_get(connInfoObject *self) +{ +#if PG_VERSION_NUM >= 90300 + PyObject *res = NULL; + PQconninfoOption *options = NULL; + + EXC_IF_CONN_CLOSED(self->conn); + + if (!(options = PQconninfo(self->conn->pgconn))) { + PyErr_NoMemory(); + goto exit; + } + + res = psyco_dict_from_conninfo_options(options, /* include_password = */ 0); + +exit: + PQconninfoFree(options); + + return res; +#else + PyErr_SetString(NotSupportedError, "PQconninfo not available in libpq < 9.3"); + return NULL; +#endif +} + + +static const char status_doc[] = +"The status of the connection.\n" +"\n" +":type: `!int`\n" +"\n" +".. seealso:: libpq docs for `PQstatus()`__ for details.\n" +".. 
__: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQSTATUS"; + +static PyObject * +status_get(connInfoObject *self) +{ + ConnStatusType val; + + val = PQstatus(self->conn->pgconn); + return PyInt_FromLong((long)val); +} + + +static const char transaction_status_doc[] = +"The current in-transaction status of the connection.\n" +"\n" +"Symbolic constants for the values are defined in the module\n" +"`psycopg2.extensions`: see :ref:`transaction-status-constants` for the\n" +"available values.\n" +"\n" +":type: `!int`\n" +"\n" +".. seealso:: libpq docs for `PQtransactionStatus()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQTRANSACTIONSTATUS"; + +static PyObject * +transaction_status_get(connInfoObject *self) +{ + PGTransactionStatusType val; + + val = PQtransactionStatus(self->conn->pgconn); + return PyInt_FromLong((long)val); +} + + +static const char parameter_status_doc[] = +"Looks up a current parameter setting of the server.\n" +"\n" +":param name: The name of the parameter to return.\n" +":type name: `!str`\n" +":return: The parameter value, `!None` if the parameter is unknown.\n" +":rtype: `!str`\n" +"\n" +".. seealso:: libpq docs for `PQparameterStatus()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQPARAMETERSTATUS"; + +static PyObject * +parameter_status(connInfoObject *self, PyObject *args, PyObject *kwargs) +{ + static char *kwlist[] = {"name", NULL}; + const char *name; + const char *val; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &name)) { + return NULL; + } + + val = PQparameterStatus(self->conn->pgconn, name); + + if (!val) { + Py_RETURN_NONE; + } + else { + return conn_text_from_chars(self->conn, val); + } +} + + +static const char protocol_version_doc[] = +"The frontend/backend protocol being used.\n" +"\n" +":type: `!int`\n" +"\n" +".. 
seealso:: libpq docs for `PQprotocolVersion()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQPROTOCOLVERSION"; + +static PyObject * +protocol_version_get(connInfoObject *self) +{ + int val; + + val = PQprotocolVersion(self->conn->pgconn); + return PyInt_FromLong((long)val); +} + + +static const char server_version_doc[] = +"Returns an integer representing the server version.\n" +"\n" +":type: `!int`\n" +"\n" +".. seealso:: libpq docs for `PQserverVersion()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQSERVERVERSION"; + +static PyObject * +server_version_get(connInfoObject *self) +{ + int val; + + val = PQserverVersion(self->conn->pgconn); + return PyInt_FromLong((long)val); +} + + +static const char error_message_doc[] = +"The error message most recently generated by an operation on the connection.\n" +"\n" +"`!None` if there is no current message.\n" +"\n" +".. seealso:: libpq docs for `PQerrorMessage()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQERRORMESSAGE"; + +static PyObject * +error_message_get(connInfoObject *self) +{ + const char *val; + + val = PQerrorMessage(self->conn->pgconn); + if (!val || !val[0]) { + Py_RETURN_NONE; + } + return conn_text_from_chars(self->conn, val); +} + + +static const char socket_doc[] = +"The file descriptor number of the connection socket to the server.\n" +"\n" +":type: `!int`\n" +"\n" +".. seealso:: libpq docs for `PQsocket()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQSOCKET"; + +static PyObject * +socket_get(connInfoObject *self) +{ + int val; + + val = PQsocket(self->conn->pgconn); + return PyInt_FromLong((long)val); +} + + +static const char backend_pid_doc[] = +"The process ID (PID) of the backend process you connected to.\n" +"\n" +":type: `!int`\n" +"\n" +".. 
seealso:: libpq docs for `PQbackendPID()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQBACKENDPID"; + +static PyObject * +backend_pid_get(connInfoObject *self) +{ + int val; + + val = PQbackendPID(self->conn->pgconn); + return PyInt_FromLong((long)val); +} + + +static const char needs_password_doc[] = +"The connection authentication method required a password, but none was available.\n" +"\n" +":type: `!bool`\n" +"\n" +".. seealso:: libpq docs for `PQconnectionNeedsPassword()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQCONNECTIONNEEDSPASSWORD"; + +static PyObject * +needs_password_get(connInfoObject *self) +{ + return PyBool_FromLong(PQconnectionNeedsPassword(self->conn->pgconn)); +} + + +static const char used_password_doc[] = +"The connection authentication method used a password.\n" +"\n" +":type: `!bool`\n" +"\n" +".. seealso:: libpq docs for `PQconnectionUsedPassword()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQCONNECTIONUSEDPASSWORD"; + +static PyObject * +used_password_get(connInfoObject *self) +{ + return PyBool_FromLong(PQconnectionUsedPassword(self->conn->pgconn)); +} + + +static const char ssl_in_use_doc[] = +"`!True` if the connection uses SSL, `!False` if not.\n" +"\n" +"Only available if psycopg was built with libpq >= 9.5; raise\n" +"`~psycopg2.NotSupportedError` otherwise.\n" +"\n" +":type: `!bool`\n" +"\n" +".. seealso:: libpq docs for `PQsslInUse()`__ for details.\n" +".. 
__: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQSSLINUSE"; + +static PyObject * +ssl_in_use_get(connInfoObject *self) +{ + PyObject *rv = NULL; + +#if PG_VERSION_NUM >= 90500 + rv = PyBool_FromLong(PQsslInUse(self->conn->pgconn)); +#else + PyErr_SetString(NotSupportedError, + "'ssl_in_use' not available in libpq < 9.5"); +#endif + return rv; +} + + +static const char ssl_attribute_doc[] = +"Returns SSL-related information about the connection.\n" +"\n" +":param name: The name of the attribute to return.\n" +":type name: `!str`\n" +":return: The attribute value, `!None` if unknown.\n" +":rtype: `!str`\n" +"\n" +"Only available if psycopg was built with libpq >= 9.5; raise\n" +"`~psycopg2.NotSupportedError` otherwise.\n" +"\n" +"Valid names are available in `ssl_attribute_names`.\n" +"\n" +".. seealso:: libpq docs for `PQsslAttribute()`__ for details.\n" +".. __: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQSSLATTRIBUTE"; + +static PyObject * +ssl_attribute(connInfoObject *self, PyObject *args, PyObject *kwargs) +{ + static char *kwlist[] = {"name", NULL}; + const char *name; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &name)) { + return NULL; + } + +#if PG_VERSION_NUM >= 90500 + { + const char *val; + + val = PQsslAttribute(self->conn->pgconn, name); + + if (!val) { + Py_RETURN_NONE; + } + else { + return conn_text_from_chars(self->conn, val); + } + } +#else + PyErr_SetString(NotSupportedError, + "'ssl_attribute()' not available in libpq < 9.5"); + return NULL; +#endif +} + +static const char ssl_attribute_names_doc[] = +"The list of the SSL attribute names available.\n" +"\n" +":type: `!list` of `!str`\n" +"\n" +"Only available if psycopg was built with libpq >= 9.5; raise\n" +"`~psycopg2.NotSupportedError` otherwise.\n" +"\n" +".. seealso:: libpq docs for `PQsslAttributeNames()`__ for details.\n" +".. 
__: https://www.postgresql.org/docs/current/static/libpq-status.html" + "#LIBPQ-PQSSLATTRIBUTENAMES"; + +static PyObject * +ssl_attribute_names_get(connInfoObject *self) +{ +#if PG_VERSION_NUM >= 90500 + const char* const* names; + int i; + PyObject *l = NULL, *s = NULL, *rv = NULL; + + names = PQsslAttributeNames(self->conn->pgconn); + if (!(l = PyList_New(0))) { goto exit; } + + for (i = 0; names[i]; i++) { + if (!(s = conn_text_from_chars(self->conn, names[i]))) { goto exit; } + if (0 != PyList_Append(l, s)) { goto exit; } + Py_CLEAR(s); + } + + rv = l; + l = NULL; + +exit: + Py_XDECREF(l); + Py_XDECREF(s); + return rv; +#else + PyErr_SetString(NotSupportedError, + "'ssl_attribute_names not available in libpq < 9.5"); + return NULL; +#endif +} + + +static struct PyGetSetDef connInfoObject_getsets[] = { + { "dbname", (getter)dbname_get, NULL, (char *)dbname_doc }, + { "user", (getter)user_get, NULL, (char *)user_doc }, + { "password", (getter)password_get, NULL, (char *)password_doc }, + { "host", (getter)host_get, NULL, (char *)host_doc }, + { "port", (getter)port_get, NULL, (char *)port_doc }, + { "options", (getter)options_get, NULL, (char *)options_doc }, + { "dsn_parameters", (getter)dsn_parameters_get, NULL, + (char *)dsn_parameters_doc }, + { "status", (getter)status_get, NULL, (char *)status_doc }, + { "transaction_status", (getter)transaction_status_get, NULL, + (char *)transaction_status_doc }, + { "protocol_version", (getter)protocol_version_get, NULL, + (char *)protocol_version_doc }, + { "server_version", (getter)server_version_get, NULL, + (char *)server_version_doc }, + { "error_message", (getter)error_message_get, NULL, + (char *)error_message_doc }, + { "socket", (getter)socket_get, NULL, (char *)socket_doc }, + { "backend_pid", (getter)backend_pid_get, NULL, (char *)backend_pid_doc }, + { "used_password", (getter)used_password_get, NULL, + (char *)used_password_doc }, + { "needs_password", (getter)needs_password_get, NULL, + (char 
*)needs_password_doc }, + { "ssl_in_use", (getter)ssl_in_use_get, NULL, + (char *)ssl_in_use_doc }, + { "ssl_attribute_names", (getter)ssl_attribute_names_get, NULL, + (char *)ssl_attribute_names_doc }, + {NULL} +}; + +static struct PyMethodDef connInfoObject_methods[] = { + {"ssl_attribute", (PyCFunction)ssl_attribute, + METH_VARARGS|METH_KEYWORDS, ssl_attribute_doc}, + {"parameter_status", (PyCFunction)parameter_status, + METH_VARARGS|METH_KEYWORDS, parameter_status_doc}, + {NULL} +}; + +/* initialization and finalization methods */ + +static PyObject * +conninfo_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + +static int +conninfo_init(connInfoObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *conn = NULL; + + if (!PyArg_ParseTuple(args, "O", &conn)) + return -1; + + if (!PyObject_TypeCheck(conn, &connectionType)) { + PyErr_SetString(PyExc_TypeError, + "The argument must be a psycopg2 connection"); + return -1; + } + + Py_INCREF(conn); + self->conn = (connectionObject *)conn; + return 0; +} + +static void +conninfo_dealloc(connInfoObject* self) +{ + Py_CLEAR(self->conn); + Py_TYPE(self)->tp_free((PyObject *)self); +} + + +/* object type */ + +PyTypeObject connInfoType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.ConnectionInfo", + sizeof(connInfoObject), 0, + (destructor)conninfo_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + connInfoType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + connInfoObject_methods, /*tp_methods*/ + 0, /*tp_members*/ + connInfoObject_getsets, /*tp_getset*/ + 
0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)conninfo_init, /*tp_init*/ + 0, /*tp_alloc*/ + conninfo_new, /*tp_new*/ +}; diff --git a/psycopg/cursor.h b/psycopg/cursor.h new file mode 100644 index 0000000000000000000000000000000000000000..b50894c1328ce338471d1dfb76f9a6db448f8b5e --- /dev/null +++ b/psycopg/cursor.h @@ -0,0 +1,147 @@ +/* cursor.h - definition for the psycopg cursor type + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_CURSOR_H +#define PSYCOPG_CURSOR_H 1 + +#include "psycopg/connection.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject cursorType; + +/* the typedef is forward-declared in psycopg.h */ +struct cursorObject { + PyObject_HEAD + + connectionObject *conn; /* connection owning the cursor */ + + int closed:1; /* 1 if the cursor is closed */ + int notuples:1; /* 1 if the command was not a SELECT query */ + int withhold:1; /* 1 if the cursor is named and uses WITH HOLD */ + + int scrollable; /* 1 if the cursor is named and SCROLLABLE, + 0 if not scrollable + -1 if undefined (PG may decide scrollable or not) + */ + + long int rowcount; /* number of rows affected by last execute */ + long int columns; /* number of columns fetched from the db */ + long int arraysize; /* how many rows should fetchmany() return */ + long int itersize; /* how many rows should iter(cur) fetch in named cursors */ + long int row; /* the row counter for fetch*() operations */ + long int mark; /* transaction marker, copied from conn */ + + PyObject *description; /* read-only attribute: sequence of 7-item + sequences.*/ + + /* postgres connection stuff */ + PGresult *pgres; /* result of last query */ + PyObject *pgstatus; /* last message from the server after an execute */ + Oid lastoid; /* last oid from an insert or InvalidOid */ + + PyObject *casts; /* an array (tuple) of typecast functions */ + PyObject *caster; /* the current typecaster object */ + + PyObject *copyfile; /* file-like used during COPY TO/FROM ops */ + Py_ssize_t copysize; /* size of the copy buffer during COPY TO/FROM ops */ +#define DEFAULT_COPYSIZE 16384 +#define DEFAULT_COPYBUFF 8192 + + PyObject *tuple_factory; /* factory for result tuples */ + PyObject *tzinfo_factory; /* factory for tzinfo objects */ + + PyObject *query; /* last query executed */ + + char *qattr; /* quoting attr, used when quoting strings */ + char *notice; /* a notice from the backend */ + char *name; /* this 
cursor name */ + char *qname; /* this cursor name, quoted */ + + PyObject *string_types; /* a set of typecasters for string types */ + PyObject *binary_types; /* a set of typecasters for binary types */ + + PyObject *weakreflist; /* list of weak references */ + +}; + + +/* C-callable functions in cursor_int.c and cursor_type.c */ +BORROWED HIDDEN PyObject *curs_get_cast(cursorObject *self, PyObject *oid); +HIDDEN void curs_reset(cursorObject *self); +RAISES_NEG HIDDEN int curs_withhold_set(cursorObject *self, PyObject *pyvalue); +RAISES_NEG HIDDEN int curs_scrollable_set(cursorObject *self, PyObject *pyvalue); +HIDDEN PyObject *curs_validate_sql_basic(cursorObject *self, PyObject *sql); +HIDDEN void curs_set_result(cursorObject *self, PGresult *pgres); + +/* exception-raising macros */ +#define EXC_IF_CURS_CLOSED(self) \ +do { \ + if (!(self)->conn) { \ + PyErr_SetString(InterfaceError, "the cursor has no connection"); \ + return NULL; } \ + if ((self)->closed || (self)->conn->closed) { \ + PyErr_SetString(InterfaceError, "cursor already closed"); \ + return NULL; } \ +} while (0) + +#define EXC_IF_NO_TUPLES(self) \ +do \ + if ((self)->notuples && (self)->name == NULL) { \ + PyErr_SetString(ProgrammingError, "no results to fetch"); \ + return NULL; } \ +while (0) + +#define EXC_IF_NO_MARK(self) \ +do \ + if ((self)->mark != (self)->conn->mark && (self)->withhold == 0) { \ + PyErr_SetString(ProgrammingError, "named cursor isn't valid anymore"); \ + return NULL; } \ +while (0) + +#define EXC_IF_CURS_ASYNC(self, cmd) \ +do \ + if ((self)->conn->async == 1) { \ + PyErr_SetString(ProgrammingError, \ + #cmd " cannot be used in asynchronous mode"); \ + return NULL; } \ +while (0) + +#define EXC_IF_ASYNC_IN_PROGRESS(self, cmd) \ +do \ + if ((self)->conn->async_cursor != NULL) { \ + PyErr_SetString(ProgrammingError, \ + #cmd " cannot be used while an asynchronous query is underway"); \ + return NULL; } \ +while (0) + +#ifdef __cplusplus +} +#endif + +#endif /* 
!defined(PSYCOPG_CURSOR_H) */ diff --git a/psycopg/cursor_int.c b/psycopg/cursor_int.c new file mode 100644 index 0000000000000000000000000000000000000000..7009ee836dc5fd253be832db113ffd74115e83c2 --- /dev/null +++ b/psycopg/cursor_int.c @@ -0,0 +1,171 @@ +/* cursor_int.c - code used by the cursor object + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/cursor.h" +#include "psycopg/pqpath.h" +#include "psycopg/typecast.h" + +/* curs_get_cast - return the type caster for an oid. + * + * Return the most specific type caster, from cursor to connection to global. + * If no type caster is found, return the default one. + * + * Return a borrowed reference. 
+ */ + +BORROWED PyObject * +curs_get_cast(cursorObject *self, PyObject *oid) +{ + PyObject *cast; + + /* cursor lookup */ + if (self->string_types != NULL && self->string_types != Py_None) { + cast = PyDict_GetItem(self->string_types, oid); + Dprintf("curs_get_cast: per-cursor dict: %p", cast); + if (cast) { return cast; } + } + + /* connection lookup */ + cast = PyDict_GetItem(self->conn->string_types, oid); + Dprintf("curs_get_cast: per-connection dict: %p", cast); + if (cast) { return cast; } + + /* global lookup */ + cast = PyDict_GetItem(psyco_types, oid); + Dprintf("curs_get_cast: global dict: %p", cast); + if (cast) { return cast; } + + /* fallback */ + return psyco_default_cast; +} + +#include + + +/* curs_reset - reset the cursor to a clean state */ + +void +curs_reset(cursorObject *self) +{ + /* initialize some variables to default values */ + self->notuples = 1; + self->rowcount = -1; + self->row = 0; + + Py_CLEAR(self->description); + Py_CLEAR(self->casts); +} + + +/* Return 1 if `obj` is a `psycopg2.sql.Composable` instance, else 0 + * Set an exception and return -1 in case of error. + */ +RAISES_NEG static int +_curs_is_composible(PyObject *obj) +{ + int rv = -1; + PyObject *m = NULL; + PyObject *comp = NULL; + + if (!(m = PyImport_ImportModule("psycopg2.sql"))) { goto exit; } + if (!(comp = PyObject_GetAttrString(m, "Composable"))) { goto exit; } + rv = PyObject_IsInstance(obj, comp); + +exit: + Py_XDECREF(comp); + Py_XDECREF(m); + return rv; + +} + +/* Performs very basic validation on an incoming SQL string. + * Returns a new reference to a str instance on success; NULL on failure, + * after having set an exception. 
+ */ +PyObject * +curs_validate_sql_basic(cursorObject *self, PyObject *sql) +{ + PyObject *rv = NULL; + PyObject *comp = NULL; + int iscomp; + + if (!sql || !PyObject_IsTrue(sql)) { + psyco_set_error(ProgrammingError, self, + "can't execute an empty query"); + goto exit; + } + + if (Bytes_Check(sql)) { + /* Necessary for ref-count symmetry with the unicode case: */ + Py_INCREF(sql); + rv = sql; + } + else if (PyUnicode_Check(sql)) { + if (!(rv = conn_encode(self->conn, sql))) { goto exit; } + } + else if (0 != (iscomp = _curs_is_composible(sql))) { + if (iscomp < 0) { goto exit; } + if (!(comp = PyObject_CallMethod(sql, "as_string", "O", self->conn))) { + goto exit; + } + + if (Bytes_Check(comp)) { + rv = comp; + comp = NULL; + } + else if (PyUnicode_Check(comp)) { + if (!(rv = conn_encode(self->conn, comp))) { goto exit; } + } + else { + PyErr_Format(PyExc_TypeError, + "as_string() should return a string: got %s instead", + Py_TYPE(comp)->tp_name); + goto exit; + } + } + else { + /* the is not unicode or string, raise an error */ + PyErr_Format(PyExc_TypeError, + "argument 1 must be a string or unicode object: got %s instead", + Py_TYPE(sql)->tp_name); + goto exit; + } + +exit: + Py_XDECREF(comp); + return rv; +} + + +void +curs_set_result(cursorObject *self, PGresult *pgres) +{ + PQclear(self->pgres); + self->pgres = pgres; +} diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c new file mode 100644 index 0000000000000000000000000000000000000000..efdeefcc5be55cb4293ed12dc1b60f2cfddb4a56 --- /dev/null +++ b/psycopg/cursor_type.c @@ -0,0 +1,2126 @@ +/* cursor_type.c - python interface to cursor objects + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/cursor.h" +#include "psycopg/connection.h" +#include "psycopg/green.h" +#include "psycopg/pqpath.h" +#include "psycopg/typecast.h" +#include "psycopg/microprotocols.h" +#include "psycopg/microprotocols_proto.h" + +#include + +#include + + +/** DBAPI methods **/ + +/* close method - close the cursor */ + +#define curs_close_doc \ +"close() -- Close the cursor." 
+
+static PyObject *
+curs_close(cursorObject *self, PyObject *dummy)
+{
+    PyObject *rv = NULL;
+    /* quoted cursor name from psyco_escape_string(); PyMem-allocated,
+     * released at exit (PyMem_Free(NULL) is a safe no-op) */
+    char *lname = NULL;
+
+    /* closing an already-closed cursor is a no-op, per DBAPI */
+    if (self->closed) {
+        rv = Py_None;
+        Py_INCREF(rv);
+        goto exit;
+    }
+
+    /* a named (server-side) cursor also needs a CLOSE on the server */
+    if (self->qname != NULL) {
+        char buffer[256];
+        PGTransactionStatusType status;
+
+        EXC_IF_ASYNC_IN_PROGRESS(self, close_named);
+
+        if (self->conn) {
+            status = PQtransactionStatus(self->conn->pgconn);
+        }
+        else {
+            status = PQTRANS_UNKNOWN;
+        }
+
+        /* don't talk to a broken/unknown connection: just mark the cursor
+         * closed locally */
+        if (status == PQTRANS_UNKNOWN || status == PQTRANS_INERROR) {
+            Dprintf("skipping named curs close because tx status %d",
+                (int)status);
+            goto close;
+        }
+
+        /* We should close a server-side cursor only if exists, or we get an
+         * error (#716). If we execute()d the cursor should exist alright, but
+         * if we didn't there is still the expectation that the cursor is
+         * closed (#746).
+         *
+         * So if we didn't execute() check for the cursor existence before
+         * closing it (the view exists since PG 8.2 according to docs).
+         */
+        if (!self->query && self->conn->server_version >= 80200) {
+            if (!(lname = psyco_escape_string(
+                    self->conn, self->name, -1, NULL, NULL))) {
+                goto exit;
+            }
+            PyOS_snprintf(buffer, sizeof(buffer),
+                "SELECT 1 FROM pg_catalog.pg_cursors where name = %s",
+                lname);
+            if (pq_execute(self, buffer, 0, 0, 1) == -1) { goto exit; }
+
+            if (self->rowcount == 0) {
+                Dprintf("skipping named cursor close because not existing");
+                goto close;
+            }
+        }
+
+        EXC_IF_NO_MARK(self);
+        PyOS_snprintf(buffer, sizeof(buffer), "CLOSE %s", self->qname);
+        if (pq_execute(self, buffer, 0, 0, 1) == -1) { goto exit; }
+    }
+
+close:
+    CLEARPGRES(self->pgres);
+
+    self->closed = 1;
+    Dprintf("curs_close: cursor at %p closed", self);
+
+    rv = Py_None;
+    Py_INCREF(rv);
+
+exit:
+    PyMem_Free(lname);
+    return rv;
+}
+
+
+/* execute method - executes a query */
+
+/* mogrify a query string and build argument array or dict */
+
+RAISES_NEG static int
+_mogrify(PyObject *var, PyObject *fmt, cursorObject *curs, PyObject **new)
+{
+    PyObject
*key, *value, *n; + const char *d, *c; + Py_ssize_t index = 0; + int force = 0, kind = 0; + + /* from now on we'll use n and replace its value in *new only at the end, + just before returning. we also init *new to NULL to exit with an error + if we can't complete the mogrification */ + n = *new = NULL; + c = Bytes_AsString(fmt); + + while(*c) { + if (*c++ != '%') { + /* a regular character */ + continue; + } + + switch (*c) { + + /* handle plain percent symbol in format string */ + case '%': + ++c; + force = 1; + break; + + /* if we find '%(' then this is a dictionary, we: + 1/ find the matching ')' and extract the key name + 2/ locate the value in the dictionary (or return an error) + 3/ mogrify the value into something useful (quoting)... + 4/ ...and add it to the new dictionary to be used as argument + */ + case '(': + /* check if some crazy guy mixed formats */ + if (kind == 2) { + Py_XDECREF(n); + psyco_set_error(ProgrammingError, curs, + "argument formats can't be mixed"); + return -1; + } + kind = 1; + + /* let's have d point the end of the argument */ + for (d = c + 1; *d && *d != ')' && *d != '%'; d++); + + if (*d == ')') { + if (!(key = Text_FromUTF8AndSize(c+1, (Py_ssize_t)(d-c-1)))) { + Py_XDECREF(n); + return -1; + } + + /* if value is NULL we did not find the key (or this is not a + dictionary): let python raise a KeyError */ + if (!(value = PyObject_GetItem(var, key))) { + Py_DECREF(key); /* destroy key */ + Py_XDECREF(n); /* destroy n */ + return -1; + } + /* key has refcnt 1, value the original value + 1 */ + + Dprintf("_mogrify: value refcnt: " + FORMAT_CODE_PY_SSIZE_T " (+1)", Py_REFCNT(value)); + + if (n == NULL) { + if (!(n = PyDict_New())) { + Py_DECREF(key); + Py_DECREF(value); + return -1; + } + } + + if (0 == PyDict_Contains(n, key)) { + PyObject *t = NULL; + + /* None is always converted to NULL; this is an + optimization over the adapting code and can go away in + the future if somebody finds a None adapter useful. 
*/ + if (value == Py_None) { + Py_INCREF(psyco_null); + t = psyco_null; + PyDict_SetItem(n, key, t); + /* t is a new object, refcnt = 1, key is at 2 */ + } + else { + t = microprotocol_getquoted(value, curs->conn); + if (t != NULL) { + PyDict_SetItem(n, key, t); + /* both key and t refcnt +1, key is at 2 now */ + } + else { + /* no adapter found, raise a BIG exception */ + Py_DECREF(key); + Py_DECREF(value); + Py_DECREF(n); + return -1; + } + } + + Py_XDECREF(t); /* t dies here */ + } + Py_DECREF(value); + Py_DECREF(key); /* key has the original refcnt now */ + Dprintf("_mogrify: after value refcnt: " + FORMAT_CODE_PY_SSIZE_T, Py_REFCNT(value)); + } + else { + /* we found %( but not a ) */ + Py_XDECREF(n); + psyco_set_error(ProgrammingError, curs, + "incomplete placeholder: '%(' without ')'"); + return -1; + } + c = d + 1; /* after the ) */ + break; + + default: + /* this is a format that expects a tuple; it is much easier, + because we don't need to check the old/new dictionary for + keys */ + + /* check if some crazy guy mixed formats */ + if (kind == 1) { + Py_XDECREF(n); + psyco_set_error(ProgrammingError, curs, + "argument formats can't be mixed"); + return -1; + } + kind = 2; + + value = PySequence_GetItem(var, index); + /* value has refcnt inc'ed by 1 here */ + + /* if value is NULL this is not a sequence or the index is wrong; + anyway we let python set its own exception */ + if (value == NULL) { + Py_XDECREF(n); + return -1; + } + + if (n == NULL) { + if (!(n = PyTuple_New(PyObject_Length(var)))) { + Py_DECREF(value); + return -1; + } + } + + /* let's have d point just after the '%' */ + if (value == Py_None) { + Py_INCREF(psyco_null); + PyTuple_SET_ITEM(n, index, psyco_null); + Py_DECREF(value); + } + else { + PyObject *t = microprotocol_getquoted(value, curs->conn); + + if (t != NULL) { + PyTuple_SET_ITEM(n, index, t); + Py_DECREF(value); + } + else { + Py_DECREF(n); + Py_DECREF(value); + return -1; + } + } + index += 1; + } + } + + if (force && n == 
NULL) + n = PyTuple_New(0); + *new = n; + + return 0; +} + + +/* Merge together a query string and its arguments. + * + * The arguments have been already adapted to SQL. + * + * Return a new reference to a string with the merged query, + * NULL and set an exception if any happened. + */ +static PyObject * +_psyco_curs_merge_query_args(cursorObject *self, + PyObject *query, PyObject *args) +{ + PyObject *fquery; + + /* if PyString_Format() return NULL an error occurred: if the error is + a TypeError we need to check the exception.args[0] string for the + values: + + "not enough arguments for format string" + "not all arguments converted" + + and return the appropriate ProgrammingError. we do that by grabbing + the current exception (we will later restore it if the type or the + strings do not match.) */ + + if (!(fquery = Bytes_Format(query, args))) { + PyObject *err, *arg, *trace; + int pe = 0; + + PyErr_Fetch(&err, &arg, &trace); + + if (err && PyErr_GivenExceptionMatches(err, PyExc_TypeError)) { + Dprintf("curs_execute: TypeError exception caught"); + PyErr_NormalizeException(&err, &arg, &trace); + + if (PyObject_HasAttrString(arg, "args")) { + PyObject *args = PyObject_GetAttrString(arg, "args"); + PyObject *str = PySequence_GetItem(args, 0); + const char *s = Bytes_AS_STRING(str); + + Dprintf("curs_execute: -> %s", s); + + if (!strcmp(s, "not enough arguments for format string") + || !strcmp(s, "not all arguments converted")) { + Dprintf("curs_execute: -> got a match"); + psyco_set_error(ProgrammingError, self, s); + pe = 1; + } + + Py_DECREF(args); + Py_DECREF(str); + } + } + + /* if we did not manage our own exception, restore old one */ + if (pe == 1) { + Py_XDECREF(err); Py_XDECREF(arg); Py_XDECREF(trace); + } + else { + PyErr_Restore(err, arg, trace); + } + } + + return fquery; +} + +#define curs_execute_doc \ +"execute(query, vars=None) -- Execute query with bound vars." 
+ +RAISES_NEG static int +_psyco_curs_execute(cursorObject *self, + PyObject *query, PyObject *vars, + long int async, int no_result) +{ + int res = -1; + int tmp; + PyObject *fquery = NULL, *cvt = NULL; + + /* query becomes NULL or refcount +1, so good to XDECREF at the end */ + if (!(query = curs_validate_sql_basic(self, query))) { + goto exit; + } + + CLEARPGRES(self->pgres); + Py_CLEAR(self->query); + Dprintf("curs_execute: starting execution of new query"); + + /* here we are, and we have a sequence or a dictionary filled with + objects to be substituted (bound variables). we try to be smart and do + the right thing (i.e., what the user expects) */ + if (vars && vars != Py_None) + { + if (0 > _mogrify(vars, query, self, &cvt)) { goto exit; } + } + + /* Merge the query to the arguments if needed */ + if (cvt) { + if (!(fquery = _psyco_curs_merge_query_args(self, query, cvt))) { + goto exit; + } + } + else { + Py_INCREF(query); + fquery = query; + } + + if (self->qname != NULL) { + const char *scroll; + switch (self->scrollable) { + case -1: + scroll = ""; + break; + case 0: + scroll = "NO SCROLL "; + break; + case 1: + scroll = "SCROLL "; + break; + default: + PyErr_SetString(InternalError, "unexpected scrollable value"); + goto exit; + } + + if (!(self->query = Bytes_FromFormat( + "DECLARE %s %sCURSOR %s HOLD FOR %s", + self->qname, + scroll, + self->withhold ? 
"WITH" : "WITHOUT", + Bytes_AS_STRING(fquery)))) { + goto exit; + } + if (!self->query) { goto exit; } + } + else { + /* Transfer ownership */ + Py_INCREF(fquery); + self->query = fquery; + } + + /* At this point, the SQL statement must be str, not unicode */ + tmp = pq_execute(self, Bytes_AS_STRING(self->query), async, no_result, 0); + Dprintf("curs_execute: res = %d, pgres = %p", tmp, self->pgres); + if (tmp < 0) { goto exit; } + + res = 0; /* Success */ + +exit: + Py_XDECREF(query); + Py_XDECREF(fquery); + Py_XDECREF(cvt); + + return res; +} + +static PyObject * +curs_execute(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + PyObject *vars = NULL, *operation = NULL; + + static char *kwlist[] = {"query", "vars", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O", kwlist, + &operation, &vars)) { + return NULL; + } + + if (self->name != NULL) { + if (self->query) { + psyco_set_error(ProgrammingError, self, + "can't call .execute() on named cursors more than once"); + return NULL; + } + if (self->conn->autocommit && !self->withhold) { + psyco_set_error(ProgrammingError, self, + "can't use a named cursor outside of transactions"); + return NULL; + } + EXC_IF_NO_MARK(self); + } + + EXC_IF_CURS_CLOSED(self); + EXC_IF_ASYNC_IN_PROGRESS(self, execute); + EXC_IF_TPC_PREPARED(self->conn, execute); + + if (0 > _psyco_curs_execute(self, operation, vars, self->conn->async, 0)) { + return NULL; + } + + /* success */ + Py_RETURN_NONE; +} + +#define curs_executemany_doc \ +"executemany(query, vars_list) -- Execute many queries with bound vars." 
+ +static PyObject * +curs_executemany(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + PyObject *operation = NULL, *vars = NULL; + PyObject *v, *iter = NULL; + long rowcount = 0; + + static char *kwlist[] = {"query", "vars_list", NULL}; + + /* reset rowcount to -1 to avoid setting it when an exception is raised */ + self->rowcount = -1; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO", kwlist, + &operation, &vars)) { + return NULL; + } + + EXC_IF_CURS_CLOSED(self); + EXC_IF_CURS_ASYNC(self, executemany); + EXC_IF_TPC_PREPARED(self->conn, executemany); + + if (self->name != NULL) { + psyco_set_error(ProgrammingError, self, + "can't call .executemany() on named cursors"); + return NULL; + } + + if (!PyIter_Check(vars)) { + vars = iter = PyObject_GetIter(vars); + if (iter == NULL) return NULL; + } + + while ((v = PyIter_Next(vars)) != NULL) { + if (0 > _psyco_curs_execute(self, operation, v, 0, 1)) { + Py_DECREF(v); + Py_XDECREF(iter); + return NULL; + } + else { + if (self->rowcount == -1) + rowcount = -1; + else if (rowcount >= 0) + rowcount += self->rowcount; + Py_DECREF(v); + } + } + Py_XDECREF(iter); + self->rowcount = rowcount; + + if (!PyErr_Occurred()) { + Py_RETURN_NONE; + } + else { + return NULL; + } +} + + +#define curs_mogrify_doc \ +"mogrify(query, vars=None) -> str -- Return query after vars binding." + +static PyObject * +_psyco_curs_mogrify(cursorObject *self, + PyObject *operation, PyObject *vars) +{ + PyObject *fquery = NULL, *cvt = NULL; + + operation = curs_validate_sql_basic(self, operation); + if (operation == NULL) { goto cleanup; } + + Dprintf("curs_mogrify: starting mogrify"); + + /* here we are, and we have a sequence or a dictionary filled with + objects to be substituted (bound variables). 
we try to be smart and do + the right thing (i.e., what the user expects) */ + + if (vars && vars != Py_None) + { + if (0 > _mogrify(vars, operation, self, &cvt)) { + goto cleanup; + } + } + + if (vars && cvt) { + if (!(fquery = _psyco_curs_merge_query_args(self, operation, cvt))) { + goto cleanup; + } + + Dprintf("curs_mogrify: cvt->refcnt = " FORMAT_CODE_PY_SSIZE_T + ", fquery->refcnt = " FORMAT_CODE_PY_SSIZE_T, + Py_REFCNT(cvt), Py_REFCNT(fquery)); + } + else { + fquery = operation; + Py_INCREF(fquery); + } + +cleanup: + Py_XDECREF(operation); + Py_XDECREF(cvt); + + return fquery; +} + +static PyObject * +curs_mogrify(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + PyObject *vars = NULL, *operation = NULL; + + static char *kwlist[] = {"query", "vars", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O", kwlist, + &operation, &vars)) { + return NULL; + } + + return _psyco_curs_mogrify(self, operation, vars); +} + + +/* cast method - convert an oid/string into a Python object */ +#define curs_cast_doc \ +"cast(oid, s) -> value\n\n" \ +"Convert the string s to a Python object according to its oid.\n\n" \ +"Look for a typecaster first in the cursor, then in its connection," \ +"then in the global register. If no suitable typecaster is found," \ +"leave the value as a string." + +static PyObject * +curs_cast(cursorObject *self, PyObject *args) +{ + PyObject *oid; + PyObject *s; + PyObject *cast; + + if (!PyArg_ParseTuple(args, "OO", &oid, &s)) + return NULL; + + cast = curs_get_cast(self, oid); + return PyObject_CallFunctionObjArgs(cast, s, (PyObject *)self, NULL); +} + + +/* fetchone method - fetch one row of results */ + +#define curs_fetchone_doc \ +"fetchone() -> tuple or None\n\n" \ +"Return the next row of a query result set in the form of a tuple (by\n" \ +"default) or using the sequence factory previously set in the\n" \ +"`row_factory` attribute. 
Return `!None` when no more data is available.\n" + +RAISES_NEG static int +_psyco_curs_prefetch(cursorObject *self) +{ + int i = 0; + + if (self->pgres == NULL) { + Dprintf("_psyco_curs_prefetch: trying to fetch data"); + do { + i = pq_fetch(self, 0); + Dprintf("_psycopg_curs_prefetch: result = %d", i); + } while(i == 1); + } + + Dprintf("_psyco_curs_prefetch: result = %d", i); + return i; +} + +RAISES_NEG static int +_psyco_curs_buildrow_fill(cursorObject *self, PyObject *res, + int row, int n, int istuple) +{ + int i, len, err; + const char *str; + PyObject *val; + int rv = -1; + + for (i=0; i < n; i++) { + if (PQgetisnull(self->pgres, row, i)) { + str = NULL; + len = 0; + } + else { + str = PQgetvalue(self->pgres, row, i); + len = PQgetlength(self->pgres, row, i); + } + + Dprintf("_psyco_curs_buildrow: row %ld, element %d, len %d", + self->row, i, len); + + if (!(val = typecast_cast(PyTuple_GET_ITEM(self->casts, i), str, len, + (PyObject*)self))) { + goto exit; + } + + Dprintf("_psyco_curs_buildrow: val->refcnt = " + FORMAT_CODE_PY_SSIZE_T, + Py_REFCNT(val) + ); + if (istuple) { + PyTuple_SET_ITEM(res, i, val); + } + else { + err = PySequence_SetItem(res, i, val); + Py_DECREF(val); + if (err == -1) { goto exit; } + } + } + + rv = 0; + +exit: + return rv; +} + +static PyObject * +_psyco_curs_buildrow(cursorObject *self, int row) +{ + int n; + int istuple; + PyObject *t = NULL; + PyObject *rv = NULL; + + n = PQnfields(self->pgres); + istuple = (self->tuple_factory == Py_None); + + if (istuple) { + t = PyTuple_New(n); + } + else { + t = PyObject_CallFunctionObjArgs(self->tuple_factory, self, NULL); + } + if (!t) { goto exit; } + + if (0 <= _psyco_curs_buildrow_fill(self, t, row, n, istuple)) { + rv = t; + t = NULL; + } + +exit: + Py_XDECREF(t); + return rv; + +} + +static PyObject * +curs_fetchone(cursorObject *self, PyObject *dummy) +{ + PyObject *res; + + EXC_IF_CURS_CLOSED(self); + if (_psyco_curs_prefetch(self) < 0) return NULL; + EXC_IF_NO_TUPLES(self); + + 
if (self->qname != NULL) { + char buffer[128]; + + EXC_IF_NO_MARK(self); + EXC_IF_ASYNC_IN_PROGRESS(self, fetchone); + EXC_IF_TPC_PREPARED(self->conn, fetchone); + PyOS_snprintf(buffer, sizeof(buffer), "FETCH FORWARD 1 FROM %s", self->qname); + if (pq_execute(self, buffer, 0, 0, self->withhold) == -1) return NULL; + if (_psyco_curs_prefetch(self) < 0) return NULL; + } + + Dprintf("curs_fetchone: fetching row %ld", self->row); + Dprintf("curs_fetchone: rowcount = %ld", self->rowcount); + + if (self->row >= self->rowcount) { + /* we exhausted available data: return None */ + Py_RETURN_NONE; + } + + res = _psyco_curs_buildrow(self, self->row); + self->row++; /* move the counter to next line */ + + /* if the query was async aggresively free pgres, to allow + successive requests to reallocate it */ + if (self->row >= self->rowcount + && self->conn->async_cursor + && PyWeakref_GetObject(self->conn->async_cursor) == (PyObject*)self) + CLEARPGRES(self->pgres); + + return res; +} + +/* Efficient cursor.next() implementation for named cursors. + * + * Fetch several records at time. Return NULL when the cursor is exhausted. + */ +static PyObject * +curs_next_named(cursorObject *self) +{ + PyObject *res; + + Dprintf("curs_next_named"); + EXC_IF_CURS_CLOSED(self); + EXC_IF_ASYNC_IN_PROGRESS(self, next); + if (_psyco_curs_prefetch(self) < 0) return NULL; + EXC_IF_NO_TUPLES(self); + + EXC_IF_NO_MARK(self); + EXC_IF_TPC_PREPARED(self->conn, next); + + Dprintf("curs_next_named: row %ld", self->row); + Dprintf("curs_next_named: rowcount = %ld", self->rowcount); + if (self->row >= self->rowcount) { + char buffer[128]; + + PyOS_snprintf(buffer, sizeof(buffer), "FETCH FORWARD %ld FROM %s", + self->itersize, self->qname); + if (pq_execute(self, buffer, 0, 0, self->withhold) == -1) return NULL; + if (_psyco_curs_prefetch(self) < 0) return NULL; + } + + /* We exhausted the data: return NULL to stop iteration. 
*/ + if (self->row >= self->rowcount) { + return NULL; + } + + res = _psyco_curs_buildrow(self, self->row); + self->row++; /* move the counter to next line */ + + /* if the query was async aggresively free pgres, to allow + successive requests to reallocate it */ + if (self->row >= self->rowcount + && self->conn->async_cursor + && PyWeakref_GetObject(self->conn->async_cursor) == (PyObject*)self) + CLEARPGRES(self->pgres); + + return res; +} + + +/* fetch many - fetch some results */ + +#define curs_fetchmany_doc \ +"fetchmany(size=self.arraysize) -> list of tuple\n\n" \ +"Return the next `size` rows of a query result set in the form of a list\n" \ +"of tuples (by default) or using the sequence factory previously set in\n" \ +"the `row_factory` attribute.\n\n" \ +"Return an empty list when no more data is available.\n" + +static PyObject * +curs_fetchmany(cursorObject *self, PyObject *args, PyObject *kwords) +{ + int i; + PyObject *list = NULL; + PyObject *row = NULL; + PyObject *rv = NULL; + + PyObject *pysize = NULL; + long int size = self->arraysize; + static char *kwlist[] = {"size", NULL}; + + /* allow passing None instead of omitting the *size* argument, + * or using the method from subclasses would be a problem */ + if (!PyArg_ParseTupleAndKeywords(args, kwords, "|O", kwlist, &pysize)) { + return NULL; + } + + if (pysize && pysize != Py_None) { + size = PyInt_AsLong(pysize); + if (size == -1 && PyErr_Occurred()) { + return NULL; + } + } + + EXC_IF_CURS_CLOSED(self); + if (_psyco_curs_prefetch(self) < 0) return NULL; + EXC_IF_NO_TUPLES(self); + + if (self->qname != NULL) { + char buffer[128]; + + EXC_IF_NO_MARK(self); + EXC_IF_ASYNC_IN_PROGRESS(self, fetchmany); + EXC_IF_TPC_PREPARED(self->conn, fetchone); + PyOS_snprintf(buffer, sizeof(buffer), "FETCH FORWARD %d FROM %s", + (int)size, self->qname); + if (pq_execute(self, buffer, 0, 0, self->withhold) == -1) { goto exit; } + if (_psyco_curs_prefetch(self) < 0) { goto exit; } + } + + /* make sure size is not > 
than the available number of rows */ + if (size > self->rowcount - self->row || size < 0) { + size = self->rowcount - self->row; + } + + Dprintf("curs_fetchmany: size = %ld", size); + + if (size <= 0) { + rv = PyList_New(0); + goto exit; + } + + if (!(list = PyList_New(size))) { goto exit; } + + for (i = 0; i < size; i++) { + row = _psyco_curs_buildrow(self, self->row); + self->row++; + + if (row == NULL) { goto exit; } + + PyList_SET_ITEM(list, i, row); + } + row = NULL; + + /* if the query was async aggresively free pgres, to allow + successive requests to reallocate it */ + if (self->row >= self->rowcount + && self->conn->async_cursor + && PyWeakref_GetObject(self->conn->async_cursor) == (PyObject*)self) + CLEARPGRES(self->pgres); + + /* success */ + rv = list; + list = NULL; + +exit: + Py_XDECREF(list); + Py_XDECREF(row); + + return rv; +} + + +/* fetch all - fetch all results */ + +#define curs_fetchall_doc \ +"fetchall() -> list of tuple\n\n" \ +"Return all the remaining rows of a query result set.\n\n" \ +"Rows are returned in the form of a list of tuples (by default) or using\n" \ +"the sequence factory previously set in the `row_factory` attribute.\n" \ +"Return `!None` when no more data is available.\n" + +static PyObject * +curs_fetchall(cursorObject *self, PyObject *dummy) +{ + int i, size; + PyObject *list = NULL; + PyObject *row = NULL; + PyObject *rv = NULL; + + EXC_IF_CURS_CLOSED(self); + if (_psyco_curs_prefetch(self) < 0) return NULL; + EXC_IF_NO_TUPLES(self); + + if (self->qname != NULL) { + char buffer[128]; + + EXC_IF_NO_MARK(self); + EXC_IF_ASYNC_IN_PROGRESS(self, fetchall); + EXC_IF_TPC_PREPARED(self->conn, fetchall); + PyOS_snprintf(buffer, sizeof(buffer), "FETCH FORWARD ALL FROM %s", self->qname); + if (pq_execute(self, buffer, 0, 0, self->withhold) == -1) { goto exit; } + if (_psyco_curs_prefetch(self) < 0) { goto exit; } + } + + size = self->rowcount - self->row; + + if (size <= 0) { + rv = PyList_New(0); + goto exit; + } + + if (!(list = 
PyList_New(size))) { goto exit; } + + for (i = 0; i < size; i++) { + row = _psyco_curs_buildrow(self, self->row); + self->row++; + if (row == NULL) { goto exit; } + + PyList_SET_ITEM(list, i, row); + } + row = NULL; + + /* if the query was async aggresively free pgres, to allow + successive requests to reallocate it */ + if (self->row >= self->rowcount + && self->conn->async_cursor + && PyWeakref_GetObject(self->conn->async_cursor) == (PyObject*)self) + CLEARPGRES(self->pgres); + + /* success */ + rv = list; + list = NULL; + +exit: + Py_XDECREF(list); + Py_XDECREF(row); + + return rv; +} + + +/* callproc method - execute a stored procedure */ + +#define curs_callproc_doc \ +"callproc(procname, parameters=None) -- Execute stored procedure." + +static PyObject * +curs_callproc(cursorObject *self, PyObject *args) +{ + const char *procname = NULL; + char *sql = NULL; + Py_ssize_t procname_len, i, nparameters = 0, sl = 0; + PyObject *parameters = Py_None; + PyObject *operation = NULL; + PyObject *res = NULL; + + int using_dict; + PyObject *pname = NULL; + PyObject *pnames = NULL; + PyObject *pvals = NULL; + char *cpname = NULL; + char **scpnames = NULL; + + if (!PyArg_ParseTuple(args, "s#|O", &procname, &procname_len, + ¶meters)) { + goto exit; + } + + EXC_IF_CURS_CLOSED(self); + EXC_IF_ASYNC_IN_PROGRESS(self, callproc); + EXC_IF_TPC_PREPARED(self->conn, callproc); + + if (self->name != NULL) { + psyco_set_error(ProgrammingError, self, + "can't call .callproc() on named cursors"); + goto exit; + } + + if (parameters != Py_None) { + if (-1 == (nparameters = PyObject_Length(parameters))) { goto exit; } + } + + using_dict = nparameters > 0 && PyDict_Check(parameters); + + /* a Dict is complicated; the parameter names go into the query */ + if (using_dict) { + if (!(pnames = PyDict_Keys(parameters))) { goto exit; } + if (!(pvals = PyDict_Values(parameters))) { goto exit; } + + sl = procname_len + 17 + nparameters * 5 - (nparameters ? 
1 : 0); + + if (!(scpnames = PyMem_New(char *, nparameters))) { + PyErr_NoMemory(); + goto exit; + } + + memset(scpnames, 0, sizeof(char *) * nparameters); + + /* each parameter has to be processed; it's a few steps. */ + for (i = 0; i < nparameters; i++) { + /* all errors are RuntimeErrors as they should never occur */ + + if (!(pname = PyList_GetItem(pnames, i))) { goto exit; } + Py_INCREF(pname); /* was borrowed */ + + /* this also makes a check for keys being strings */ + if (!(pname = psyco_ensure_bytes(pname))) { goto exit; } + if (!(cpname = Bytes_AsString(pname))) { goto exit; } + + if (!(scpnames[i] = psyco_escape_identifier( + self->conn, cpname, -1))) { + Py_CLEAR(pname); + goto exit; + } + + Py_CLEAR(pname); + + sl += strlen(scpnames[i]); + } + + if (!(sql = (char*)PyMem_Malloc(sl))) { + PyErr_NoMemory(); + goto exit; + } + + sprintf(sql, "SELECT * FROM %s(", procname); + for (i = 0; i < nparameters; i++) { + strcat(sql, scpnames[i]); + strcat(sql, ":=%s,"); + } + sql[sl-2] = ')'; + sql[sl-1] = '\0'; + } + + /* a list (or None, or empty data structure) is a little bit simpler */ + else { + Py_INCREF(parameters); + pvals = parameters; + + sl = procname_len + 17 + nparameters * 3 - (nparameters ? 
1 : 0); + + sql = (char*)PyMem_Malloc(sl); + if (sql == NULL) { + PyErr_NoMemory(); + goto exit; + } + + sprintf(sql, "SELECT * FROM %s(", procname); + for (i = 0; i < nparameters; i++) { + strcat(sql, "%s,"); + } + sql[sl-2] = ')'; + sql[sl-1] = '\0'; + } + + if (!(operation = Bytes_FromString(sql))) { + goto exit; + } + + if (0 <= _psyco_curs_execute( + self, operation, pvals, self->conn->async, 0)) { + /* The dict case is outside DBAPI scope anyway, so simply return None */ + if (using_dict) { + res = Py_None; + } + else { + res = pvals; + } + Py_INCREF(res); + } + +exit: + if (scpnames != NULL) { + for (i = 0; i < nparameters; i++) { + if (scpnames[i] != NULL) { + PQfreemem(scpnames[i]); + } + } + } + PyMem_Free(scpnames); + Py_XDECREF(pname); + Py_XDECREF(pnames); + Py_XDECREF(operation); + Py_XDECREF(pvals); + PyMem_Free((void*)sql); + return res; +} + + +/* nextset method - return the next set of data (not supported) */ + +#define curs_nextset_doc \ +"nextset() -- Skip to next set of data.\n\n" \ +"This method is not supported (PostgreSQL does not have multiple data \n" \ +"sets) and will raise a NotSupportedError exception." + +static PyObject * +curs_nextset(cursorObject *self, PyObject *dummy) +{ + EXC_IF_CURS_CLOSED(self); + + PyErr_SetString(NotSupportedError, "not supported by PostgreSQL"); + return NULL; +} + + +/* setinputsizes - predefine memory areas for execute (does nothing) */ + +#define curs_setinputsizes_doc \ +"setinputsizes(sizes) -- Set memory areas before execute.\n\n" \ +"This method currently does nothing but it is safe to call it." 
+ +static PyObject * +curs_setinputsizes(cursorObject *self, PyObject *args) +{ + PyObject *sizes; + + if (!PyArg_ParseTuple(args, "O", &sizes)) + return NULL; + + EXC_IF_CURS_CLOSED(self); + + Py_RETURN_NONE; +} + + +/* setoutputsize - predefine memory areas for execute (does nothing) */ + +#define curs_setoutputsize_doc \ +"setoutputsize(size, column=None) -- Set column buffer size.\n\n" \ +"This method currently does nothing but it is safe to call it." + +static PyObject * +curs_setoutputsize(cursorObject *self, PyObject *args) +{ + long int size, column; + + if (!PyArg_ParseTuple(args, "l|l", &size, &column)) + return NULL; + + EXC_IF_CURS_CLOSED(self); + + Py_RETURN_NONE; +} + + +/* scroll - scroll position in result list */ + +#define curs_scroll_doc \ +"scroll(value, mode='relative') -- Scroll to new position according to mode." + +static PyObject * +curs_scroll(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + int value, newpos; + const char *mode = "relative"; + + static char *kwlist[] = {"value", "mode", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|s", + kwlist, &value, &mode)) + return NULL; + + EXC_IF_CURS_CLOSED(self); + + /* if the cursor is not named we have the full result set and we can do + our own calculations to scroll; else we just delegate the scrolling + to the MOVE SQL statement */ + if (self->qname == NULL) { + if (strcmp(mode, "relative") == 0) { + newpos = self->row + value; + } else if (strcmp( mode, "absolute") == 0) { + newpos = value; + } else { + psyco_set_error(ProgrammingError, self, + "scroll mode must be 'relative' or 'absolute'"); + return NULL; + } + + if (newpos < 0 || newpos >= self->rowcount ) { + psyco_set_error(ProgrammingError, self, + "scroll destination out of bounds"); + return NULL; + } + + self->row = newpos; + } + + else { + char buffer[128]; + + EXC_IF_NO_MARK(self); + EXC_IF_ASYNC_IN_PROGRESS(self, scroll); + EXC_IF_TPC_PREPARED(self->conn, scroll); + + if (strcmp(mode, "absolute") == 0) { 
+ PyOS_snprintf(buffer, sizeof(buffer), "MOVE ABSOLUTE %d FROM %s", + value, self->qname); + } + else { + PyOS_snprintf(buffer, sizeof(buffer), "MOVE %d FROM %s", value, self->qname); + } + if (pq_execute(self, buffer, 0, 0, self->withhold) == -1) return NULL; + if (_psyco_curs_prefetch(self) < 0) return NULL; + } + + Py_RETURN_NONE; +} + + +#define curs_enter_doc \ +"__enter__ -> self" + +static PyObject * +curs_enter(cursorObject *self, PyObject *dummy) +{ + Py_INCREF(self); + return (PyObject *)self; +} + +#define curs_exit_doc \ +"__exit__ -- close the cursor" + +static PyObject * +curs_exit(cursorObject *self, PyObject *args) +{ + PyObject *tmp = NULL; + PyObject *rv = NULL; + + /* don't care about the arguments here: don't need to parse them */ + + if (!(tmp = PyObject_CallMethod((PyObject *)self, "close", ""))) { + goto exit; + } + + /* success (of curs.close()). + * Return None to avoid swallowing the exception */ + rv = Py_None; + Py_INCREF(rv); + +exit: + Py_XDECREF(tmp); + return rv; +} + + +/* Return a newly allocated buffer containing the list of columns to be + * copied. On error return NULL and set an exception. 
+ */ +static char *_psyco_curs_copy_columns(cursorObject *self, PyObject *columns) +{ + PyObject *col, *coliter; + char *columnlist = NULL; + Py_ssize_t bufsize = 512; + Py_ssize_t offset = 1; + + if (columns == NULL || columns == Py_None) { + if (NULL == (columnlist = PyMem_Malloc(2))) { + PyErr_NoMemory(); + goto error; + } + columnlist[0] = '\0'; + goto exit; + } + + if (NULL == (coliter = PyObject_GetIter(columns))) { + goto error; + } + + if (NULL == (columnlist = PyMem_Malloc(bufsize))) { + Py_DECREF(coliter); + PyErr_NoMemory(); + goto error; + } + columnlist[0] = '('; + + while ((col = PyIter_Next(coliter)) != NULL) { + Py_ssize_t collen; + char *colname; + char *quoted_colname; + + if (!(col = psyco_ensure_bytes(col))) { + Py_DECREF(coliter); + goto error; + } + Bytes_AsStringAndSize(col, &colname, &collen); + if (!(quoted_colname = psyco_escape_identifier( + self->conn, colname, collen))) { + Py_DECREF(col); + Py_DECREF(coliter); + goto error; + } + collen = strlen(quoted_colname); + + while (offset + collen > bufsize - 2) { + char *tmp; + bufsize *= 2; + if (NULL == (tmp = PyMem_Realloc(columnlist, bufsize))) { + PQfreemem(quoted_colname); + Py_DECREF(col); + Py_DECREF(coliter); + PyErr_NoMemory(); + goto error; + } + columnlist = tmp; + } + strncpy(&columnlist[offset], quoted_colname, collen); + offset += collen; + columnlist[offset++] = ','; + Py_DECREF(col); + PQfreemem(quoted_colname); + } + Py_DECREF(coliter); + + /* Error raised by the coliter generator */ + if (PyErr_Occurred()) { + goto error; + } + + if (offset == 2) { + goto exit; + } + else { + columnlist[offset - 1] = ')'; + columnlist[offset] = '\0'; + goto exit; + } + +error: + PyMem_Free(columnlist); + columnlist = NULL; + +exit: + return columnlist; +} + +/* extension: copy_from - implements COPY FROM */ + +#define curs_copy_from_doc \ +"copy_from(file, table, sep='\\t', null='\\\\N', size=8192, columns=None) -- Copy table from file." 
+ +static PyObject * +curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + static char *kwlist[] = { + "file", "table", "sep", "null", "size", "columns", NULL}; + + const char *sep = "\t"; + const char *null = "\\N"; + const char *command = + "COPY %s%s FROM stdin WITH DELIMITER AS %s NULL AS %s"; + + Py_ssize_t query_size; + char *query = NULL; + char *columnlist = NULL; + char *quoted_delimiter = NULL; + char *quoted_null = NULL; + char *quoted_table_name = NULL; + const char *table_name; + + Py_ssize_t bufsize = DEFAULT_COPYBUFF; + PyObject *file, *columns = NULL, *res = NULL; + + if (!PyArg_ParseTupleAndKeywords( + args, kwargs, "Os|ssnO", kwlist, + &file, &table_name, &sep, &null, &bufsize, &columns)) { + return NULL; + } + + if (!PyObject_HasAttrString(file, "read")) { + PyErr_SetString(PyExc_TypeError, + "argument 1 must have a .read() method"); + return NULL; + } + + EXC_IF_CURS_CLOSED(self); + EXC_IF_CURS_ASYNC(self, copy_from); + EXC_IF_GREEN(copy_from); + EXC_IF_TPC_PREPARED(self->conn, copy_from); + + if (!(columnlist = _psyco_curs_copy_columns(self, columns))) { + goto exit; + } + + if (!(quoted_delimiter = psyco_escape_string( + self->conn, sep, -1, NULL, NULL))) { + goto exit; + } + + if (!(quoted_null = psyco_escape_string( + self->conn, null, -1, NULL, NULL))) { + goto exit; + } + + if (!(quoted_table_name = psyco_escape_identifier( + self->conn, table_name, -1))) { + goto exit; + } + + query_size = strlen(command) + strlen(quoted_table_name) + strlen(columnlist) + + strlen(quoted_delimiter) + strlen(quoted_null) + 1; + if (!(query = PyMem_New(char, query_size))) { + PyErr_NoMemory(); + goto exit; + } + + PyOS_snprintf(query, query_size, command, + quoted_table_name, columnlist, quoted_delimiter, quoted_null); + + Dprintf("curs_copy_from: query = %s", query); + + Py_CLEAR(self->query); + if (!(self->query = Bytes_FromString(query))) { + goto exit; + } + + /* This routine stores a borrowed reference. 
Although it is only held + * for the duration of curs_copy_from, nested invocations of + * Py_BEGIN_ALLOW_THREADS could surrender control to another thread, + * which could invoke the garbage collector. We thus need an + * INCREF/DECREF pair if we store this pointer in a GC object, such as + * a cursorObject */ + self->copysize = bufsize; + Py_INCREF(file); + self->copyfile = file; + + if (pq_execute(self, query, 0, 0, 0) >= 0) { + res = Py_None; + Py_INCREF(Py_None); + } + + Py_CLEAR(self->copyfile); + +exit: + if (quoted_table_name) { + PQfreemem(quoted_table_name); + } + PyMem_Free(columnlist); + PyMem_Free(quoted_delimiter); + PyMem_Free(quoted_null); + PyMem_Free(query); + + return res; +} + +/* extension: copy_to - implements COPY TO */ + +#define curs_copy_to_doc \ +"copy_to(file, table, sep='\\t', null='\\\\N', columns=None) -- Copy table to file." + +static PyObject * +curs_copy_to(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + static char *kwlist[] = {"file", "table", "sep", "null", "columns", NULL}; + + const char *sep = "\t"; + const char *null = "\\N"; + const char *command = + "COPY %s%s TO stdout WITH DELIMITER AS %s NULL AS %s"; + + Py_ssize_t query_size; + char *query = NULL; + char *columnlist = NULL; + char *quoted_delimiter = NULL; + char *quoted_null = NULL; + + const char *table_name; + char *quoted_table_name = NULL; + PyObject *file = NULL, *columns = NULL, *res = NULL; + + if (!PyArg_ParseTupleAndKeywords( + args, kwargs, "Os|ssO", kwlist, + &file, &table_name, &sep, &null, &columns)) { + return NULL; + } + + if (!PyObject_HasAttrString(file, "write")) { + PyErr_SetString(PyExc_TypeError, + "argument 1 must have a .write() method"); + return NULL; + } + + EXC_IF_CURS_CLOSED(self); + EXC_IF_CURS_ASYNC(self, copy_to); + EXC_IF_GREEN(copy_to); + EXC_IF_TPC_PREPARED(self->conn, copy_to); + + if (!(quoted_table_name = psyco_escape_identifier( + self->conn, table_name, -1))) { + goto exit; + } + + if (!(columnlist = 
_psyco_curs_copy_columns(self, columns))) { + goto exit; + } + + if (!(quoted_delimiter = psyco_escape_string( + self->conn, sep, -1, NULL, NULL))) { + goto exit; + } + + if (!(quoted_null = psyco_escape_string( + self->conn, null, -1, NULL, NULL))) { + goto exit; + } + + query_size = strlen(command) + strlen(quoted_table_name) + strlen(columnlist) + + strlen(quoted_delimiter) + strlen(quoted_null) + 1; + if (!(query = PyMem_New(char, query_size))) { + PyErr_NoMemory(); + goto exit; + } + + PyOS_snprintf(query, query_size, command, + quoted_table_name, columnlist, quoted_delimiter, quoted_null); + + Dprintf("curs_copy_to: query = %s", query); + + Py_CLEAR(self->query); + if (!(self->query = Bytes_FromString(query))) { + goto exit; + } + + self->copysize = 0; + Py_INCREF(file); + self->copyfile = file; + + if (pq_execute(self, query, 0, 0, 0) >= 0) { + res = Py_None; + Py_INCREF(Py_None); + } + + Py_CLEAR(self->copyfile); + +exit: + if (quoted_table_name) { + PQfreemem(quoted_table_name); + } + PyMem_Free(columnlist); + PyMem_Free(quoted_delimiter); + PyMem_Free(quoted_null); + PyMem_Free(query); + + return res; +} + +/* extension: copy_expert - implements extended COPY FROM/TO + + This method supports both COPY FROM and COPY TO with user-specifiable + SQL statement, rather than composing the statement from parameters. +*/ + +#define curs_copy_expert_doc \ +"copy_expert(sql, file, size=8192) -- Submit a user-composed COPY statement.\n" \ +"`file` must be an open, readable file for COPY FROM or an open, writable\n" \ +"file for COPY TO. The optional `size` argument, when specified for a COPY\n" \ +"FROM statement, will be passed to file's read method to control the read\n" \ +"buffer size." 
+ +static PyObject * +curs_copy_expert(cursorObject *self, PyObject *args, PyObject *kwargs) +{ + Py_ssize_t bufsize = DEFAULT_COPYBUFF; + PyObject *sql, *file, *res = NULL; + + static char *kwlist[] = {"sql", "file", "size", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, + "OO|n", kwlist, &sql, &file, &bufsize)) + { return NULL; } + + EXC_IF_CURS_CLOSED(self); + EXC_IF_CURS_ASYNC(self, copy_expert); + EXC_IF_GREEN(copy_expert); + EXC_IF_TPC_PREPARED(self->conn, copy_expert); + + sql = curs_validate_sql_basic(self, sql); + + /* Any failure from here forward should 'goto exit' rather than + 'return NULL' directly. */ + + if (sql == NULL) { goto exit; } + + /* This validation of file is rather weak, in that it doesn't enforce the + association between "COPY FROM" -> "read" and "COPY TO" -> "write". + However, the error handling in _pq_copy_[in|out] must be able to handle + the case where the attempt to call file.read|write fails, so no harm + done. */ + + if ( !PyObject_HasAttrString(file, "read") + && !PyObject_HasAttrString(file, "write") + ) + { + PyErr_SetString(PyExc_TypeError, "file must be a readable file-like" + " object for COPY FROM; a writable file-like object for COPY TO." 
+ ); + goto exit; + } + + self->copysize = bufsize; + Py_INCREF(file); + self->copyfile = file; + + Py_CLEAR(self->query); + Py_INCREF(sql); + self->query = sql; + + /* At this point, the SQL statement must be str, not unicode */ + if (pq_execute(self, Bytes_AS_STRING(sql), 0, 0, 0) >= 0) { + res = Py_None; + Py_INCREF(res); + } + + Py_CLEAR(self->copyfile); + +exit: + Py_XDECREF(sql); + + return res; +} + +/* extension: closed - return true if cursor is closed */ + +#define curs_closed_doc \ +"True if cursor is closed, False if cursor is open" + +static PyObject * +curs_closed_get(cursorObject *self, void *closure) +{ + return PyBool_FromLong(self->closed || (self->conn && self->conn->closed)); +} + +/* extension: withhold - get or set "WITH HOLD" for named cursors */ + +#define curs_withhold_doc \ +"Set or return cursor use of WITH HOLD" + +static PyObject * +curs_withhold_get(cursorObject *self) +{ + return PyBool_FromLong(self->withhold); +} + +RAISES_NEG int +curs_withhold_set(cursorObject *self, PyObject *pyvalue) +{ + int value; + + if (pyvalue != Py_False && self->name == NULL) { + PyErr_SetString(ProgrammingError, + "trying to set .withhold on unnamed cursor"); + return -1; + } + + if ((value = PyObject_IsTrue(pyvalue)) == -1) + return -1; + + self->withhold = value; + + return 0; +} + +#define curs_scrollable_doc \ +"Set or return cursor use of SCROLL" + +static PyObject * +curs_scrollable_get(cursorObject *self) +{ + PyObject *ret = NULL; + + switch (self->scrollable) { + case -1: + ret = Py_None; + break; + case 0: + ret = Py_False; + break; + case 1: + ret = Py_True; + break; + default: + PyErr_SetString(InternalError, "unexpected scrollable value"); + } + + Py_XINCREF(ret); + return ret; +} + +RAISES_NEG int +curs_scrollable_set(cursorObject *self, PyObject *pyvalue) +{ + int value; + + if (pyvalue != Py_None && self->name == NULL) { + PyErr_SetString(ProgrammingError, + "trying to set .scrollable on unnamed cursor"); + return -1; + } + + if (pyvalue 
== Py_None) { + value = -1; + } else if ((value = PyObject_IsTrue(pyvalue)) == -1) { + return -1; + } + + self->scrollable = value; + + return 0; +} + + +#define curs_pgresult_ptr_doc \ +"pgresult_ptr -- Get the PGresult structure pointer." + +static PyObject * +curs_pgresult_ptr_get(cursorObject *self) +{ + if (self->pgres) { + return PyLong_FromVoidPtr((void *)self->pgres); + } + else { + Py_RETURN_NONE; + } +} + + +/** the cursor object **/ + +/* iterator protocol */ + +static PyObject * +cursor_iter(PyObject *self) +{ + EXC_IF_CURS_CLOSED((cursorObject*)self); + Py_INCREF(self); + return self; +} + +static PyObject * +cursor_next(PyObject *self) +{ + PyObject *res; + + if (NULL == ((cursorObject*)self)->name) { + /* we don't parse arguments: curs_fetchone will do that for us */ + res = curs_fetchone((cursorObject*)self, NULL); + + /* convert a None to NULL to signal the end of iteration */ + if (res && res == Py_None) { + Py_DECREF(res); + res = NULL; + } + } + else { + res = curs_next_named((cursorObject*)self); + } + + return res; +} + +/* object method list */ + +static struct PyMethodDef cursorObject_methods[] = { + /* DBAPI-2.0 core */ + {"close", (PyCFunction)curs_close, + METH_NOARGS, curs_close_doc}, + {"execute", (PyCFunction)curs_execute, + METH_VARARGS|METH_KEYWORDS, curs_execute_doc}, + {"executemany", (PyCFunction)curs_executemany, + METH_VARARGS|METH_KEYWORDS, curs_executemany_doc}, + {"fetchone", (PyCFunction)curs_fetchone, + METH_NOARGS, curs_fetchone_doc}, + {"fetchmany", (PyCFunction)curs_fetchmany, + METH_VARARGS|METH_KEYWORDS, curs_fetchmany_doc}, + {"fetchall", (PyCFunction)curs_fetchall, + METH_NOARGS, curs_fetchall_doc}, + {"callproc", (PyCFunction)curs_callproc, + METH_VARARGS, curs_callproc_doc}, + {"nextset", (PyCFunction)curs_nextset, + METH_NOARGS, curs_nextset_doc}, + {"setinputsizes", (PyCFunction)curs_setinputsizes, + METH_VARARGS, curs_setinputsizes_doc}, + {"setoutputsize", (PyCFunction)curs_setoutputsize, + METH_VARARGS, 
curs_setoutputsize_doc}, + /* DBAPI-2.0 extensions */ + {"scroll", (PyCFunction)curs_scroll, + METH_VARARGS|METH_KEYWORDS, curs_scroll_doc}, + {"__enter__", (PyCFunction)curs_enter, + METH_NOARGS, curs_enter_doc}, + {"__exit__", (PyCFunction)curs_exit, + METH_VARARGS, curs_exit_doc}, + /* psycopg extensions */ + {"cast", (PyCFunction)curs_cast, + METH_VARARGS, curs_cast_doc}, + {"mogrify", (PyCFunction)curs_mogrify, + METH_VARARGS|METH_KEYWORDS, curs_mogrify_doc}, + {"copy_from", (PyCFunction)curs_copy_from, + METH_VARARGS|METH_KEYWORDS, curs_copy_from_doc}, + {"copy_to", (PyCFunction)curs_copy_to, + METH_VARARGS|METH_KEYWORDS, curs_copy_to_doc}, + {"copy_expert", (PyCFunction)curs_copy_expert, + METH_VARARGS|METH_KEYWORDS, curs_copy_expert_doc}, + {NULL} +}; + +/* object member list */ + +#define OFFSETOF(x) offsetof(cursorObject, x) + +static struct PyMemberDef cursorObject_members[] = { + /* DBAPI-2.0 basics */ + {"rowcount", T_LONG, OFFSETOF(rowcount), READONLY, + "Number of rows read from the backend in the last command."}, + {"arraysize", T_LONG, OFFSETOF(arraysize), 0, + "Number of records `fetchmany()` must fetch if not explicitly " \ + "specified."}, + {"itersize", T_LONG, OFFSETOF(itersize), 0, + "Number of records ``iter(cur)`` must fetch per network roundtrip."}, + {"description", T_OBJECT, OFFSETOF(description), READONLY, + "Cursor description as defined in DBAPI-2.0."}, + {"lastrowid", T_OID, OFFSETOF(lastoid), READONLY, + "The ``oid`` of the last row inserted by the cursor."}, + /* DBAPI-2.0 extensions */ + {"rownumber", T_LONG, OFFSETOF(row), READONLY, + "The current row position."}, + {"connection", T_OBJECT, OFFSETOF(conn), READONLY, + "The connection where the cursor comes from."}, + {"name", T_STRING, OFFSETOF(name), READONLY}, + {"statusmessage", T_OBJECT, OFFSETOF(pgstatus), READONLY, + "The return message of the last command."}, + {"query", T_OBJECT, OFFSETOF(query), READONLY, + "The last query text sent to the backend."}, + {"row_factory", 
T_OBJECT, OFFSETOF(tuple_factory), 0}, + {"tzinfo_factory", T_OBJECT, OFFSETOF(tzinfo_factory), 0}, + {"typecaster", T_OBJECT, OFFSETOF(caster), READONLY}, + {"string_types", T_OBJECT, OFFSETOF(string_types), 0}, + {"binary_types", T_OBJECT, OFFSETOF(binary_types), 0}, + {NULL} +}; + +/* object calculated member list */ +static struct PyGetSetDef cursorObject_getsets[] = { + { "closed", (getter)curs_closed_get, NULL, + curs_closed_doc, NULL }, + { "withhold", + (getter)curs_withhold_get, + (setter)curs_withhold_set, + curs_withhold_doc, NULL }, + { "scrollable", + (getter)curs_scrollable_get, + (setter)curs_scrollable_set, + curs_scrollable_doc, NULL }, + { "pgresult_ptr", + (getter)curs_pgresult_ptr_get, NULL, + curs_pgresult_ptr_doc, NULL }, + {NULL} +}; + +/* initialization and finalization methods */ + +static int +cursor_setup(cursorObject *self, connectionObject *conn, const char *name) +{ + Dprintf("cursor_setup: init cursor object at %p", self); + Dprintf("cursor_setup: parameters: name = %s, conn = %p", name, conn); + + if (name) { + if (0 > psyco_strdup(&self->name, name, -1)) { + return -1; + } + if (!(self->qname = psyco_escape_identifier(conn, name, -1))) { + return -1; + } + } + + /* FIXME: why does this raise an exception on the _next_ line of code? + if (PyObject_IsInstance((PyObject*)conn, + (PyObject *)&connectionType) == 0) { + PyErr_SetString(PyExc_TypeError, + "argument 1 must be subclass of psycopg2.extensions.connection"); + return -1; + } */ + Py_INCREF(conn); + self->conn = conn; + + self->mark = conn->mark; + self->notuples = 1; + self->arraysize = 1; + self->itersize = 2000; + self->rowcount = -1; + self->lastoid = InvalidOid; + + Py_INCREF(Py_None); + self->tuple_factory = Py_None; + + /* default tzinfo factory */ + { + /* The datetime api doesn't seem to have a constructor to make a + * datetime.timezone, so use the Python interface. 
*/ + PyObject *m = NULL; + if ((m = PyImport_ImportModule("datetime"))) { + self->tzinfo_factory = PyObject_GetAttrString(m, "timezone"); + Py_DECREF(m); + } + if (!self->tzinfo_factory) { + return -1; + } + } + + Dprintf("cursor_setup: good cursor object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + self, Py_REFCNT(self) + ); + return 0; +} + +static int +cursor_clear(cursorObject *self) +{ + Py_CLEAR(self->conn); + Py_CLEAR(self->description); + Py_CLEAR(self->pgstatus); + Py_CLEAR(self->casts); + Py_CLEAR(self->caster); + Py_CLEAR(self->copyfile); + Py_CLEAR(self->tuple_factory); + Py_CLEAR(self->tzinfo_factory); + Py_CLEAR(self->query); + Py_CLEAR(self->string_types); + Py_CLEAR(self->binary_types); + return 0; +} + +static void +cursor_dealloc(PyObject* obj) +{ + cursorObject *self = (cursorObject *)obj; + + PyObject_GC_UnTrack(self); + + if (self->weakreflist) { + PyObject_ClearWeakRefs(obj); + } + + cursor_clear(self); + + PyMem_Free(self->name); + PQfreemem(self->qname); + + CLEARPGRES(self->pgres); + + Dprintf("cursor_dealloc: deleted cursor object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + obj, Py_REFCNT(obj)); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +cursor_init(PyObject *obj, PyObject *args, PyObject *kwargs) +{ + PyObject *conn; + PyObject *name = Py_None; + PyObject *bname = NULL; + const char *cname = NULL; + int rv = -1; + + static char *kwlist[] = {"conn", "name", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O", kwlist, + &connectionType, &conn, &name)) { + goto exit; + } + + if (name != Py_None) { + Py_INCREF(name); /* for ensure_bytes */ + if (!(bname = psyco_ensure_bytes(name))) { + /* name has had a ref stolen */ + goto exit; + } + + if (!(cname = Bytes_AsString(bname))) { + goto exit; + } + } + + rv = cursor_setup((cursorObject *)obj, (connectionObject *)conn, cname); + +exit: + Py_XDECREF(bname); + return rv; +} + +static PyObject * +cursor_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return 
type->tp_alloc(type, 0); +} + +static PyObject * +cursor_repr(cursorObject *self) +{ + return PyString_FromFormat( + "<cursor object at %p; closed: %d>", self, self->closed); +} + +static int +cursor_traverse(cursorObject *self, visitproc visit, void *arg) +{ + Py_VISIT((PyObject *)self->conn); + Py_VISIT(self->description); + Py_VISIT(self->pgstatus); + Py_VISIT(self->casts); + Py_VISIT(self->caster); + Py_VISIT(self->copyfile); + Py_VISIT(self->tuple_factory); + Py_VISIT(self->tzinfo_factory); + Py_VISIT(self->query); + Py_VISIT(self->string_types); + Py_VISIT(self->binary_types); + return 0; +} + + +/* object type */ + +#define cursorType_doc \ +"A database cursor." + +PyTypeObject cursorType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.cursor", + sizeof(cursorObject), 0, + cursor_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)cursor_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)cursor_repr, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER | + Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_WEAKREFS , + /*tp_flags*/ + cursorType_doc, /*tp_doc*/ + (traverseproc)cursor_traverse, /*tp_traverse*/ + (inquiry)cursor_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(cursorObject, weakreflist), /*tp_weaklistoffset*/ + cursor_iter, /*tp_iter*/ + cursor_next, /*tp_iternext*/ + cursorObject_methods, /*tp_methods*/ + cursorObject_members, /*tp_members*/ + cursorObject_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + cursor_init, /*tp_init*/ + 0, /*tp_alloc*/ + cursor_new, /*tp_new*/ +}; diff --git a/psycopg/diagnostics.h b/psycopg/diagnostics.h new file mode 100644 index 0000000000000000000000000000000000000000..2e2858dd5840c4014dd1a525155746b2d97c4d81 --- /dev/null +++ 
b/psycopg/diagnostics.h @@ -0,0 +1,41 @@ +/* diagnostics.h - definition for the psycopg Diagnostics type + * + * Copyright (C) 2013-2019 Matthew Woodcraft + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_DIAGNOSTICS_H +#define PSYCOPG_DIAGNOSTICS_H 1 + +#include "psycopg/error.h" + +extern HIDDEN PyTypeObject diagnosticsType; + +typedef struct { + PyObject_HEAD + + errorObject *err; /* exception to retrieve the diagnostics from */ + +} diagnosticsObject; + +#endif /* PSYCOPG_DIAGNOSTICS_H */ diff --git a/psycopg/diagnostics_type.c b/psycopg/diagnostics_type.c new file mode 100644 index 0000000000000000000000000000000000000000..a46e7d887c1447c398c4576bdfb51b90bbb51580 --- /dev/null +++ b/psycopg/diagnostics_type.c @@ -0,0 +1,208 @@ +/* diagnostics_type.c - present information from libpq error responses + * + * Copyright (C) 2013-2019 Matthew Woodcraft + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/diagnostics.h" +#include "psycopg/error.h" + + +/* These constants are defined in src/include/postgres_ext.h but some may not + * be available with the libpq we currently support at compile time. */ + +/* Available from PG 9.3 */ +#ifndef PG_DIAG_SCHEMA_NAME +#define PG_DIAG_SCHEMA_NAME 's' +#endif +#ifndef PG_DIAG_TABLE_NAME +#define PG_DIAG_TABLE_NAME 't' +#endif +#ifndef PG_DIAG_COLUMN_NAME +#define PG_DIAG_COLUMN_NAME 'c' +#endif +#ifndef PG_DIAG_DATATYPE_NAME +#define PG_DIAG_DATATYPE_NAME 'd' +#endif +#ifndef PG_DIAG_CONSTRAINT_NAME +#define PG_DIAG_CONSTRAINT_NAME 'n' +#endif + +/* Available from PG 9.6 */ +#ifndef PG_DIAG_SEVERITY_NONLOCALIZED +#define PG_DIAG_SEVERITY_NONLOCALIZED 'V' +#endif + + +/* Retrieve an error string from the exception's cursor. + * + * If the cursor or its result isn't available, return None. 
+ */ +static PyObject * +diagnostics_get_field(diagnosticsObject *self, void *closure) +{ + const char *errortext; + + if (!self->err->pgres) { + Py_RETURN_NONE; + } + + errortext = PQresultErrorField(self->err->pgres, (int)(Py_intptr_t)closure); + return error_text_from_chars(self->err, errortext); +} + + +/* object calculated member list */ +static struct PyGetSetDef diagnosticsObject_getsets[] = { + { "severity", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_SEVERITY }, + { "severity_nonlocalized", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_SEVERITY_NONLOCALIZED }, + { "sqlstate", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_SQLSTATE }, + { "message_primary", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_MESSAGE_PRIMARY }, + { "message_detail", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_MESSAGE_DETAIL }, + { "message_hint", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_MESSAGE_HINT }, + { "statement_position", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_STATEMENT_POSITION }, + { "internal_position", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_INTERNAL_POSITION }, + { "internal_query", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_INTERNAL_QUERY }, + { "context", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_CONTEXT }, + { "schema_name", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_SCHEMA_NAME }, + { "table_name", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_TABLE_NAME }, + { "column_name", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_COLUMN_NAME }, + { "datatype_name", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_DATATYPE_NAME }, + { "constraint_name", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_CONSTRAINT_NAME }, + { "source_file", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_SOURCE_FILE }, + { 
"source_line", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_SOURCE_LINE }, + { "source_function", (getter)diagnostics_get_field, NULL, + NULL, (void*) PG_DIAG_SOURCE_FUNCTION }, + {NULL} +}; + +/* initialization and finalization methods */ + +static PyObject * +diagnostics_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + +static int +diagnostics_init(diagnosticsObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *err = NULL; + + if (!PyArg_ParseTuple(args, "O", &err)) + return -1; + + if (!PyObject_TypeCheck(err, &errorType)) { + PyErr_SetString(PyExc_TypeError, + "The argument must be a psycopg2.Error"); + return -1; + } + + Py_INCREF(err); + self->err = (errorObject *)err; + return 0; +} + +static void +diagnostics_dealloc(diagnosticsObject* self) +{ + Py_CLEAR(self->err); + Py_TYPE(self)->tp_free((PyObject *)self); +} + + +/* object type */ + +static const char diagnosticsType_doc[] = + "Details from a database error report.\n\n" + "The object is returned by the `~psycopg2.Error.diag` attribute of the\n" + "`!Error` object.\n" + "All the information available from the |PQresultErrorField|_ function\n" + "are exposed as attributes by the object, e.g. the `!severity` attribute\n" + "returns the `!PG_DIAG_SEVERITY` code. " + "Please refer to the `PostgreSQL documentation`__ for the meaning of all" + " the attributes.\n\n" + ".. |PQresultErrorField| replace:: `!PQresultErrorField()`\n" + ".. _PQresultErrorField: https://www.postgresql.org/docs/current/static/" + "libpq-exec.html#LIBPQ-PQRESULTERRORFIELD\n" + ".. 
__: PQresultErrorField_\n"; + +PyTypeObject diagnosticsType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.Diagnostics", + sizeof(diagnosticsObject), 0, + (destructor)diagnostics_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + diagnosticsType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + diagnosticsObject_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)diagnostics_init, /*tp_init*/ + 0, /*tp_alloc*/ + diagnostics_new, /*tp_new*/ +}; diff --git a/psycopg/error.h b/psycopg/error.h new file mode 100644 index 0000000000000000000000000000000000000000..33128995bec9ba6ce6b4fe2198577d3e65f550be --- /dev/null +++ b/psycopg/error.h @@ -0,0 +1,46 @@ +/* error.h - definition for the psycopg base Error type + * + * Copyright (C) 2013-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. 
+ * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_ERROR_H +#define PSYCOPG_ERROR_H 1 + +extern HIDDEN PyTypeObject errorType; + +typedef struct { + PyBaseExceptionObject exc; + + PyObject *pgerror; + PyObject *pgcode; + cursorObject *cursor; + PyObject *pydecoder; + PGresult *pgres; +} errorObject; + +HIDDEN PyObject *error_text_from_chars(errorObject *self, const char *str); +HIDDEN BORROWED PyObject *exception_from_sqlstate(const char *sqlstate); +HIDDEN BORROWED PyObject *base_exception_from_sqlstate(const char *sqlstate); + +#endif /* PSYCOPG_ERROR_H */ diff --git a/psycopg/error_type.c b/psycopg/error_type.c new file mode 100644 index 0000000000000000000000000000000000000000..5fd96e2483da93f1f0006fdc547271f1fe7fda42 --- /dev/null +++ b/psycopg/error_type.c @@ -0,0 +1,376 @@ +/* error_type.c - python interface to the Error objects + * + * Copyright (C) 2013-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. 
+ * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/error.h" +#include "psycopg/diagnostics.h" +#include "psycopg/pqpath.h" + + +PyObject * +error_text_from_chars(errorObject *self, const char *str) +{ + return psyco_text_from_chars_safe(str, -1, self->pydecoder); +} + + +/* Return the Python exception corresponding to an SQLSTATE error + * code. A list of error codes can be found at: + * https://www.postgresql.org/docs/current/static/errcodes-appendix.html + */ +BORROWED PyObject * +exception_from_sqlstate(const char *sqlstate) +{ + PyObject *exc; + + /* First look up an exception of the proper class */ + exc = PyDict_GetItemString(sqlstate_errors, sqlstate); + if (exc) { + return exc; + } + else { + PyErr_Clear(); + return base_exception_from_sqlstate(sqlstate); + } +} + +BORROWED PyObject * +base_exception_from_sqlstate(const char *sqlstate) +{ + switch (sqlstate[0]) { + case '0': + switch (sqlstate[1]) { + case '8': /* Class 08 - Connection Exception */ + return OperationalError; + case 'A': /* Class 0A - Feature Not Supported */ + return NotSupportedError; + } + break; + case '2': + switch (sqlstate[1]) { + case '0': /* Class 20 - Case Not Found */ + case '1': /* Class 21 - Cardinality Violation */ + return ProgrammingError; + case '2': /* Class 22 - Data Exception */ + return DataError; + case '3': /* Class 23 - Integrity Constraint Violation */ + return IntegrityError; + case '4': /* Class 24 - Invalid Cursor State */ + case '5': /* Class 25 - Invalid Transaction State */ + return InternalError; + case '6': /* Class 26 - Invalid SQL Statement Name */ + case '7': /* Class 27 - Triggered Data Change Violation */ + case '8': /* Class 28 - Invalid Authorization Specification */ + 
return OperationalError; + case 'B': /* Class 2B - Dependent Privilege Descriptors Still Exist */ + case 'D': /* Class 2D - Invalid Transaction Termination */ + case 'F': /* Class 2F - SQL Routine Exception */ + return InternalError; + } + break; + case '3': + switch (sqlstate[1]) { + case '4': /* Class 34 - Invalid Cursor Name */ + return OperationalError; + case '8': /* Class 38 - External Routine Exception */ + case '9': /* Class 39 - External Routine Invocation Exception */ + case 'B': /* Class 3B - Savepoint Exception */ + return InternalError; + case 'D': /* Class 3D - Invalid Catalog Name */ + case 'F': /* Class 3F - Invalid Schema Name */ + return ProgrammingError; + } + break; + case '4': + switch (sqlstate[1]) { + case '0': /* Class 40 - Transaction Rollback */ + return TransactionRollbackError; + case '2': /* Class 42 - Syntax Error or Access Rule Violation */ + case '4': /* Class 44 - WITH CHECK OPTION Violation */ + return ProgrammingError; + } + break; + case '5': + /* Class 53 - Insufficient Resources + Class 54 - Program Limit Exceeded + Class 55 - Object Not In Prerequisite State + Class 57 - Operator Intervention + Class 58 - System Error (errors external to PostgreSQL itself) */ + if (!strcmp(sqlstate, "57014")) + return QueryCanceledError; + else + return OperationalError; + case 'F': /* Class F0 - Configuration File Error */ + return InternalError; + case 'H': /* Class HV - Foreign Data Wrapper Error (SQL/MED) */ + return OperationalError; + case 'P': /* Class P0 - PL/pgSQL Error */ + return InternalError; + case 'X': /* Class XX - Internal Error */ + return InternalError; + } + /* return DatabaseError as a fallback */ + return DatabaseError; +} + + +static const char pgerror_doc[] = + "The error message returned by the backend, if available, else None"; + +static const char pgcode_doc[] = + "The error code returned by the backend, if available, else None"; + +static const char cursor_doc[] = + "The cursor that raised the exception, if 
available, else None"; + +static const char diag_doc[] = + "A Diagnostics object to get further information about the error"; + +static PyMemberDef error_members[] = { + { "pgerror", T_OBJECT, offsetof(errorObject, pgerror), + READONLY, (char *)pgerror_doc }, + { "pgcode", T_OBJECT, offsetof(errorObject, pgcode), + READONLY, (char *)pgcode_doc }, + { "cursor", T_OBJECT, offsetof(errorObject, cursor), + READONLY, (char *)cursor_doc }, + { NULL } +}; + +static PyObject * +error_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) +{ + return ((PyTypeObject *)PyExc_StandardError)->tp_new( + type, args, kwargs); +} + +static int +error_init(errorObject *self, PyObject *args, PyObject *kwargs) +{ + if (((PyTypeObject *)PyExc_StandardError)->tp_init( + (PyObject *)self, args, kwargs) < 0) { + return -1; + } + return 0; +} + +static int +error_traverse(errorObject *self, visitproc visit, void *arg) +{ + Py_VISIT(self->pgerror); + Py_VISIT(self->pgcode); + Py_VISIT(self->cursor); + Py_VISIT(self->pydecoder); + + return ((PyTypeObject *)PyExc_StandardError)->tp_traverse( + (PyObject *)self, visit, arg); +} + +static int +error_clear(errorObject *self) +{ + Py_CLEAR(self->pgerror); + Py_CLEAR(self->pgcode); + Py_CLEAR(self->cursor); + Py_CLEAR(self->pydecoder); + + return ((PyTypeObject *)PyExc_StandardError)->tp_clear((PyObject *)self); +} + +static void +error_dealloc(errorObject *self) +{ + PyObject_GC_UnTrack((PyObject *)self); + error_clear(self); + CLEARPGRES(self->pgres); + + Py_TYPE(self)->tp_free((PyObject *)self); +} + + +static PyObject * +error_get_diag(errorObject *self, void *closure) +{ + return PyObject_CallFunctionObjArgs( + (PyObject *)&diagnosticsType, (PyObject *)self, NULL); +} + +static struct PyGetSetDef error_getsets[] = { + { "diag", (getter)error_get_diag, NULL, (char *)diag_doc }, + { NULL } +}; + + +/* Error.__reduce__ + * + * The method is required to make exceptions picklable: set the cursor + * attribute to None. 
+    /* we don't call the StandardError's setstate as it would try to load the
self->pgcode = PyDict_GetItemString(state, "pgcode"); + Py_XINCREF(self->pgcode); + + Py_CLEAR(self->cursor); + /* We never expect a cursor in the state as it's not picklable. + * at most there could be a None here, coming from Psycopg < 2.5 */ + +exit: + rv = Py_None; + Py_INCREF(rv); + +error: + return rv; +} + +static PyMethodDef error_methods[] = { + /* Make Error and all its subclasses picklable. */ + {"__reduce__", (PyCFunction)error_reduce, METH_NOARGS }, + {"__setstate__", (PyCFunction)error_setstate, METH_O }, + {NULL} +}; + + +PyTypeObject errorType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.Error", + sizeof(errorObject), 0, + (destructor)error_dealloc, /* tp_dealloc */ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + Error_doc, /*tp_doc*/ + (traverseproc)error_traverse, /*tp_traverse*/ + (inquiry)error_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + error_methods, /*tp_methods*/ + error_members, /*tp_members*/ + error_getsets, /*tp_getset*/ + 0, /*tp_base Will be set to StandardError in module init */ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)error_init, /*tp_init*/ + 0, /*tp_alloc*/ + error_new, /*tp_new*/ +}; diff --git a/psycopg/green.c b/psycopg/green.c new file mode 100644 index 0000000000000000000000000000000000000000..9de05e7a6f9bc547416476a746a5e7bb0d5d8874 --- /dev/null +++ b/psycopg/green.c @@ -0,0 +1,210 @@ +/* green.c - cooperation with coroutine libraries. + * + * Copyright (C) 2010-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/green.h" +#include "psycopg/connection.h" +#include "psycopg/pqpath.h" + + +HIDDEN PyObject *wait_callback = NULL; + +static PyObject *have_wait_callback(void); +static void green_panic(connectionObject *conn); + +/* Register a callback function to block waiting for data. + * + * The function is exported by the _psycopg module. + */ +PyObject * +psyco_set_wait_callback(PyObject *self, PyObject *obj) +{ + Py_XDECREF(wait_callback); + + if (obj != Py_None) { + wait_callback = obj; + Py_INCREF(obj); + } + else { + wait_callback = NULL; + } + + Py_RETURN_NONE; +} + + +/* Return the currently registered wait callback function. + * + * The function is exported by the _psycopg module. + */ +PyObject * +psyco_get_wait_callback(PyObject *self, PyObject *obj) +{ + PyObject *ret; + + ret = wait_callback; + if (!ret) { + ret = Py_None; + } + + Py_INCREF(ret); + return ret; +} + + +/* Return nonzero if a wait callback should be called. 
+ * The function should be called holding the connection lock, and
+ * + * If PGresult is NULL, there may have been either a libpq error + * or an exception raised by Python code: before raising an exception + * check if there is already one using `PyErr_Occurred()` */ +PGresult * +psyco_exec_green(connectionObject *conn, const char *command) +{ + PGresult *result = NULL; + + /* Check that there is a single concurrently executing query */ + if (conn->async_cursor) { + PyErr_SetString(ProgrammingError, + "a single async query can be executed on the same connection"); + goto end; + } + /* we don't care about which cursor is executing the query, and + * it may also be that no cursor is involved at all and this is + * an internal query. So just store anything in the async_cursor, + * respecting the code expecting it to be a weakref */ + if (!(conn->async_cursor = PyWeakref_NewRef((PyObject*)conn, NULL))) { + goto end; + } + + /* Send the query asynchronously */ + if (0 == pq_send_query(conn, command)) { + goto end; + } + + /* Enter the poll loop with a write. When writing is finished the poll + implementation will set the status to ASYNC_READ without exiting the + loop. If read is finished the status is finally set to ASYNC_DONE. + */ + conn->async_status = ASYNC_WRITE; + + if (0 != psyco_wait(conn)) { + green_panic(conn); + goto end; + } + + /* the result is now in the connection: take its ownership */ + result = conn->pgres; + conn->pgres = NULL; + +end: + CLEARPGRES(conn->pgres); + conn->async_status = ASYNC_DONE; + Py_CLEAR(conn->async_cursor); + return result; +} + + +/* There has been a communication error during query execution. It may have + * happened e.g. for a network error or an error in the callback, and we + * cannot tell the two apart. + * Trying to PQcancel or PQgetResult to put the connection back into a working + * state doesn't work nice (issue #113): the program blocks and the + * interpreter won't even respond to SIGINT. 
PQreset could work async, but the + * python program would have then a connection made but not configured where + * it is probably not designed to handled. So for the moment we do the kindest + * thing we can: we close the connection. A long-running program should + * already have a way to discard broken connections; a short-lived one would + * benefit of working ctrl-c. + */ +static void +green_panic(connectionObject *conn) +{ + Dprintf("green_panic: closing the connection"); + conn_close_locked(conn); +} diff --git a/psycopg/green.h b/psycopg/green.h new file mode 100644 index 0000000000000000000000000000000000000000..f4a675cabac259073464c7893ef41a832a54a108 --- /dev/null +++ b/psycopg/green.h @@ -0,0 +1,76 @@ +/* green.c - cooperation with coroutine libraries. + * + * Copyright (C) 2010-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+#include <libpq-fe.h>
+#include "psycopg/connection.h"
+/* htonl(), ntohl() */
+#ifdef _WIN32
+#include <winsock2.h>
+/* gettimeofday() */
+#include "psycopg/win32_support.h"
+#else
+#include <arpa/inet.h>
+#endif
+ */ +void +fe_sendint64(int64_t i, char *buf) +{ + uint32_t n32; + + /* High order half first, since we're doing MSB-first */ + n32 = (uint32_t) (i >> 32); + n32 = htonl(n32); + memcpy(&buf[0], &n32, 4); + + /* Now the low order half */ + n32 = (uint32_t) i; + n32 = htonl(n32); + memcpy(&buf[4], &n32, 4); +} + +/* + * Converts an int64 from network byte order to native format. + */ +int64_t +fe_recvint64(char *buf) +{ + int64_t result; + uint32_t h32; + uint32_t l32; + + memcpy(&h32, buf, 4); + memcpy(&l32, buf + 4, 4); + h32 = ntohl(h32); + l32 = ntohl(l32); + + result = h32; + result <<= 32; + result |= l32; + + return result; +} diff --git a/psycopg/libpq_support.h b/psycopg/libpq_support.h new file mode 100644 index 0000000000000000000000000000000000000000..0b304d6a913b7d246d184101c283470eae6daab7 --- /dev/null +++ b/psycopg/libpq_support.h @@ -0,0 +1,49 @@ +/* libpq_support.h - definitions for libpq_support.c + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ +#ifndef PSYCOPG_LIBPQ_SUPPORT_H +#define PSYCOPG_LIBPQ_SUPPORT_H 1 + +#include "psycopg/config.h" + +/* type and constant definitions from internal postgres include */ +typedef uint64_t XLogRecPtr; + +/* have to use lowercase %x, as PyString_FromFormat can't do %X */ +#define XLOGFMTSTR "%x/%x" +#define XLOGFMTARGS(x) ((uint32_t)((x) >> 32)), ((uint32_t)((x) & 0xFFFFFFFF)) + +/* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */ +#define UNIX_EPOCH_JDATE 2440588 /* == date2j(1970, 1, 1) */ +#define POSTGRES_EPOCH_JDATE 2451545 /* == date2j(2000, 1, 1) */ + +#define SECS_PER_DAY 86400 +#define USECS_PER_SEC 1000000LL + +HIDDEN int64_t feGetCurrentTimestamp(void); +HIDDEN void fe_sendint64(int64_t i, char *buf); +HIDDEN int64_t fe_recvint64(char *buf); + +#endif /* !defined(PSYCOPG_LIBPQ_SUPPORT_H) */ diff --git a/psycopg/lobject.h b/psycopg/lobject.h new file mode 100644 index 0000000000000000000000000000000000000000..37e6b13b4c078b2b8141d10707b2230591b5cbf2 --- /dev/null +++ b/psycopg/lobject.h @@ -0,0 +1,102 @@ +/* lobject.h - definition for the psycopg lobject type + * + * Copyright (C) 2006-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. 
+#include <libpq/libpq-fs.h>
+
+#include "psycopg/connection.h"
psyco_set_error(ProgrammingError, NULL, \ + "can't use a lobject outside of transactions"); \ + return NULL; \ +} +#define EXC_IF_LOBJ_UNMARKED(self) \ +if (self->conn->mark != self->mark) { \ + psyco_set_error(ProgrammingError, NULL, \ + "lobject isn't valid anymore"); \ + return NULL; \ +} + +/* Values for the lobject mode */ +#define LOBJECT_READ 1 +#define LOBJECT_WRITE 2 +#define LOBJECT_BINARY 4 +#define LOBJECT_TEXT 8 + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_LOBJECT_H) */ diff --git a/psycopg/lobject_int.c b/psycopg/lobject_int.c new file mode 100644 index 0000000000000000000000000000000000000000..f0c72c1d49676956e25def93205d5946c4dd4407 --- /dev/null +++ b/psycopg/lobject_int.c @@ -0,0 +1,486 @@ +/* lobject_int.c - code used by the lobject object + * + * Copyright (C) 2006-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+#include <string.h>
+
+static void
+ */ +static char * +_lobject_unparse_mode(int mode) +{ + char *buf; + char *c; + + /* the longest is 'rwt' */ + if (!(c = buf = PyMem_Malloc(4))) { + PyErr_NoMemory(); + return NULL; + } + + if (mode & LOBJECT_READ) { *c++ = 'r'; } + if (mode & LOBJECT_WRITE) { *c++ = 'w'; } + + if (buf == c) { + /* neither read nor write */ + *c++ = 'n'; + } + else { + if (mode & LOBJECT_TEXT) { + *c++ = 't'; + } + else { + *c++ = 'b'; + } + } + *c = '\0'; + + return buf; +} + +/* lobject_open - create a new/open an existing lo */ + +RAISES_NEG int +lobject_open(lobjectObject *self, connectionObject *conn, + Oid oid, const char *smode, Oid new_oid, const char *new_file) +{ + int retvalue = -1; + int pgmode = 0; + int mode; + + if (0 > (mode = _lobject_parse_mode(smode))) { + return -1; + } + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(self->conn->lock)); + + retvalue = pq_begin_locked(self->conn, &_save); + if (retvalue < 0) + goto end; + + /* if the oid is InvalidOid we create a new lob before opening it + or we import a file from the FS, depending on the value of + new_file */ + if (oid == InvalidOid) { + if (new_file) + self->oid = lo_import(self->conn->pgconn, new_file); + else { + /* Use lo_creat when possible to be more middleware-friendly. + See ticket #88. 
*/ + if (new_oid != InvalidOid) + self->oid = lo_create(self->conn->pgconn, new_oid); + else + self->oid = lo_creat(self->conn->pgconn, INV_READ | INV_WRITE); + } + + Dprintf("lobject_open: large object created with oid = %u", + self->oid); + + if (self->oid == InvalidOid) { + collect_error(self->conn); + retvalue = -1; + goto end; + } + + mode = (mode & ~LOBJECT_READ) | LOBJECT_WRITE; + } + else { + self->oid = oid; + } + + /* if the oid is a real one we try to open with the given mode */ + if (mode & LOBJECT_READ) { pgmode |= INV_READ; } + if (mode & LOBJECT_WRITE) { pgmode |= INV_WRITE; } + if (pgmode) { + self->fd = lo_open(self->conn->pgconn, self->oid, pgmode); + Dprintf("lobject_open: large object opened with mode = %i fd = %d", + pgmode, self->fd); + + if (self->fd == -1) { + collect_error(self->conn); + retvalue = -1; + goto end; + } + } + + /* set the mode for future reference */ + self->mode = mode; + Py_BLOCK_THREADS; + self->smode = _lobject_unparse_mode(mode); + Py_UNBLOCK_THREADS; + if (NULL == self->smode) { + retvalue = 1; /* exception already set */ + goto end; + } + + retvalue = 0; + + end: + pthread_mutex_unlock(&(self->conn->lock)); + Py_END_ALLOW_THREADS; + + if (retvalue < 0) + pq_complete_error(self->conn); + /* if retvalue > 0, an exception is already set */ + + return retvalue; +} + +/* lobject_close - close an existing lo */ + +RAISES_NEG static int +lobject_close_locked(lobjectObject *self) +{ + int retvalue; + + Dprintf("lobject_close_locked: conn->closed %ld", self->conn->closed); + switch (self->conn->closed) { + case 0: + /* Connection is open, go ahead */ + break; + case 1: + /* Connection is closed, return a success */ + return 0; + break; + default: + conn_set_error(self->conn, "the connection is broken"); + return -1; + break; + } + + if (self->conn->autocommit || + self->conn->mark != self->mark || + self->fd == -1) + return 0; + + retvalue = lo_close(self->conn->pgconn, self->fd); + self->fd = -1; + if (retvalue < 0) + 
collect_error(self->conn); + + return retvalue; +} + +RAISES_NEG int +lobject_close(lobjectObject *self) +{ + int retvalue; + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(self->conn->lock)); + + retvalue = lobject_close_locked(self); + + pthread_mutex_unlock(&(self->conn->lock)); + Py_END_ALLOW_THREADS; + + if (retvalue < 0) + pq_complete_error(self->conn); + return retvalue; +} + +/* lobject_unlink - remove an lo from database */ + +RAISES_NEG int +lobject_unlink(lobjectObject *self) +{ + int retvalue = -1; + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(self->conn->lock)); + + retvalue = pq_begin_locked(self->conn, &_save); + if (retvalue < 0) + goto end; + + /* first we make sure the lobject is closed and then we unlink */ + retvalue = lobject_close_locked(self); + if (retvalue < 0) + goto end; + + retvalue = lo_unlink(self->conn->pgconn, self->oid); + if (retvalue < 0) + collect_error(self->conn); + + end: + pthread_mutex_unlock(&(self->conn->lock)); + Py_END_ALLOW_THREADS; + + if (retvalue < 0) + pq_complete_error(self->conn); + return retvalue; +} + +/* lobject_write - write bytes to a lo */ + +RAISES_NEG Py_ssize_t +lobject_write(lobjectObject *self, const char *buf, size_t len) +{ + Py_ssize_t written; + + Dprintf("lobject_writing: fd = %d, len = " FORMAT_CODE_SIZE_T, + self->fd, len); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(self->conn->lock)); + + written = lo_write(self->conn->pgconn, self->fd, buf, len); + if (written < 0) + collect_error(self->conn); + + pthread_mutex_unlock(&(self->conn->lock)); + Py_END_ALLOW_THREADS; + + if (written < 0) + pq_complete_error(self->conn); + return written; +} + +/* lobject_read - read bytes from a lo */ + +RAISES_NEG Py_ssize_t +lobject_read(lobjectObject *self, char *buf, size_t len) +{ + Py_ssize_t n_read; + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(self->conn->lock)); + + n_read = lo_read(self->conn->pgconn, self->fd, buf, len); + if (n_read < 0) + collect_error(self->conn); + + 
pthread_mutex_unlock(&(self->conn->lock)); + Py_END_ALLOW_THREADS; + + if (n_read < 0) + pq_complete_error(self->conn); + return n_read; +} + +/* lobject_seek - move the current position in the lo */ + +RAISES_NEG Py_ssize_t +lobject_seek(lobjectObject *self, Py_ssize_t pos, int whence) +{ + Py_ssize_t where; + + Dprintf("lobject_seek: fd = %d, pos = " FORMAT_CODE_PY_SSIZE_T ", whence = %d", + self->fd, pos, whence); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(self->conn->lock)); + +#ifdef HAVE_LO64 + if (self->conn->server_version < 90300) { + where = (Py_ssize_t)lo_lseek(self->conn->pgconn, self->fd, (int)pos, whence); + } else { + where = (Py_ssize_t)lo_lseek64(self->conn->pgconn, self->fd, pos, whence); + } +#else + where = (Py_ssize_t)lo_lseek(self->conn->pgconn, self->fd, (int)pos, whence); +#endif + Dprintf("lobject_seek: where = " FORMAT_CODE_PY_SSIZE_T, where); + if (where < 0) + collect_error(self->conn); + + pthread_mutex_unlock(&(self->conn->lock)); + Py_END_ALLOW_THREADS; + + if (where < 0) + pq_complete_error(self->conn); + return where; +} + +/* lobject_tell - tell the current position in the lo */ + +RAISES_NEG Py_ssize_t +lobject_tell(lobjectObject *self) +{ + Py_ssize_t where; + + Dprintf("lobject_tell: fd = %d", self->fd); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(self->conn->lock)); + +#ifdef HAVE_LO64 + if (self->conn->server_version < 90300) { + where = (Py_ssize_t)lo_tell(self->conn->pgconn, self->fd); + } else { + where = (Py_ssize_t)lo_tell64(self->conn->pgconn, self->fd); + } +#else + where = (Py_ssize_t)lo_tell(self->conn->pgconn, self->fd); +#endif + Dprintf("lobject_tell: where = " FORMAT_CODE_PY_SSIZE_T, where); + if (where < 0) + collect_error(self->conn); + + pthread_mutex_unlock(&(self->conn->lock)); + Py_END_ALLOW_THREADS; + + if (where < 0) + pq_complete_error(self->conn); + return where; +} + +/* lobject_export - export to a local file */ + +RAISES_NEG int +lobject_export(lobjectObject *self, const char *filename) 
+{ + int retvalue; + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(self->conn->lock)); + + retvalue = pq_begin_locked(self->conn, &_save); + if (retvalue < 0) + goto end; + + retvalue = lo_export(self->conn->pgconn, self->oid, filename); + if (retvalue < 0) + collect_error(self->conn); + + end: + pthread_mutex_unlock(&(self->conn->lock)); + Py_END_ALLOW_THREADS; + + if (retvalue < 0) + pq_complete_error(self->conn); + return retvalue; +} + +RAISES_NEG int +lobject_truncate(lobjectObject *self, size_t len) +{ + int retvalue; + + Dprintf("lobject_truncate: fd = %d, len = " FORMAT_CODE_SIZE_T, + self->fd, len); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(self->conn->lock)); + +#ifdef HAVE_LO64 + if (self->conn->server_version < 90300) { + retvalue = lo_truncate(self->conn->pgconn, self->fd, len); + } else { + retvalue = lo_truncate64(self->conn->pgconn, self->fd, len); + } +#else + retvalue = lo_truncate(self->conn->pgconn, self->fd, len); +#endif + Dprintf("lobject_truncate: result = %d", retvalue); + if (retvalue < 0) + collect_error(self->conn); + + pthread_mutex_unlock(&(self->conn->lock)); + Py_END_ALLOW_THREADS; + + if (retvalue < 0) + pq_complete_error(self->conn); + return retvalue; + +} diff --git a/psycopg/lobject_type.c b/psycopg/lobject_type.c new file mode 100644 index 0000000000000000000000000000000000000000..8376b3a4f74f2b0345dddf202b5c1c4f0843ad3e --- /dev/null +++ b/psycopg/lobject_type.c @@ -0,0 +1,471 @@ +/* lobject_type.c - python interface to lobject objects + * + * Copyright (C) 2006-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/lobject.h" +#include "psycopg/connection.h" +#include "psycopg/microprotocols.h" +#include "psycopg/microprotocols_proto.h" +#include "psycopg/pqpath.h" + +#include + + +/** public methods **/ + +/* close method - close the lobject */ + +#define psyco_lobj_close_doc \ +"close() -- Close the lobject." + +static PyObject * +psyco_lobj_close(lobjectObject *self, PyObject *args) +{ + /* file-like objects can be closed multiple times and remember that + closing the current transaction is equivalent to close all the + opened large objects */ + if (!lobject_is_closed(self) + && !self->conn->autocommit + && self->conn->mark == self->mark) + { + Dprintf("psyco_lobj_close: closing lobject at %p", self); + if (lobject_close(self) < 0) + return NULL; + } + + Py_RETURN_NONE; +} + +/* write method - write data to the lobject */ + +#define psyco_lobj_write_doc \ +"write(str) -- Write a string to the large object." 
+ +static PyObject * +psyco_lobj_write(lobjectObject *self, PyObject *args) +{ + char *buffer; + Py_ssize_t len; + Py_ssize_t res; + PyObject *obj; + PyObject *data = NULL; + PyObject *rv = NULL; + + if (!PyArg_ParseTuple(args, "O", &obj)) return NULL; + + EXC_IF_LOBJ_CLOSED(self); + EXC_IF_LOBJ_LEVEL0(self); + EXC_IF_LOBJ_UNMARKED(self); + + if (Bytes_Check(obj)) { + Py_INCREF(obj); + data = obj; + } + else if (PyUnicode_Check(obj)) { + if (!(data = conn_encode(self->conn, obj))) { goto exit; } + } + else { + PyErr_Format(PyExc_TypeError, + "lobject.write requires a string; got %s instead", + Py_TYPE(obj)->tp_name); + goto exit; + } + + if (-1 == Bytes_AsStringAndSize(data, &buffer, &len)) { + goto exit; + } + + if (0 > (res = lobject_write(self, buffer, (size_t)len))) { + goto exit; + } + + rv = PyInt_FromSsize_t((Py_ssize_t)res); + +exit: + Py_XDECREF(data); + return rv; +} + +/* read method - read data from the lobject */ + +#define psyco_lobj_read_doc \ +"read(size=-1) -- Read at most size bytes or to the end of the large object." 
+ +static PyObject * +psyco_lobj_read(lobjectObject *self, PyObject *args) +{ + PyObject *res; + Py_ssize_t where, end; + Py_ssize_t size = -1; + char *buffer; + + if (!PyArg_ParseTuple(args, "|n", &size)) return NULL; + + EXC_IF_LOBJ_CLOSED(self); + EXC_IF_LOBJ_LEVEL0(self); + EXC_IF_LOBJ_UNMARKED(self); + + if (size < 0) { + if ((where = lobject_tell(self)) < 0) return NULL; + if ((end = lobject_seek(self, 0, SEEK_END)) < 0) return NULL; + if (lobject_seek(self, where, SEEK_SET) < 0) return NULL; + size = end - where; + } + + if ((buffer = PyMem_Malloc(size)) == NULL) { + PyErr_NoMemory(); + return NULL; + } + if ((size = lobject_read(self, buffer, size)) < 0) { + PyMem_Free(buffer); + return NULL; + } + + if (self->mode & LOBJECT_BINARY) { + res = Bytes_FromStringAndSize(buffer, size); + } else { + res = conn_decode(self->conn, buffer, size); + } + PyMem_Free(buffer); + + return res; +} + +/* seek method - seek in the lobject */ + +#define psyco_lobj_seek_doc \ +"seek(offset, whence=0) -- Set the lobject's current position." 
+ +static PyObject * +psyco_lobj_seek(lobjectObject *self, PyObject *args) +{ + Py_ssize_t offset, pos=0; + int whence=0; + + if (!PyArg_ParseTuple(args, "n|i", &offset, &whence)) + return NULL; + + EXC_IF_LOBJ_CLOSED(self); + EXC_IF_LOBJ_LEVEL0(self); + EXC_IF_LOBJ_UNMARKED(self); + +#ifdef HAVE_LO64 + if ((offset < INT_MIN || offset > INT_MAX) + && self->conn->server_version < 90300) { + PyErr_Format(NotSupportedError, + "offset out of range (%ld): server version %d " + "does not support the lobject 64 API", + offset, self->conn->server_version); + return NULL; + } +#else + if (offset < INT_MIN || offset > INT_MAX) { + PyErr_Format(InterfaceError, + "offset out of range (" FORMAT_CODE_PY_SSIZE_T "): " + "this psycopg version was not built with lobject 64 API support", + offset); + return NULL; + } +#endif + + if ((pos = lobject_seek(self, offset, whence)) < 0) + return NULL; + + return PyInt_FromSsize_t(pos); +} + +/* tell method - tell current position in the lobject */ + +#define psyco_lobj_tell_doc \ +"tell() -- Return the lobject's current position." + +static PyObject * +psyco_lobj_tell(lobjectObject *self, PyObject *args) +{ + Py_ssize_t pos; + + EXC_IF_LOBJ_CLOSED(self); + EXC_IF_LOBJ_LEVEL0(self); + EXC_IF_LOBJ_UNMARKED(self); + + if ((pos = lobject_tell(self)) < 0) + return NULL; + + return PyInt_FromSsize_t(pos); +} + +/* unlink method - unlink (destroy) the lobject */ + +#define psyco_lobj_unlink_doc \ +"unlink() -- Close and then remove the lobject." + +static PyObject * +psyco_lobj_unlink(lobjectObject *self, PyObject *args) +{ + if (lobject_unlink(self) < 0) + return NULL; + + Py_RETURN_NONE; +} + +/* export method - export lobject's content to given file */ + +#define psyco_lobj_export_doc \ +"export(filename) -- Export large object to given file." 
+ +static PyObject * +psyco_lobj_export(lobjectObject *self, PyObject *args) +{ + const char *filename; + + if (!PyArg_ParseTuple(args, "s", &filename)) + return NULL; + + EXC_IF_LOBJ_LEVEL0(self); + + if (lobject_export(self, filename) < 0) + return NULL; + + Py_RETURN_NONE; +} + + +static PyObject * +psyco_lobj_get_closed(lobjectObject *self, void *closure) +{ + return PyBool_FromLong(lobject_is_closed(self)); +} + +#define psyco_lobj_truncate_doc \ +"truncate(len=0) -- Truncate large object to given size." + +static PyObject * +psyco_lobj_truncate(lobjectObject *self, PyObject *args) +{ + Py_ssize_t len = 0; + + if (!PyArg_ParseTuple(args, "|n", &len)) + return NULL; + + EXC_IF_LOBJ_CLOSED(self); + EXC_IF_LOBJ_LEVEL0(self); + EXC_IF_LOBJ_UNMARKED(self); + +#ifdef HAVE_LO64 + if (len > INT_MAX && self->conn->server_version < 90300) { + PyErr_Format(NotSupportedError, + "len out of range (" FORMAT_CODE_PY_SSIZE_T "): " + "server version %d does not support the lobject 64 API", + len, self->conn->server_version); + return NULL; + } +#else + if (len > INT_MAX) { + PyErr_Format(InterfaceError, + "len out of range (" FORMAT_CODE_PY_SSIZE_T "): " + "this psycopg version was not built with lobject 64 API support", + len); + return NULL; + } +#endif + + if (lobject_truncate(self, len) < 0) + return NULL; + + Py_RETURN_NONE; +} + + +/** the lobject object **/ + +/* object method list */ + +static struct PyMethodDef lobjectObject_methods[] = { + {"read", (PyCFunction)psyco_lobj_read, + METH_VARARGS, psyco_lobj_read_doc}, + {"write", (PyCFunction)psyco_lobj_write, + METH_VARARGS, psyco_lobj_write_doc}, + {"seek", (PyCFunction)psyco_lobj_seek, + METH_VARARGS, psyco_lobj_seek_doc}, + {"tell", (PyCFunction)psyco_lobj_tell, + METH_NOARGS, psyco_lobj_tell_doc}, + {"close", (PyCFunction)psyco_lobj_close, + METH_NOARGS, psyco_lobj_close_doc}, + {"unlink",(PyCFunction)psyco_lobj_unlink, + METH_NOARGS, psyco_lobj_unlink_doc}, + {"export",(PyCFunction)psyco_lobj_export, + 
METH_VARARGS, psyco_lobj_export_doc}, + {"truncate",(PyCFunction)psyco_lobj_truncate, + METH_VARARGS, psyco_lobj_truncate_doc}, + {NULL} +}; + +/* object member list */ + +static struct PyMemberDef lobjectObject_members[] = { + {"oid", T_OID, offsetof(lobjectObject, oid), READONLY, + "The backend OID associated to this lobject."}, + {"mode", T_STRING, offsetof(lobjectObject, smode), READONLY, + "Open mode."}, + {NULL} +}; + +/* object getset list */ + +static struct PyGetSetDef lobjectObject_getsets[] = { + {"closed", (getter)psyco_lobj_get_closed, NULL, + "The if the large object is closed (no file-like methods)."}, + {NULL} +}; + +/* initialization and finalization methods */ + +static int +lobject_setup(lobjectObject *self, connectionObject *conn, + Oid oid, const char *smode, Oid new_oid, const char *new_file) +{ + Dprintf("lobject_setup: init lobject object at %p", self); + + if (conn->autocommit) { + psyco_set_error(ProgrammingError, NULL, + "can't use a lobject outside of transactions"); + return -1; + } + + Py_INCREF((PyObject*)conn); + self->conn = conn; + self->mark = conn->mark; + + self->fd = -1; + self->oid = InvalidOid; + + if (0 != lobject_open(self, conn, oid, smode, new_oid, new_file)) + return -1; + + Dprintf("lobject_setup: good lobject object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, self, Py_REFCNT(self)); + Dprintf("lobject_setup: oid = %u, fd = %d", self->oid, self->fd); + return 0; +} + +static void +lobject_dealloc(PyObject* obj) +{ + lobjectObject *self = (lobjectObject *)obj; + + if (self->conn && self->fd != -1) { + if (lobject_close(self) < 0) + PyErr_Print(); + } + Py_CLEAR(self->conn); + PyMem_Free(self->smode); + + Dprintf("lobject_dealloc: deleted lobject object at %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, obj, Py_REFCNT(obj)); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +lobject_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + Oid oid = InvalidOid, new_oid = InvalidOid; + const char *smode = NULL; + const char 
*new_file = NULL; + PyObject *conn = NULL; + + if (!PyArg_ParseTuple(args, "O!|IzIz", + &connectionType, &conn, + &oid, &smode, &new_oid, &new_file)) + return -1; + + if (!smode) + smode = ""; + + return lobject_setup((lobjectObject *)obj, + (connectionObject *)conn, oid, smode, new_oid, new_file); +} + +static PyObject * +lobject_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + +static PyObject * +lobject_repr(lobjectObject *self) +{ + return PyString_FromFormat( + "", self, lobject_is_closed(self)); +} + + +/* object type */ + +#define lobjectType_doc \ +"A database large object." + +PyTypeObject lobjectType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.lobject", + sizeof(lobjectObject), 0, + lobject_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)lobject_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + (reprfunc)lobject_repr, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_ITER, /*tp_flags*/ + lobjectType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + lobjectObject_methods, /*tp_methods*/ + lobjectObject_members, /*tp_members*/ + lobjectObject_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + lobject_init, /*tp_init*/ + 0, /*tp_alloc*/ + lobject_new, /*tp_new*/ +}; diff --git a/psycopg/microprotocols.c b/psycopg/microprotocols.c new file mode 100644 index 0000000000000000000000000000000000000000..cbd22da72d8971c0731ac76010cba7349a1f16f5 --- /dev/null +++ b/psycopg/microprotocols.c @@ -0,0 +1,277 @@ +/* microprotocols.c - minimalist and non-validating protocols implementation + * + * Copyright (C) 2003-2019 
Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/microprotocols.h" +#include "psycopg/microprotocols_proto.h" +#include "psycopg/cursor.h" +#include "psycopg/connection.h" + + +/** the adapters registry **/ + +PyObject *psyco_adapters; + +/* microprotocols_init - initialize the adapters dictionary */ + +RAISES_NEG int +microprotocols_init(PyObject *module) +{ + /* create adapters dictionary and put it in module namespace */ + if (!(psyco_adapters = PyDict_New())) { + return -1; + } + + Py_INCREF(psyco_adapters); + if (0 > PyModule_AddObject(module, "adapters", psyco_adapters)) { + Py_DECREF(psyco_adapters); + return -1; + } + + return 0; +} + + +/* microprotocols_add - add a reverse type-caster to the dictionary + * + * Return 0 on success, else -1 and set an exception. 
+ */ +RAISES_NEG int +microprotocols_add(PyTypeObject *type, PyObject *proto, PyObject *cast) +{ + PyObject *key = NULL; + int rv = -1; + + if (proto == NULL) proto = (PyObject*)&isqlquoteType; + + if (!(key = PyTuple_Pack(2, (PyObject*)type, proto))) { goto exit; } + if (0 != PyDict_SetItem(psyco_adapters, key, cast)) { goto exit; } + + rv = 0; + +exit: + Py_XDECREF(key); + return rv; +} + +/* Check if one of `obj` superclasses has an adapter for `proto`. + * + * If it does, return a *borrowed reference* to the adapter, else to None. + */ +BORROWED static PyObject * +_get_superclass_adapter(PyObject *obj, PyObject *proto) +{ + PyTypeObject *type; + PyObject *mro, *st; + PyObject *key, *adapter; + Py_ssize_t i, ii; + + type = Py_TYPE(obj); + if (!(type->tp_mro)) { + /* has no mro */ + return Py_None; + } + + /* Walk the mro from the most specific subclass. */ + mro = type->tp_mro; + for (i = 1, ii = PyTuple_GET_SIZE(mro); i < ii; ++i) { + st = PyTuple_GET_ITEM(mro, i); + if (!(key = PyTuple_Pack(2, st, proto))) { return NULL; } + adapter = PyDict_GetItem(psyco_adapters, key); + Py_DECREF(key); + + if (adapter) { + Dprintf( + "microprotocols_adapt: using '%s' adapter to adapt '%s'", + ((PyTypeObject *)st)->tp_name, type->tp_name); + + /* register this adapter as good for the subclass too, + * so that the next time it will be found in the fast path */ + + /* Well, no, maybe this is not a good idea. + * It would become a leak in case of dynamic + * classes generated in a loop (think namedtuples). 
*/ + + /* key = PyTuple_Pack(2, (PyObject*)type, proto); + * PyDict_SetItem(psyco_adapters, key, adapter); + * Py_DECREF(key); + */ + return adapter; + } + } + return Py_None; +} + + +/* microprotocols_adapt - adapt an object to the built-in protocol */ + +PyObject * +microprotocols_adapt(PyObject *obj, PyObject *proto, PyObject *alt) +{ + PyObject *adapter, *adapted, *key, *meth; + char buffer[256]; + + /* we don't check for exact type conformance as specified in PEP 246 + because the ISQLQuote type is abstract and there is no way to get a + quotable object to be its instance */ + + Dprintf("microprotocols_adapt: trying to adapt %s", + Py_TYPE(obj)->tp_name); + + /* look for an adapter in the registry */ + if (!(key = PyTuple_Pack(2, Py_TYPE(obj), proto))) { return NULL; } + adapter = PyDict_GetItem(psyco_adapters, key); + Py_DECREF(key); + if (adapter) { + adapted = PyObject_CallFunctionObjArgs(adapter, obj, NULL); + return adapted; + } + + /* try to have the protocol adapt this object*/ + if ((meth = PyObject_GetAttrString(proto, "__adapt__"))) { + adapted = PyObject_CallFunctionObjArgs(meth, obj, NULL); + Py_DECREF(meth); + if (adapted && adapted != Py_None) return adapted; + Py_XDECREF(adapted); + if (PyErr_Occurred()) { + if (PyErr_ExceptionMatches(PyExc_TypeError)) { + PyErr_Clear(); + } else { + return NULL; + } + } + } + else { + /* proto.__adapt__ not found. */ + PyErr_Clear(); + } + + /* then try to have the object adapt itself */ + if ((meth = PyObject_GetAttrString(obj, "__conform__"))) { + adapted = PyObject_CallFunctionObjArgs(meth, proto, NULL); + Py_DECREF(meth); + if (adapted && adapted != Py_None) return adapted; + Py_XDECREF(adapted); + if (PyErr_Occurred()) { + if (PyErr_ExceptionMatches(PyExc_TypeError)) { + PyErr_Clear(); + } else { + return NULL; + } + } + } + else { + /* obj.__conform__ not found. */ + PyErr_Clear(); + } + + /* Finally check if a superclass can be adapted and use the same adapter. 
*/ + if (!(adapter = _get_superclass_adapter(obj, proto))) { + return NULL; + } + if (Py_None != adapter) { + adapted = PyObject_CallFunctionObjArgs(adapter, obj, NULL); + return adapted; + } + + /* else set the right exception and return NULL */ + PyOS_snprintf(buffer, 255, "can't adapt type '%s'", + Py_TYPE(obj)->tp_name); + psyco_set_error(ProgrammingError, NULL, buffer); + return NULL; +} + +/* microprotocol_getquoted - utility function that adapt and call getquoted. + * + * Return a bytes string, NULL on error. + */ + +PyObject * +microprotocol_getquoted(PyObject *obj, connectionObject *conn) +{ + PyObject *res = NULL; + PyObject *prepare = NULL; + PyObject *adapted; + + if (!(adapted = microprotocols_adapt(obj, (PyObject*)&isqlquoteType, NULL))) { + goto exit; + } + + Dprintf("microprotocol_getquoted: adapted to %s", + Py_TYPE(adapted)->tp_name); + + /* if requested prepare the object passing it the connection */ + if (conn) { + if ((prepare = PyObject_GetAttrString(adapted, "prepare"))) { + res = PyObject_CallFunctionObjArgs( + prepare, (PyObject *)conn, NULL); + if (res) { + Py_DECREF(res); + res = NULL; + } else { + goto exit; + } + } + else { + /* adapted.prepare not found */ + PyErr_Clear(); + } + } + + /* call the getquoted method on adapted (that should exist because we + adapted to the right protocol) */ + res = PyObject_CallMethod(adapted, "getquoted", NULL); + + /* Convert to bytes. 
*/ + if (res && PyUnicode_CheckExact(res)) { + PyObject *b; + b = conn_encode(conn, res); + Py_DECREF(res); + res = b; + } + +exit: + Py_XDECREF(adapted); + Py_XDECREF(prepare); + + /* we return res with one extra reference, the caller shall free it */ + return res; +} + + +/** module-level functions **/ + +PyObject * +psyco_microprotocols_adapt(cursorObject *self, PyObject *args) +{ + PyObject *obj, *alt = NULL; + PyObject *proto = (PyObject*)&isqlquoteType; + + if (!PyArg_ParseTuple(args, "O|OO", &obj, &proto, &alt)) return NULL; + return microprotocols_adapt(obj, proto, alt); +} diff --git a/psycopg/microprotocols.h b/psycopg/microprotocols.h new file mode 100644 index 0000000000000000000000000000000000000000..434e9e9f48f73e6ed73f1d67dcaf2f1d58a3017e --- /dev/null +++ b/psycopg/microprotocols.h @@ -0,0 +1,64 @@ +/* microprotocols.c - definitions for minimalist and non-validating protocols + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_MICROPROTOCOLS_H +#define PSYCOPG_MICROPROTOCOLS_H 1 + +#include "psycopg/connection.h" +#include "psycopg/cursor.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** adapters registry **/ + +extern HIDDEN PyObject *psyco_adapters; + +/** the names of the three mandatory methods **/ + +#define MICROPROTOCOLS_GETQUOTED_NAME "getquoted" +#define MICROPROTOCOLS_GETSTRING_NAME "getstring" +#define MICROPROTOCOLS_GETBINARY_NAME "getbinary" + +/** exported functions **/ + +/* used by module.c to init the microprotocols system */ +HIDDEN RAISES_NEG int microprotocols_init(PyObject *dict); +HIDDEN RAISES_NEG int microprotocols_add( + PyTypeObject *type, PyObject *proto, PyObject *cast); + +HIDDEN PyObject *microprotocols_adapt( + PyObject *obj, PyObject *proto, PyObject *alt); +HIDDEN PyObject *microprotocol_getquoted( + PyObject *obj, connectionObject *conn); + +HIDDEN PyObject * + psyco_microprotocols_adapt(cursorObject *self, PyObject *args); +#define psyco_microprotocols_adapt_doc \ + "adapt(obj, protocol, alternate) -> object -- adapt obj to given protocol" + +#endif /* !defined(PSYCOPG_MICROPROTOCOLS_H) */ diff --git a/psycopg/microprotocols_proto.c b/psycopg/microprotocols_proto.c new file mode 100644 index 0000000000000000000000000000000000000000..d32250ea2c9d1046d239e9e75bffef7cc19ec44a --- /dev/null +++ b/psycopg/microprotocols_proto.c @@ -0,0 +1,180 @@ +/* microprotocol_proto.c - psycopg protocols + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/microprotocols_proto.h" + +#include + + +/** void protocol implementation **/ + + +/* getquoted - return quoted representation for object */ + +#define isqlquote_getquoted_doc \ +"getquoted() -- return SQL-quoted representation of this object" + +static PyObject * +isqlquote_getquoted(isqlquoteObject *self, PyObject *args) +{ + Py_RETURN_NONE; +} + +/* getbinary - return quoted representation for object */ + +#define isqlquote_getbinary_doc \ +"getbinary() -- return SQL-quoted binary representation of this object" + +static PyObject * +isqlquote_getbinary(isqlquoteObject *self, PyObject *args) +{ + Py_RETURN_NONE; +} + +/* getbuffer - return quoted representation for object */ + +#define isqlquote_getbuffer_doc \ +"getbuffer() -- return this object" + +static PyObject * +isqlquote_getbuffer(isqlquoteObject *self, PyObject *args) +{ + Py_RETURN_NONE; +} + + + +/** the ISQLQuote object **/ + + +/* object method list */ + +static struct PyMethodDef isqlquoteObject_methods[] = { + {"getquoted", (PyCFunction)isqlquote_getquoted, + METH_NOARGS, isqlquote_getquoted_doc}, + {"getbinary", (PyCFunction)isqlquote_getbinary, + METH_NOARGS, isqlquote_getbinary_doc}, + {"getbuffer", (PyCFunction)isqlquote_getbuffer, + METH_NOARGS, isqlquote_getbuffer_doc}, + 
{NULL} +}; + +/* object member list */ + +static struct PyMemberDef isqlquoteObject_members[] = { + /* DBAPI-2.0 extensions (exception objects) */ + {"_wrapped", T_OBJECT, offsetof(isqlquoteObject, wrapped), READONLY}, + {NULL} +}; + +/* initialization and finalization methods */ + +static int +isqlquote_setup(isqlquoteObject *self, PyObject *wrapped) +{ + self->wrapped = wrapped; + Py_INCREF(wrapped); + + return 0; +} + +static void +isqlquote_dealloc(PyObject* obj) +{ + isqlquoteObject *self = (isqlquoteObject *)obj; + + Py_XDECREF(self->wrapped); + + Py_TYPE(obj)->tp_free(obj); +} + +static int +isqlquote_init(PyObject *obj, PyObject *args, PyObject *kwds) +{ + PyObject *wrapped = NULL; + + if (!PyArg_ParseTuple(args, "O", &wrapped)) + return -1; + + return isqlquote_setup((isqlquoteObject *)obj, wrapped); +} + +static PyObject * +isqlquote_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + return type->tp_alloc(type, 0); +} + + +/* object type */ + +#define isqlquoteType_doc \ +"Abstract ISQLQuote protocol\n\n" \ +"An object conform to this protocol should expose a ``getquoted()`` method\n" \ +"returning the SQL representation of the object.\n\n" + +PyTypeObject isqlquoteType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.ISQLQuote", + sizeof(isqlquoteObject), 0, + isqlquote_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + isqlquoteType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + isqlquoteObject_methods, /*tp_methods*/ + isqlquoteObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ 
+ 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + isqlquote_init, /*tp_init*/ + 0, /*tp_alloc*/ + isqlquote_new, /*tp_new*/ +}; diff --git a/psycopg/microprotocols_proto.h b/psycopg/microprotocols_proto.h new file mode 100644 index 0000000000000000000000000000000000000000..8d47d1201aea540980c676ef2c63910a21ac1c2e --- /dev/null +++ b/psycopg/microprotocols_proto.h @@ -0,0 +1,47 @@ +/* microporotocols_proto.h - definition for psycopg's protocols + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_ISQLQUOTE_H +#define PSYCOPG_ISQLQUOTE_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject isqlquoteType; + +typedef struct { + PyObject_HEAD + + PyObject *wrapped; + +} isqlquoteObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_ISQLQUOTE_H) */ diff --git a/psycopg/notify.h b/psycopg/notify.h new file mode 100644 index 0000000000000000000000000000000000000000..2641db845426432025317508157b090db4401965 --- /dev/null +++ b/psycopg/notify.h @@ -0,0 +1,41 @@ +/* notify.h - definition for the psycopg Notify type + * + * Copyright (C) 2010-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_NOTIFY_H +#define PSYCOPG_NOTIFY_H 1 + +extern HIDDEN PyTypeObject notifyType; + +typedef struct { + PyObject_HEAD + + PyObject *pid; + PyObject *channel; + PyObject *payload; + +} notifyObject; + +#endif /* PSYCOPG_NOTIFY_H */ diff --git a/psycopg/notify_type.c b/psycopg/notify_type.c new file mode 100644 index 0000000000000000000000000000000000000000..44b66b58689e30f5166f50c69971098b52591ec0 --- /dev/null +++ b/psycopg/notify_type.c @@ -0,0 +1,298 @@ +/* notify_type.c - python interface to Notify objects + * + * Copyright (C) 2010-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/notify.h" + + +static const char notify_doc[] = + "A notification received from the backend.\n\n" + "`!Notify` instances are made available upon reception on the\n" + "`~connection.notifies` member of the listening connection. 
The object\n" + "can be also accessed as a 2 items tuple returning the members\n" + ":samp:`({pid},{channel})` for backward compatibility.\n\n" + "See :ref:`async-notify` for details."; + +static const char pid_doc[] = + "The ID of the backend process that sent the notification.\n\n" + "Note: if the sending session was handled by Psycopg, you can use\n" + "`~connection.info.backend_pid` to know its PID."; + +static const char channel_doc[] = + "The name of the channel to which the notification was sent."; + +static const char payload_doc[] = + "The payload message of the notification.\n\n" + "Attaching a payload to a notification is only available since\n" + "PostgreSQL 9.0: for notifications received from previous versions\n" + "of the server this member is always the empty string."; + +static PyMemberDef notify_members[] = { + { "pid", T_OBJECT, offsetof(notifyObject, pid), READONLY, (char *)pid_doc }, + { "channel", T_OBJECT, offsetof(notifyObject, channel), READONLY, (char *)channel_doc }, + { "payload", T_OBJECT, offsetof(notifyObject, payload), READONLY, (char *)payload_doc }, + { NULL } +}; + +static PyObject * +notify_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) +{ + return type->tp_alloc(type, 0); +} + +static int +notify_init(notifyObject *self, PyObject *args, PyObject *kwargs) +{ + static char *kwlist[] = {"pid", "channel", "payload", NULL}; + PyObject *pid = NULL, *channel = NULL, *payload = NULL; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|O", kwlist, + &pid, &channel, &payload)) { + return -1; + } + + if (!payload) { + payload = Text_FromUTF8(""); + } + + Py_INCREF(pid); + self->pid = pid; + + Py_INCREF(channel); + self->channel = channel; + + Py_INCREF(payload); + self->payload = payload; + + return 0; +} + +static void +notify_dealloc(notifyObject *self) +{ + Py_CLEAR(self->pid); + Py_CLEAR(self->channel); + Py_CLEAR(self->payload); + + Py_TYPE(self)->tp_free((PyObject *)self); +} + + +/* Convert a notify into a 2 or 3 items 
tuple. */ +static PyObject * +notify_astuple(notifyObject *self, int with_payload) +{ + PyObject *tself; + if (!(tself = PyTuple_New(with_payload ? 3 : 2))) { return NULL; } + + Py_INCREF(self->pid); + PyTuple_SET_ITEM(tself, 0, self->pid); + + Py_INCREF(self->channel); + PyTuple_SET_ITEM(tself, 1, self->channel); + + if (with_payload) { + Py_INCREF(self->payload); + PyTuple_SET_ITEM(tself, 2, self->payload); + } + + return tself; +} + +/* note on Notify-tuple comparison. + * + * Such a comparison is required otherwise a check n == (pid, channel) + * would fail. We also want to compare two notifies, and the obvious meaning is + * "check that all the attributes are equal". Unfortunately this leads to an + * inconsistent situation: + * Notify(pid, channel, payload1) + * == (pid, channel) + * == Notify(pid, channel, payload2) + * even when payload1 != payload2. We can probably live with that, but hashing + * makes things worse: hashability is a desirable property for a Notify, and + * to maintain compatibility we should put a notify object in the same bucket + * of a 2-item tuples... but we can't put all the payloads with the same + * (pid, channel) in the same bucket: it would be an extremely poor hash. + * So we maintain compatibility in the sense that notify without payload + * behave as 2-item tuples in term of hashability, but if a payload is present + * the (pid, channel) pair is no more equivalent as dict key to the Notify. 
+ */
+static PyObject *
+notify_richcompare(notifyObject *self, PyObject *other, int op)
+{
+    PyObject *rv = NULL;
+    PyObject *tself = NULL;
+    PyObject *tother = NULL;
+
+    if (Py_TYPE(other) == &notifyType) {
+        if (!(tself = notify_astuple(self, 1))) { goto exit; }
+        if (!(tother = notify_astuple((notifyObject *)other, 1))) { goto exit; }
+        rv = PyObject_RichCompare(tself, tother, op);
+    }
+    else if (PyTuple_Check(other)) {
+        if (!(tself = notify_astuple(self, 0))) { goto exit; }
+        rv = PyObject_RichCompare(tself, other, op);
+    }
+    else {
+        /* was Py_False: that made both == and != false vs unrelated types */
+        Py_INCREF(Py_NotImplemented);
+        rv = Py_NotImplemented;
+    }
+
+exit:
+    Py_XDECREF(tself);
+    Py_XDECREF(tother);
+    return rv;
+}
+
+
+static Py_hash_t
+notify_hash(notifyObject *self)
+{
+    Py_hash_t rv = -1L;
+    PyObject *tself = NULL;
+
+    /* if self == a tuple, then their hashes are the same. */
+    int has_payload = PyObject_IsTrue(self->payload);
+    if (has_payload < 0) { goto exit; }  /* propagate error, don't treat as true */
+    if (!(tself = notify_astuple(self, has_payload))) { goto exit; }
+    rv = PyObject_Hash(tself);
+
+exit:
+    Py_XDECREF(tself);
+    return rv;
+}
+
+
+static PyObject*
+notify_repr(notifyObject *self)
+{
+    PyObject *rv = NULL;
+    PyObject *format = NULL;
+    PyObject *args = NULL;
+
+    if (!(format = Text_FromUTF8("Notify(%r, %r, %r)"))) {
+        goto exit;
+    }
+
+    if (!(args = PyTuple_New(3))) { goto exit; }
+    Py_INCREF(self->pid);
+    PyTuple_SET_ITEM(args, 0, self->pid);
+    Py_INCREF(self->channel);
+    PyTuple_SET_ITEM(args, 1, self->channel);
+    Py_INCREF(self->payload);
+    PyTuple_SET_ITEM(args, 2, self->payload);
+
+    rv = Text_Format(format, args);
+
+exit:
+    Py_XDECREF(args);
+    Py_XDECREF(format);
+
+    return rv;
+}
+
+/* Notify can be accessed as a 2 items tuple for backward compatibility */
+
+static Py_ssize_t
+notify_len(notifyObject *self)
+{
+    return 2;
+}
+
+static PyObject *
+notify_getitem(notifyObject *self, Py_ssize_t item)
+{
+    if (item < 0)
+        item += 2;
+
+    switch (item) {
+    case 0:
+        Py_INCREF(self->pid);
+        return self->pid;
+    case 1:
+        Py_INCREF(self->channel);
+        return self->channel;
+    default:
+        
PyErr_SetString(PyExc_IndexError, "index out of range"); + return NULL; + } +} + +static PySequenceMethods notify_sequence = { + (lenfunc)notify_len, /* sq_length */ + 0, /* sq_concat */ + 0, /* sq_repeat */ + (ssizeargfunc)notify_getitem, /* sq_item */ + 0, /* sq_slice */ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + 0, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0, /* sq_inplace_repeat */ +}; + + +PyTypeObject notifyType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.Notify", + sizeof(notifyObject), 0, + (destructor)notify_dealloc, /* tp_dealloc */ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)notify_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + ¬ify_sequence, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + (hashfunc)notify_hash, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + /* Notify is not GC as it only has string attributes */ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + notify_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + (richcmpfunc)notify_richcompare, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + notify_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)notify_init, /*tp_init*/ + 0, /*tp_alloc*/ + notify_new, /*tp_new*/ +}; diff --git a/psycopg/pgtypes.h b/psycopg/pgtypes.h new file mode 100644 index 0000000000000000000000000000000000000000..1fdbda9a36cc839024b5f24fea512f1bddc72868 --- /dev/null +++ b/psycopg/pgtypes.h @@ -0,0 +1,65 @@ +#define BOOLOID 16 +#define BYTEAOID 17 +#define CHAROID 18 +#define NAMEOID 19 +#define INT8OID 20 +#define INT2OID 21 +#define INT2VECTOROID 22 +#define INT4OID 23 +#define REGPROCOID 24 +#define TEXTOID 25 +#define OIDOID 26 +#define TIDOID 27 +#define XIDOID 28 +#define CIDOID 29 +#define OIDVECTOROID 30 +#define 
PG_TYPE_RELTYPE_OID 71 +#define PG_ATTRIBUTE_RELTYPE_OID 75 +#define PG_PROC_RELTYPE_OID 81 +#define PG_CLASS_RELTYPE_OID 83 +#define POINTOID 600 +#define LSEGOID 601 +#define PATHOID 602 +#define BOXOID 603 +#define POLYGONOID 604 +#define LINEOID 628 +#define FLOAT4OID 700 +#define FLOAT8OID 701 +#define ABSTIMEOID 702 +#define RELTIMEOID 703 +#define TINTERVALOID 704 +#define UNKNOWNOID 705 +#define CIRCLEOID 718 +#define CASHOID 790 +#define MACADDROID 829 +#define INETOID 869 +#define CIDROID 650 +#define INT4ARRAYOID 1007 +#define ACLITEMOID 1033 +#define BPCHAROID 1042 +#define VARCHAROID 1043 +#define DATEOID 1082 +#define TIMEOID 1083 +#define TIMESTAMPOID 1114 +#define TIMESTAMPTZOID 1184 +#define INTERVALOID 1186 +#define TIMETZOID 1266 +#define BITOID 1560 +#define VARBITOID 1562 +#define NUMERICOID 1700 +#define REFCURSOROID 1790 +#define REGPROCEDUREOID 2202 +#define REGOPEROID 2203 +#define REGOPERATOROID 2204 +#define REGCLASSOID 2205 +#define REGTYPEOID 2206 +#define RECORDOID 2249 +#define CSTRINGOID 2275 +#define ANYOID 2276 +#define ANYARRAYOID 2277 +#define VOIDOID 2278 +#define TRIGGEROID 2279 +#define LANGUAGE_HANDLEROID 2280 +#define INTERNALOID 2281 +#define OPAQUEOID 2282 +#define ANYELEMENTOID 2283 diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c new file mode 100644 index 0000000000000000000000000000000000000000..ec020673256ee9907f22fe7c9c357ff39da21e8c --- /dev/null +++ b/psycopg/pqpath.c @@ -0,0 +1,1834 @@ +/* pqpath.c - single path into libpq + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +/* IMPORTANT NOTE: no function in this file do its own connection locking + except for pg_execute and pq_fetch (that are somehow high-level). This means + that all the other functions should be called while holding a lock to the + connection. +*/ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/pqpath.h" +#include "psycopg/connection.h" +#include "psycopg/cursor.h" +#include "psycopg/replication_cursor.h" +#include "psycopg/replication_message.h" +#include "psycopg/green.h" +#include "psycopg/typecast.h" +#include "psycopg/pgtypes.h" +#include "psycopg/error.h" +#include "psycopg/column.h" + +#include "psycopg/libpq_support.h" +#include "libpq-fe.h" + +#ifdef _WIN32 +/* select() */ +#include +/* gettimeofday() */ +#include "win32_support.h" +#elif defined(__sun) && defined(__SVR4) +#include "solaris_support.h" +#elif defined(_AIX) +#include "aix_support.h" +#else +#include +#endif + +extern HIDDEN PyObject *psyco_DescriptionType; +extern HIDDEN const char *srv_isolevels[]; +extern HIDDEN const char *srv_readonly[]; +extern HIDDEN const char *srv_deferrable[]; + +/* Strip off the severity from a Postgres error message. 
*/ +static const char * +strip_severity(const char *msg) +{ + if (!msg) + return NULL; + + if (strlen(msg) > 8 && (!strncmp(msg, "ERROR: ", 8) || + !strncmp(msg, "FATAL: ", 8) || + !strncmp(msg, "PANIC: ", 8))) + return &msg[8]; + else + return msg; +} + + +/* pq_raise - raise a python exception of the right kind + + This function should be called while holding the GIL. + + The function passes the ownership of the pgres to the returned exception, + where the pgres was the explicit argument or taken from the cursor. + So, after calling it curs->pgres will be set to null */ + +RAISES static void +pq_raise(connectionObject *conn, cursorObject *curs, PGresult **pgres) +{ + PyObject *exc = NULL; + const char *err = NULL; + const char *err2 = NULL; + const char *code = NULL; + PyObject *pyerr = NULL; + PyObject *pgerror = NULL, *pgcode = NULL; + + if (conn == NULL) { + PyErr_SetString(DatabaseError, + "psycopg went psychotic and raised a null error"); + return; + } + + /* if the connection has somehow been broken, we mark the connection + object as closed but requiring cleanup */ + if (conn->pgconn != NULL && PQstatus(conn->pgconn) == CONNECTION_BAD) { + conn->closed = 2; + exc = OperationalError; + } + + if (pgres == NULL && curs != NULL) + pgres = &curs->pgres; + + if (pgres && *pgres) { + err = PQresultErrorMessage(*pgres); + if (err != NULL) { + Dprintf("pq_raise: PQresultErrorMessage: err=%s", err); + code = PQresultErrorField(*pgres, PG_DIAG_SQLSTATE); + } + } + if (err == NULL) { + err = PQerrorMessage(conn->pgconn); + Dprintf("pq_raise: PQerrorMessage: err=%s", err); + } + + /* if the is no error message we probably called pq_raise without reason: + we need to set an exception anyway because the caller will probably + raise and a meaningful message is better than an empty one. 
+ Note: it can happen without it being our error: see ticket #82 */ + if (err == NULL || err[0] == '\0') { + PyErr_Format(DatabaseError, + "error with status %s and no message from the libpq", + PQresStatus(pgres == NULL ? + PQstatus(conn->pgconn) : PQresultStatus(*pgres))); + return; + } + + /* Analyze the message and try to deduce the right exception kind + (only if we got the SQLSTATE from the pgres, obviously) */ + if (code != NULL) { + exc = exception_from_sqlstate(code); + } + else if (exc == NULL) { + /* Fallback if there is no exception code (unless we already + determined that the connection was closed). */ + exc = DatabaseError; + } + + /* try to remove the initial "ERROR: " part from the postgresql error */ + err2 = strip_severity(err); + Dprintf("pq_raise: err2=%s", err2); + + /* decode now the details of the error, because after psyco_set_error + * decoding will fail. + */ + if (!(pgerror = conn_text_from_chars(conn, err))) { + /* we can't really handle an exception while handling this error + * so just print it. */ + PyErr_Print(); + PyErr_Clear(); + } + + if (!(pgcode = conn_text_from_chars(conn, code))) { + PyErr_Print(); + PyErr_Clear(); + } + + pyerr = psyco_set_error(exc, curs, err2); + + if (pyerr && PyObject_TypeCheck(pyerr, &errorType)) { + errorObject *perr = (errorObject *)pyerr; + + Py_CLEAR(perr->pydecoder); + Py_XINCREF(conn->pydecoder); + perr->pydecoder = conn->pydecoder; + + Py_CLEAR(perr->pgerror); + perr->pgerror = pgerror; + pgerror = NULL; + + Py_CLEAR(perr->pgcode); + perr->pgcode = pgcode; + pgcode = NULL; + + CLEARPGRES(perr->pgres); + if (pgres && *pgres) { + perr->pgres = *pgres; + *pgres = NULL; + } + } + + Py_XDECREF(pgerror); + Py_XDECREF(pgcode); +} + +/* pq_clear_async - clear the effects of a previous async query + + note that this function does block because it needs to wait for the full + result sets of the previous query to clear them. 
+ + this function does not call any Py_*_ALLOW_THREADS macros */ + +void +pq_clear_async(connectionObject *conn) +{ + PGresult *pgres; + + /* this will get all pending results (if the submitted query consisted of + many parts, i.e. "select 1; select 2", there will be many) and also + finalize asynchronous processing so the connection will be ready to + accept another query */ + + while ((pgres = PQgetResult(conn->pgconn))) { + Dprintf("pq_clear_async: clearing PGresult at %p", pgres); + PQclear(pgres); + } + Py_CLEAR(conn->async_cursor); +} + + +/* pq_set_non_blocking - set the nonblocking status on a connection. + + Accepted arg values are 1 (nonblocking) and 0 (blocking). + + Return 0 if everything ok, else < 0 and set an exception. + */ +RAISES_NEG int +pq_set_non_blocking(connectionObject *conn, int arg) +{ + int ret = PQsetnonblocking(conn->pgconn, arg); + if (0 != ret) { + Dprintf("PQsetnonblocking(%d) FAILED", arg); + PyErr_SetString(OperationalError, "PQsetnonblocking() failed"); + ret = -1; + } + return ret; +} + + +/* pg_execute_command_locked - execute a no-result query on a locked connection. + + This function should only be called on a locked connection without + holding the global interpreter lock. + + On error, -1 is returned, and the conn->pgres will hold the + relevant result structure. + + The tstate parameter should be the pointer of the _save variable created by + Py_BEGIN_ALLOW_THREADS: this enables the function to acquire and release + again the GIL if needed, i.e. if a Python wait callback must be invoked. 
+ */ +int +pq_execute_command_locked( + connectionObject *conn, const char *query, PyThreadState **tstate) +{ + int pgstatus, retvalue = -1; + Dprintf("pq_execute_command_locked: pgconn = %p, query = %s", + conn->pgconn, query); + + if (!psyco_green()) { + conn_set_result(conn, PQexec(conn->pgconn, query)); + } else { + PyEval_RestoreThread(*tstate); + conn_set_result(conn, psyco_exec_green(conn, query)); + *tstate = PyEval_SaveThread(); + } + if (conn->pgres == NULL) { + Dprintf("pq_execute_command_locked: PQexec returned NULL"); + PyEval_RestoreThread(*tstate); + if (!PyErr_Occurred()) { + conn_set_error(conn, PQerrorMessage(conn->pgconn)); + } + *tstate = PyEval_SaveThread(); + goto cleanup; + } + + pgstatus = PQresultStatus(conn->pgres); + if (pgstatus != PGRES_COMMAND_OK ) { + Dprintf("pq_execute_command_locked: result was not COMMAND_OK (%d)", + pgstatus); + goto cleanup; + } + + retvalue = 0; + CLEARPGRES(conn->pgres); + +cleanup: + return retvalue; +} + +/* pq_complete_error: handle an error from pq_execute_command_locked() + + If pq_execute_command_locked() returns -1, this function should be + called to convert the result to a Python exception. + + This function should be called while holding the global interpreter + lock. + */ +RAISES void +pq_complete_error(connectionObject *conn) +{ + Dprintf("pq_complete_error: pgconn = %p, error = %s", + conn->pgconn, conn->error); + if (conn->pgres) { + pq_raise(conn, NULL, &conn->pgres); + /* now conn->pgres is null */ + } + else { + if (conn->error) { + PyErr_SetString(OperationalError, conn->error); + } else if (PyErr_Occurred()) { + /* There was a Python error (e.g. in the callback). Don't clobber + * it with an unknown exception. (see #410) */ + Dprintf("pq_complete_error: forwarding Python exception"); + } else { + PyErr_SetString(OperationalError, "unknown error"); + } + /* Trivia: with a broken socket connection PQexec returns NULL, so we + * end up here. 
With a TCP connection we get a pgres with an error + * instead, and the connection gets closed in the pq_raise call above + * (see ticket #196) + */ + if (CONNECTION_BAD == PQstatus(conn->pgconn)) { + conn->closed = 2; + } + } + conn_set_error(conn, NULL); +} + + +/* pq_begin_locked - begin a transaction, if necessary + + This function should only be called on a locked connection without + holding the global interpreter lock. + + On error, -1 is returned, and the conn->pgres argument will hold the + relevant result structure. + */ +int +pq_begin_locked(connectionObject *conn, PyThreadState **tstate) +{ + const size_t bufsize = 256; + char buf[256]; /* buf size must be same as bufsize */ + int result; + + Dprintf("pq_begin_locked: pgconn = %p, %d, status = %d", + conn->pgconn, conn->autocommit, conn->status); + + if (conn->status != CONN_STATUS_READY) { + Dprintf("pq_begin_locked: transaction in progress"); + return 0; + } + + if (conn->autocommit && !conn->entered) { + Dprintf("pq_begin_locked: autocommit and no with block"); + return 0; + } + + if (conn->isolevel == ISOLATION_LEVEL_DEFAULT + && conn->readonly == STATE_DEFAULT + && conn->deferrable == STATE_DEFAULT) { + strcpy(buf, "BEGIN"); + } + else { + snprintf(buf, bufsize, + conn->server_version >= 80000 ? + "BEGIN%s%s%s%s" : "BEGIN;SET TRANSACTION%s%s%s%s", + (conn->isolevel >= 1 && conn->isolevel <= 4) + ? " ISOLATION LEVEL " : "", + (conn->isolevel >= 1 && conn->isolevel <= 4) + ? srv_isolevels[conn->isolevel] : "", + srv_readonly[conn->readonly], + srv_deferrable[conn->deferrable]); + } + + result = pq_execute_command_locked(conn, buf, tstate); + if (result == 0) + conn->status = CONN_STATUS_BEGIN; + + return result; +} + +/* pq_commit - send an END, if necessary + + This function should be called while holding the global interpreter + lock. 
+*/ + +int +pq_commit(connectionObject *conn) +{ + int retvalue = -1; + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&conn->lock); + + Dprintf("pq_commit: pgconn = %p, status = %d", + conn->pgconn, conn->status); + + if (conn->status != CONN_STATUS_BEGIN) { + Dprintf("pq_commit: no transaction to commit"); + retvalue = 0; + } + else { + conn->mark += 1; + retvalue = pq_execute_command_locked(conn, "COMMIT", &_save); + } + + Py_BLOCK_THREADS; + conn_notice_process(conn); + Py_UNBLOCK_THREADS; + + /* Even if an error occurred, the connection will be rolled back, + so we unconditionally set the connection status here. */ + conn->status = CONN_STATUS_READY; + + pthread_mutex_unlock(&conn->lock); + Py_END_ALLOW_THREADS; + + if (retvalue < 0) + pq_complete_error(conn); + + return retvalue; +} + +RAISES_NEG int +pq_abort_locked(connectionObject *conn, PyThreadState **tstate) +{ + int retvalue = -1; + + Dprintf("pq_abort_locked: pgconn = %p, status = %d", + conn->pgconn, conn->status); + + if (conn->status != CONN_STATUS_BEGIN) { + Dprintf("pq_abort_locked: no transaction to abort"); + return 0; + } + + conn->mark += 1; + retvalue = pq_execute_command_locked(conn, "ROLLBACK", tstate); + if (retvalue == 0) + conn->status = CONN_STATUS_READY; + + return retvalue; +} + +/* pq_abort - send an ABORT, if necessary + + This function should be called while holding the global interpreter + lock. 
*/ + +RAISES_NEG int +pq_abort(connectionObject *conn) +{ + int retvalue = -1; + + Dprintf("pq_abort: pgconn = %p, autocommit = %d, status = %d", + conn->pgconn, conn->autocommit, conn->status); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&conn->lock); + + retvalue = pq_abort_locked(conn, &_save); + + Py_BLOCK_THREADS; + conn_notice_process(conn); + Py_UNBLOCK_THREADS; + + pthread_mutex_unlock(&conn->lock); + Py_END_ALLOW_THREADS; + + if (retvalue < 0) + pq_complete_error(conn); + + return retvalue; +} + +/* pq_reset - reset the connection + + This function should be called while holding the global interpreter + lock. + + The _locked version of this function should be called on a locked + connection without holding the global interpreter lock. +*/ + +RAISES_NEG int +pq_reset_locked(connectionObject *conn, PyThreadState **tstate) +{ + int retvalue = -1; + + Dprintf("pq_reset_locked: pgconn = %p, status = %d", + conn->pgconn, conn->status); + + conn->mark += 1; + + if (conn->status == CONN_STATUS_BEGIN) { + retvalue = pq_execute_command_locked(conn, "ABORT", tstate); + if (retvalue != 0) return retvalue; + } + + if (conn->server_version >= 80300) { + retvalue = pq_execute_command_locked(conn, "DISCARD ALL", tstate); + if (retvalue != 0) return retvalue; + } + else { + retvalue = pq_execute_command_locked(conn, "RESET ALL", tstate); + if (retvalue != 0) return retvalue; + + retvalue = pq_execute_command_locked(conn, + "SET SESSION AUTHORIZATION DEFAULT", tstate); + if (retvalue != 0) return retvalue; + } + + /* should set the tpc xid to null: postponed until we get the GIL again */ + conn->status = CONN_STATUS_READY; + + return retvalue; +} + +int +pq_reset(connectionObject *conn) +{ + int retvalue = -1; + + Dprintf("pq_reset: pgconn = %p, autocommit = %d, status = %d", + conn->pgconn, conn->autocommit, conn->status); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&conn->lock); + + retvalue = pq_reset_locked(conn, &_save); + + Py_BLOCK_THREADS; + 
conn_notice_process(conn); + Py_UNBLOCK_THREADS; + + pthread_mutex_unlock(&conn->lock); + Py_END_ALLOW_THREADS; + + if (retvalue < 0) { + pq_complete_error(conn); + } + else { + Py_CLEAR(conn->tpc_xid); + } + return retvalue; +} + + +/* Get a session parameter. + * + * The function should be called on a locked connection without + * holding the GIL. + * + * The result is a new string allocated with malloc. + */ + +char * +pq_get_guc_locked(connectionObject *conn, const char *param, PyThreadState **tstate) +{ + char query[256]; + int size; + char *rv = NULL; + + Dprintf("pq_get_guc_locked: reading %s", param); + + size = PyOS_snprintf(query, sizeof(query), "SHOW %s", param); + if (size < 0 || (size_t)size >= sizeof(query)) { + conn_set_error(conn, "SHOW: query too large"); + goto cleanup; + } + + Dprintf("pq_get_guc_locked: pgconn = %p, query = %s", conn->pgconn, query); + + if (!psyco_green()) { + conn_set_result(conn, PQexec(conn->pgconn, query)); + } else { + PyEval_RestoreThread(*tstate); + conn_set_result(conn, psyco_exec_green(conn, query)); + *tstate = PyEval_SaveThread(); + } + + if (!conn->pgres) { + Dprintf("pq_get_guc_locked: PQexec returned NULL"); + PyEval_RestoreThread(*tstate); + if (!PyErr_Occurred()) { + conn_set_error(conn, PQerrorMessage(conn->pgconn)); + } + *tstate = PyEval_SaveThread(); + goto cleanup; + } + if (PQresultStatus(conn->pgres) != PGRES_TUPLES_OK) { + Dprintf("pq_get_guc_locked: result was not TUPLES_OK (%s)", + PQresStatus(PQresultStatus(conn->pgres))); + goto cleanup; + } + + rv = strdup(PQgetvalue(conn->pgres, 0, 0)); + CLEARPGRES(conn->pgres); + +cleanup: + return rv; +} + +/* Set a session parameter. 
+ * + * The function should be called on a locked connection without + * holding the GIL + */ + +int +pq_set_guc_locked( + connectionObject *conn, const char *param, const char *value, + PyThreadState **tstate) +{ + char query[256]; + int size; + int rv = -1; + + Dprintf("pq_set_guc_locked: setting %s to %s", param, value); + + if (0 == strcmp(value, "default")) { + size = PyOS_snprintf(query, sizeof(query), + "SET %s TO DEFAULT", param); + } + else { + size = PyOS_snprintf(query, sizeof(query), + "SET %s TO '%s'", param, value); + } + if (size < 0 || (size_t)size >= sizeof(query)) { + conn_set_error(conn, "SET: query too large"); + goto exit; + } + + rv = pq_execute_command_locked(conn, query, tstate); + +exit: + return rv; +} + +/* Call one of the PostgreSQL tpc-related commands. + * + * This function should only be called on a locked connection without + * holding the global interpreter lock. */ + +int +pq_tpc_command_locked( + connectionObject *conn, const char *cmd, const char *tid, + PyThreadState **tstate) +{ + int rv = -1; + char *etid = NULL, *buf = NULL; + Py_ssize_t buflen; + + Dprintf("_pq_tpc_command: pgconn = %p, command = %s", + conn->pgconn, cmd); + + conn->mark += 1; + + PyEval_RestoreThread(*tstate); + + /* convert the xid into the postgres transaction_id and quote it. 
*/ + if (!(etid = psyco_escape_string(conn, tid, -1, NULL, NULL))) + { goto exit; } + + /* prepare the command to the server */ + buflen = 2 + strlen(cmd) + strlen(etid); /* add space, zero */ + if (!(buf = PyMem_Malloc(buflen))) { + PyErr_NoMemory(); + goto exit; + } + if (0 > PyOS_snprintf(buf, buflen, "%s %s", cmd, etid)) { goto exit; } + + /* run the command and let it handle the error cases */ + *tstate = PyEval_SaveThread(); + rv = pq_execute_command_locked(conn, buf, tstate); + PyEval_RestoreThread(*tstate); + +exit: + PyMem_Free(buf); + PyMem_Free(etid); + + *tstate = PyEval_SaveThread(); + return rv; +} + + +/* pq_get_result_async - read an available result without blocking. + * + * Return 0 if the result is ready, 1 if it will block, -1 on error. + * The last result will be returned in conn->pgres. + * + * The function should be called with the lock and holding the GIL. + */ + +RAISES_NEG int +pq_get_result_async(connectionObject *conn) +{ + int rv = -1; + + Dprintf("pq_get_result_async: calling PQconsumeInput()"); + if (PQconsumeInput(conn->pgconn) == 0) { + Dprintf("pq_get_result_async: PQconsumeInput() failed"); + + /* if the libpq says pgconn is lost, close the py conn */ + if (CONNECTION_BAD == PQstatus(conn->pgconn)) { + conn->closed = 2; + } + + PyErr_SetString(OperationalError, PQerrorMessage(conn->pgconn)); + goto exit; + } + + conn_notifies_process(conn); + conn_notice_process(conn); + + for (;;) { + int busy; + PGresult *res; + ExecStatusType status; + + Dprintf("pq_get_result_async: calling PQisBusy()"); + busy = PQisBusy(conn->pgconn); + + if (busy) { + /* try later */ + Dprintf("pq_get_result_async: PQisBusy() = 1"); + rv = 1; + goto exit; + } + + if (!(res = PQgetResult(conn->pgconn))) { + Dprintf("pq_get_result_async: got no result"); + /* the result is ready: it was the previously read one */ + rv = 0; + goto exit; + } + + status = PQresultStatus(res); + Dprintf("pq_get_result_async: got result %s", PQresStatus(status)); + + /* Store the 
result outside because we want to return the last non-null + * one and we may have to do it across poll calls. However if there is + * an error in the stream of results we want to handle the *first* + * error. So don't clobber it with the following ones. */ + if (conn->pgres && PQresultStatus(conn->pgres) == PGRES_FATAL_ERROR) { + Dprintf("previous pgres is error: discarding"); + PQclear(res); + } + else { + conn_set_result(conn, res); + } + + switch (status) { + case PGRES_COPY_OUT: + case PGRES_COPY_IN: + case PGRES_COPY_BOTH: + /* After entering copy mode, libpq will make a phony + * PGresult for us every time we query for it, so we need to + * break out of this endless loop. */ + rv = 0; + goto exit; + + default: + /* keep on reading to check if there are other results or + * we have finished. */ + continue; + } + } + +exit: + return rv; +} + +/* pq_flush - flush output and return connection status + + a status of 1 means that some data is still pending to be flushed, while a + status of 0 means that there is no data waiting to be sent. -1 means an + error and an exception will be set accordingly. + + this function locks the connection object + this function calls Py_*_ALLOW_THREADS macros */ + +int +pq_flush(connectionObject *conn) +{ + int res; + + Dprintf("pq_flush: flushing output"); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(conn->lock)); + res = PQflush(conn->pgconn); + pthread_mutex_unlock(&(conn->lock)); + Py_END_ALLOW_THREADS; + + return res; +} + +/* pq_execute - execute a query, possibly asynchronously + * + * With no_result an eventual query result is discarded. + * Currently only used to implement cursor.executemany(). 
+ * + * This function locks the connection object + * This function call Py_*_ALLOW_THREADS macros +*/ + +RAISES_NEG int +_pq_execute_sync(cursorObject *curs, const char *query, int no_result, int no_begin) +{ + connectionObject *conn = curs->conn; + + CLEARPGRES(curs->pgres); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(conn->lock)); + + if (!no_begin && pq_begin_locked(conn, &_save) < 0) { + pthread_mutex_unlock(&(conn->lock)); + Py_BLOCK_THREADS; + pq_complete_error(conn); + return -1; + } + + Dprintf("pq_execute: executing SYNC query: pgconn = %p", conn->pgconn); + Dprintf(" %-.200s", query); + if (!psyco_green()) { + conn_set_result(conn, PQexec(conn->pgconn, query)); + } + else { + Py_BLOCK_THREADS; + conn_set_result(conn, psyco_exec_green(conn, query)); + Py_UNBLOCK_THREADS; + } + + /* don't let pgres = NULL go to pq_fetch() */ + if (!conn->pgres) { + if (CONNECTION_BAD == PQstatus(conn->pgconn)) { + conn->closed = 2; + } + pthread_mutex_unlock(&(conn->lock)); + Py_BLOCK_THREADS; + if (!PyErr_Occurred()) { + PyErr_SetString(OperationalError, + PQerrorMessage(conn->pgconn)); + } + return -1; + } + + Py_BLOCK_THREADS; + + /* assign the result back to the cursor now that we have the GIL */ + curs_set_result(curs, conn->pgres); + conn->pgres = NULL; + + /* Process notifies here instead of when fetching the tuple as we are + * into the same critical section that received the data. Without this + * care, reading notifies may disrupt other thread communications. + * (as in ticket #55). 
*/ + conn_notifies_process(conn); + conn_notice_process(conn); + Py_UNBLOCK_THREADS; + + pthread_mutex_unlock(&(conn->lock)); + Py_END_ALLOW_THREADS; + + /* if the execute was sync, we call pq_fetch() immediately, + to respect the old DBAPI-2.0 compatible behaviour */ + Dprintf("pq_execute: entering synchronous DBAPI compatibility mode"); + if (pq_fetch(curs, no_result) < 0) return -1; + + return 1; +} + +RAISES_NEG int +_pq_execute_async(cursorObject *curs, const char *query, int no_result) +{ + int async_status = ASYNC_WRITE; + connectionObject *conn = curs->conn; + int ret; + + CLEARPGRES(curs->pgres); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(conn->lock)); + + Dprintf("pq_execute: executing ASYNC query: pgconn = %p", conn->pgconn); + Dprintf(" %-.200s", query); + + if (PQsendQuery(conn->pgconn, query) == 0) { + if (CONNECTION_BAD == PQstatus(conn->pgconn)) { + conn->closed = 2; + } + pthread_mutex_unlock(&(conn->lock)); + Py_BLOCK_THREADS; + PyErr_SetString(OperationalError, + PQerrorMessage(conn->pgconn)); + return -1; + } + Dprintf("pq_execute: async query sent to backend"); + + ret = PQflush(conn->pgconn); + if (ret == 0) { + /* the query got fully sent to the server */ + Dprintf("pq_execute: query got flushed immediately"); + /* the async status will be ASYNC_READ */ + async_status = ASYNC_READ; + } + else if (ret == 1) { + /* not all of the query got sent to the server */ + async_status = ASYNC_WRITE; + } + else { + /* there was an error */ + pthread_mutex_unlock(&(conn->lock)); + Py_BLOCK_THREADS; + PyErr_SetString(OperationalError, + PQerrorMessage(conn->pgconn)); + return -1; + } + + pthread_mutex_unlock(&(conn->lock)); + Py_END_ALLOW_THREADS; + + conn->async_status = async_status; + if (!(conn->async_cursor + = PyWeakref_NewRef((PyObject *)curs, NULL))) { + return -1; + } + + return 0; +} + +RAISES_NEG int +pq_execute(cursorObject *curs, const char *query, int async, int no_result, int no_begin) +{ + /* check status of connection, raise error 
if not OK */ + if (PQstatus(curs->conn->pgconn) != CONNECTION_OK) { + Dprintf("pq_execute: connection NOT OK"); + PyErr_SetString(OperationalError, PQerrorMessage(curs->conn->pgconn)); + return -1; + } + Dprintf("pq_execute: pg connection at %p OK", curs->conn->pgconn); + + if (!async) { + return _pq_execute_sync(curs, query, no_result, no_begin); + } else { + return _pq_execute_async(curs, query, no_result); + } +} + + +/* send an async query to the backend. + * + * Return 1 if command succeeded, else 0. + * + * The function should be called holding the connection lock and the GIL. + */ +int +pq_send_query(connectionObject *conn, const char *query) +{ + int rv; + + Dprintf("pq_send_query: sending ASYNC query:"); + Dprintf(" %-.200s", query); + + CLEARPGRES(conn->pgres); + if (0 == (rv = PQsendQuery(conn->pgconn, query))) { + Dprintf("pq_send_query: error: %s", PQerrorMessage(conn->pgconn)); + } + + return rv; +} + + +/* pq_fetch - fetch data after a query + + this function locks the connection object + this function calls Py_*_ALLOW_THREADS macros + + return value: + -1 - some error occurred while calling libpq + 0 - no result from the backend but no libpq errors + 1 - result from backend (possibly data is ready) +*/ + +static PyObject * +_get_cast(cursorObject *curs, PGresult *pgres, int i) +{ + /* fill the right cast function by accessing three different dictionaries: + - the per-cursor dictionary, if available (can be NULL or None) + - the per-connection dictionary (always exists but can be null) + - the global dictionary (at module level) + if we get no defined cast use the default one */ + PyObject *type = NULL; + PyObject *cast = NULL; + PyObject *rv = NULL; + + Oid ftype = PQftype(pgres, i); + if (!(type = PyLong_FromOid(ftype))) { goto exit; } + + Dprintf("_pq_fetch_tuples: looking for cast %u:", ftype); + if (!(cast = curs_get_cast(curs, type))) { goto exit; } + + /* else if we got binary tuples and if we got a field that + is binary use the default cast + 
FIXME: what the hell am I trying to do here? This just can't work.. + */ + if (cast == psyco_default_binary_cast && PQbinaryTuples(pgres)) { + Dprintf("_pq_fetch_tuples: Binary cursor and " + "binary field: %u using default cast", ftype); + cast = psyco_default_cast; + } + + Dprintf("_pq_fetch_tuples: using cast at %p for type %u", cast, ftype); + + /* success */ + Py_INCREF(cast); + rv = cast; + +exit: + Py_XDECREF(type); + return rv; +} + +static PyObject * +_make_column(connectionObject *conn, PGresult *pgres, int i) +{ + Oid ftype = PQftype(pgres, i); + int fsize = PQfsize(pgres, i); + int fmod = PQfmod(pgres, i); + Oid ftable = PQftable(pgres, i); + int ftablecol = PQftablecol(pgres, i); + + columnObject *column = NULL; + PyObject *rv = NULL; + + if (!(column = (columnObject *)PyObject_CallObject( + (PyObject *)&columnType, NULL))) { + goto exit; + } + + /* fill the type and name fields */ + { + PyObject *tmp; + if (!(tmp = PyLong_FromOid(ftype))) { + goto exit; + } + column->type_code = tmp; + } + + { + PyObject *tmp; + if (!(tmp = conn_text_from_chars(conn, PQfname(pgres, i)))) { + goto exit; + } + column->name = tmp; + } + + /* display size is the maximum size of this field result tuples. 
*/ + Py_INCREF(Py_None); + column->display_size = Py_None; + + /* size on the backend */ + if (fmod > 0) { + fmod = fmod - sizeof(int); + } + if (fsize == -1) { + if (ftype == NUMERICOID) { + PyObject *tmp; + if (!(tmp = PyInt_FromLong((fmod >> 16)))) { goto exit; } + column->internal_size = tmp; + } + else { /* If variable length record, return maximum size */ + PyObject *tmp; + if (!(tmp = PyInt_FromLong(fmod))) { goto exit; } + column->internal_size = tmp; + } + } + else { + PyObject *tmp; + if (!(tmp = PyInt_FromLong(fsize))) { goto exit; } + column->internal_size = tmp; + } + + /* scale and precision */ + if (ftype == NUMERICOID) { + PyObject *tmp; + + if (!(tmp = PyInt_FromLong((fmod >> 16) & 0xFFFF))) { + goto exit; + } + column->precision = tmp; + + if (!(tmp = PyInt_FromLong(fmod & 0xFFFF))) { + goto exit; + } + column->scale = tmp; + } + + /* table_oid, table_column */ + if (ftable != InvalidOid) { + PyObject *tmp; + if (!(tmp = PyLong_FromOid(ftable))) { goto exit; } + column->table_oid = tmp; + } + + if (ftablecol > 0) { + PyObject *tmp; + if (!(tmp = PyInt_FromLong((long)ftablecol))) { goto exit; } + column->table_column = tmp; + } + + /* success */ + rv = (PyObject *)column; + column = NULL; + +exit: + Py_XDECREF(column); + return rv; +} + +RAISES_NEG static int +_pq_fetch_tuples(cursorObject *curs) +{ + int i; + int pgnfields; + int rv = -1; + PyObject *description = NULL; + PyObject *casts = NULL; + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_lock(&(curs->conn->lock)); + Py_END_ALLOW_THREADS; + + pgnfields = PQnfields(curs->pgres); + + curs->notuples = 0; + + /* create the tuple for description and typecasting */ + Py_CLEAR(curs->description); + Py_CLEAR(curs->casts); + if (!(description = PyTuple_New(pgnfields))) { goto exit; } + if (!(casts = PyTuple_New(pgnfields))) { goto exit; } + curs->columns = pgnfields; + + /* calculate each field's parameters and typecasters */ + for (i = 0; i < pgnfields; i++) { + PyObject *column = NULL; + PyObject *cast = 
NULL; + + if (!(column = _make_column(curs->conn, curs->pgres, i))) { + goto exit; + } + PyTuple_SET_ITEM(description, i, (PyObject *)column); + + if (!(cast = _get_cast(curs, curs->pgres, i))) { + goto exit; + } + PyTuple_SET_ITEM(casts, i, cast); + } + + curs->description = description; + description = NULL; + + curs->casts = casts; + casts = NULL; + + rv = 0; + +exit: + Py_XDECREF(description); + Py_XDECREF(casts); + + Py_BEGIN_ALLOW_THREADS; + pthread_mutex_unlock(&(curs->conn->lock)); + Py_END_ALLOW_THREADS; + + return rv; +} + + +void +_read_rowcount(cursorObject *curs) +{ + const char *rowcount; + + rowcount = PQcmdTuples(curs->pgres); + Dprintf("_read_rowcount: PQcmdTuples = %s", rowcount); + if (!rowcount || !rowcount[0]) { + curs->rowcount = -1; + } else { + curs->rowcount = atol(rowcount); + } +} + +static int +_pq_copy_in_v3(cursorObject *curs) +{ + /* COPY FROM implementation when protocol 3 is available: this function + uses the new PQputCopyData() and can detect errors and set the correct + exception */ + PyObject *o, *func = NULL, *size = NULL; + Py_ssize_t length = 0; + int res, error = 0; + + if (!curs->copyfile) { + PyErr_SetString(ProgrammingError, + "can't execute COPY FROM: use the copy_from() method instead"); + error = 1; + goto exit; + } + + if (!(func = PyObject_GetAttrString(curs->copyfile, "read"))) { + Dprintf("_pq_copy_in_v3: can't get o.read"); + error = 1; + goto exit; + } + if (!(size = PyInt_FromSsize_t(curs->copysize))) { + Dprintf("_pq_copy_in_v3: can't get int from copysize"); + error = 1; + goto exit; + } + + while (1) { + if (!(o = PyObject_CallFunctionObjArgs(func, size, NULL))) { + Dprintf("_pq_copy_in_v3: read() failed"); + error = 1; + break; + } + + /* a file may return unicode if implements io.TextIOBase */ + if (PyUnicode_Check(o)) { + PyObject *tmp; + if (!(tmp = conn_encode(curs->conn, o))) { + Dprintf("_pq_copy_in_v3: encoding() failed"); + error = 1; + break; + } + Py_DECREF(o); + o = tmp; + } + + if 
(!Bytes_Check(o)) { + Dprintf("_pq_copy_in_v3: got %s instead of bytes", + Py_TYPE(o)->tp_name); + error = 1; + break; + } + + if (0 == (length = Bytes_GET_SIZE(o))) { + break; + } + if (length > INT_MAX) { + Dprintf("_pq_copy_in_v3: bad length: " FORMAT_CODE_PY_SSIZE_T, + length); + error = 1; + break; + } + + Py_BEGIN_ALLOW_THREADS; + res = PQputCopyData(curs->conn->pgconn, Bytes_AS_STRING(o), + /* Py_ssize_t->int cast was validated above */ + (int) length); + Dprintf("_pq_copy_in_v3: sent " FORMAT_CODE_PY_SSIZE_T " bytes of data; res = %d", + length, res); + + if (res == 0) { + /* FIXME: in theory this should not happen but adding a check + here would be a nice idea */ + } + else if (res == -1) { + Dprintf("_pq_copy_in_v3: PQerrorMessage = %s", + PQerrorMessage(curs->conn->pgconn)); + error = 2; + } + Py_END_ALLOW_THREADS; + + if (error == 2) break; + + Py_DECREF(o); + } + + Py_XDECREF(o); + + Dprintf("_pq_copy_in_v3: error = %d", error); + + /* 0 means that the copy went well, 2 that there was an error on the + backend: in both cases we'll get the error message from the PQresult */ + if (error == 0) + res = PQputCopyEnd(curs->conn->pgconn, NULL); + else if (error == 2) + res = PQputCopyEnd(curs->conn->pgconn, "error in PQputCopyData() call"); + else { + char buf[1024]; + strcpy(buf, "error in .read() call"); + if (PyErr_Occurred()) { + PyObject *t, *ex, *tb; + PyErr_Fetch(&t, &ex, &tb); + if (ex) { + PyObject *str; + str = PyObject_Str(ex); + str = psyco_ensure_bytes(str); + if (str) { + PyOS_snprintf(buf, sizeof(buf), + "error in .read() call: %s %s", + ((PyTypeObject *)t)->tp_name, Bytes_AsString(str)); + Py_DECREF(str); + } + } + /* Clear the Py exception: it will be re-raised from the libpq */ + Py_XDECREF(t); + Py_XDECREF(ex); + Py_XDECREF(tb); + PyErr_Clear(); + } + res = PQputCopyEnd(curs->conn->pgconn, buf); + } + + CLEARPGRES(curs->pgres); + + Dprintf("_pq_copy_in_v3: copy ended; res = %d", res); + + /* if the result is -1 we should not even try to get 
a result from the + because that will lock the current thread forever */ + if (res == -1) { + pq_raise(curs->conn, curs, NULL); + /* FIXME: pq_raise check the connection but for some reason even + if the error message says "server closed the connection unexpectedly" + the status returned by PQstatus is CONNECTION_OK! */ + curs->conn->closed = 2; + } + else { + /* and finally we grab the operation result from the backend */ + for (;;) { + Py_BEGIN_ALLOW_THREADS; + curs_set_result(curs, PQgetResult(curs->conn->pgconn)); + Py_END_ALLOW_THREADS; + + if (NULL == curs->pgres) + break; + _read_rowcount(curs); + if (PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR) + pq_raise(curs->conn, curs, NULL); + CLEARPGRES(curs->pgres); + } + } + +exit: + Py_XDECREF(func); + Py_XDECREF(size); + return (error == 0 ? 1 : -1); +} + +static int +_pq_copy_out_v3(cursorObject *curs) +{ + PyObject *tmp = NULL; + PyObject *func = NULL; + PyObject *obj = NULL; + int ret = -1; + int is_text; + + char *buffer; + Py_ssize_t len; + + if (!curs->copyfile) { + PyErr_SetString(ProgrammingError, + "can't execute COPY TO: use the copy_to() method instead"); + goto exit; + } + + if (!(func = PyObject_GetAttrString(curs->copyfile, "write"))) { + Dprintf("_pq_copy_out_v3: can't get o.write"); + goto exit; + } + + /* if the file is text we must pass it unicode. 
*/ + if (-1 == (is_text = psyco_is_text_file(curs->copyfile))) { + goto exit; + } + + while (1) { + Py_BEGIN_ALLOW_THREADS; + len = PQgetCopyData(curs->conn->pgconn, &buffer, 0); + Py_END_ALLOW_THREADS; + + if (len > 0 && buffer) { + if (is_text) { + obj = conn_decode(curs->conn, buffer, len); + } else { + obj = Bytes_FromStringAndSize(buffer, len); + } + + PQfreemem(buffer); + if (!obj) { goto exit; } + tmp = PyObject_CallFunctionObjArgs(func, obj, NULL); + Py_DECREF(obj); + + if (tmp == NULL) { + goto exit; + } else { + Py_DECREF(tmp); + } + } + /* we break on len == 0 but note that that should *not* happen, + because we are not doing an async call (if it happens blame + postgresql authors :/) */ + else if (len <= 0) break; + } + + if (len == -2) { + pq_raise(curs->conn, curs, NULL); + goto exit; + } + + /* and finally we grab the operation result from the backend */ + for (;;) { + Py_BEGIN_ALLOW_THREADS; + curs_set_result(curs, PQgetResult(curs->conn->pgconn)); + Py_END_ALLOW_THREADS; + + if (NULL == curs->pgres) + break; + _read_rowcount(curs); + if (PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR) + pq_raise(curs->conn, curs, NULL); + CLEARPGRES(curs->pgres); + } + ret = 1; + +exit: + Py_XDECREF(func); + return ret; +} + +/* Tries to read the next message from the replication stream, without + blocking, in both sync and async connection modes. If no message + is ready in the CopyData buffer, tries to read from the server, + again without blocking. If that doesn't help, returns Py_None. + The caller is then supposed to block on the socket(s) and call this + function again. + + Any keepalive messages from the server are silently consumed and + are never returned to the caller. 
+ */ +int +pq_read_replication_message(replicationCursorObject *repl, replicationMessageObject **msg) +{ + cursorObject *curs = &repl->cur; + connectionObject *conn = curs->conn; + PGconn *pgconn = conn->pgconn; + char *buffer = NULL; + int len, data_size, consumed, hdr, reply; + XLogRecPtr data_start, wal_end; + int64_t send_time; + PyObject *str = NULL, *result = NULL; + int ret = -1; + struct timeval curr_time, feedback_time; + + Dprintf("pq_read_replication_message"); + + *msg = NULL; + consumed = 0; + + /* Is it a time to send the next feedback message? */ + gettimeofday(&curr_time, NULL); + timeradd(&repl->last_feedback, &repl->status_interval, &feedback_time); + if (timercmp(&curr_time, &feedback_time, >=) && pq_send_replication_feedback(repl, 0) < 0) { + goto exit; + } + +retry: + len = PQgetCopyData(pgconn, &buffer, 1 /* async */); + + if (len == 0) { + /* If we've tried reading some data, but there was none, bail out. */ + if (consumed) { + ret = 0; + goto exit; + } + /* We should only try reading more data when there is nothing + available at the moment. Otherwise, with a really highly loaded + server we might be reading a number of messages for every single + one we process, thus overgrowing the internal buffer until the + client system runs out of memory. */ + if (!PQconsumeInput(pgconn)) { + pq_raise(conn, curs, NULL); + goto exit; + } + /* But PQconsumeInput() doesn't tell us if it has actually read + anything into the internal buffer and there is no (supported) way + to ask libpq about this directly. The way we check is setting the + flag and re-trying PQgetCopyData(): if that returns 0 again, + there's no more data available in the buffer, so we return None. 
*/ + consumed = 1; + goto retry; + } + + if (len == -2) { + /* serious error */ + pq_raise(conn, curs, NULL); + goto exit; + } + if (len == -1) { + /* EOF */ + curs_set_result(curs, PQgetResult(pgconn)); + + if (curs->pgres && PQresultStatus(curs->pgres) == PGRES_FATAL_ERROR) { + pq_raise(conn, curs, NULL); + goto exit; + } + + CLEARPGRES(curs->pgres); + ret = 0; + goto exit; + } + + /* It also makes sense to set this flag here to make us return early in + case of retry due to keepalive message. Any pending data on the socket + will trigger read condition in select() in the calling code anyway. */ + consumed = 1; + + /* ok, we did really read something: update the io timestamp */ + gettimeofday(&repl->last_io, NULL); + + Dprintf("pq_read_replication_message: msg=%c, len=%d", buffer[0], len); + if (buffer[0] == 'w') { + /* XLogData: msgtype(1), dataStart(8), walEnd(8), sendTime(8) */ + hdr = 1 + 8 + 8 + 8; + if (len < hdr + 1) { + psyco_set_error(OperationalError, curs, "data message header too small"); + goto exit; + } + + data_size = len - hdr; + data_start = fe_recvint64(buffer + 1); + wal_end = fe_recvint64(buffer + 1 + 8); + send_time = fe_recvint64(buffer + 1 + 8 + 8); + + Dprintf("pq_read_replication_message: data_start="XLOGFMTSTR", wal_end="XLOGFMTSTR, + XLOGFMTARGS(data_start), XLOGFMTARGS(wal_end)); + + Dprintf("pq_read_replication_message: >>%.*s<<", data_size, buffer + hdr); + + if (repl->decode) { + str = conn_decode(conn, buffer + hdr, data_size); + } else { + str = Bytes_FromStringAndSize(buffer + hdr, data_size); + } + if (!str) { goto exit; } + + result = PyObject_CallFunctionObjArgs((PyObject *)&replicationMessageType, + curs, str, NULL); + Py_DECREF(str); + if (!result) { goto exit; } + + *msg = (replicationMessageObject *)result; + (*msg)->data_size = data_size; + (*msg)->data_start = data_start; + (*msg)->wal_end = wal_end; + (*msg)->send_time = send_time; + + repl->wal_end = wal_end; + repl->last_msg_data_start = data_start; + } + else if 
(buffer[0] == 'k') { + /* Primary keepalive message: msgtype(1), walEnd(8), sendTime(8), reply(1) */ + hdr = 1 + 8 + 8; + if (len < hdr + 1) { + psyco_set_error(OperationalError, curs, "keepalive message header too small"); + goto exit; + } + + wal_end = fe_recvint64(buffer + 1); + Dprintf("pq_read_replication_message: wal_end="XLOGFMTSTR, XLOGFMTARGS(wal_end)); + repl->wal_end = wal_end; + + /* We can safely forward flush_lsn to the wal_end from the server keepalive message + * if we know that the client already processed (confirmed) the last XLogData message */ + if (repl->explicitly_flushed_lsn >= repl->last_msg_data_start + && wal_end > repl->explicitly_flushed_lsn + && wal_end > repl->flush_lsn) { + repl->flush_lsn = wal_end; + } + + reply = buffer[hdr]; + if (reply && pq_send_replication_feedback(repl, 0) < 0) { + goto exit; + } + + PQfreemem(buffer); + buffer = NULL; + goto retry; + } + else { + psyco_set_error(OperationalError, curs, "unrecognized replication message type"); + goto exit; + } + + ret = 0; + +exit: + if (buffer) { + PQfreemem(buffer); + } + + return ret; +} + +int +pq_send_replication_feedback(replicationCursorObject *repl, int reply_requested) +{ + cursorObject *curs = &repl->cur; + connectionObject *conn = curs->conn; + PGconn *pgconn = conn->pgconn; + char replybuf[1 + 8 + 8 + 8 + 8 + 1]; + int len = 0; + + Dprintf("pq_send_replication_feedback: write="XLOGFMTSTR", flush="XLOGFMTSTR", apply="XLOGFMTSTR, + XLOGFMTARGS(repl->write_lsn), + XLOGFMTARGS(repl->flush_lsn), + XLOGFMTARGS(repl->apply_lsn)); + + replybuf[len] = 'r'; len += 1; + fe_sendint64(repl->write_lsn, &replybuf[len]); len += 8; + fe_sendint64(repl->flush_lsn, &replybuf[len]); len += 8; + fe_sendint64(repl->apply_lsn, &replybuf[len]); len += 8; + fe_sendint64(feGetCurrentTimestamp(), &replybuf[len]); len += 8; + replybuf[len] = reply_requested ? 
1 : 0; len += 1; + + if (PQputCopyData(pgconn, replybuf, len) <= 0 || PQflush(pgconn) != 0) { + pq_raise(conn, curs, NULL); + return -1; + } + gettimeofday(&repl->last_feedback, NULL); + repl->last_io = repl->last_feedback; + + return 0; +} + +/* Calls pq_read_replication_message in an endless loop, until + stop_replication is called or a fatal error occurs. The messages + are passed to the consumer object. + + When no message is available, blocks on the connection socket, but + manages to send keepalive messages to the server as needed. +*/ +int +pq_copy_both(replicationCursorObject *repl, PyObject *consume) +{ + cursorObject *curs = &repl->cur; + connectionObject *conn = curs->conn; + PGconn *pgconn = conn->pgconn; + replicationMessageObject *msg = NULL; + PyObject *tmp = NULL; + int fd, sel, ret = -1; + fd_set fds; + struct timeval curr_time, feedback_time, timeout; + + if (!PyCallable_Check(consume)) { + Dprintf("pq_copy_both: expected callable consume object"); + goto exit; + } + + CLEARPGRES(curs->pgres); + + while (1) { + if (pq_read_replication_message(repl, &msg) < 0) { + goto exit; + } + else if (msg == NULL) { + fd = PQsocket(pgconn); + if (fd < 0) { + pq_raise(conn, curs, NULL); + goto exit; + } + + FD_ZERO(&fds); + FD_SET(fd, &fds); + + /* how long can we wait before we need to send a feedback? 
*/ + gettimeofday(&curr_time, NULL); + + timeradd(&repl->last_feedback, &repl->status_interval, &feedback_time); + timersub(&feedback_time, &curr_time, &timeout); + + if (timeout.tv_sec >= 0) { + Py_BEGIN_ALLOW_THREADS; + sel = select(fd + 1, &fds, NULL, NULL, &timeout); + Py_END_ALLOW_THREADS; + + if (sel < 0) { + if (errno != EINTR) { + PyErr_SetFromErrno(PyExc_OSError); + goto exit; + } + if (PyErr_CheckSignals()) { + goto exit; + } + } + } + } + else { + tmp = PyObject_CallFunctionObjArgs(consume, msg, NULL); + Py_DECREF(msg); + + if (tmp == NULL) { + Dprintf("pq_copy_both: consume returned NULL"); + goto exit; + } + Py_DECREF(tmp); + } + } + + ret = 1; + +exit: + return ret; +} + +int +pq_fetch(cursorObject *curs, int no_result) +{ + int pgstatus, ex = -1; + + /* even if we fail, we remove any information about the previous query */ + curs_reset(curs); + + if (curs->pgres == NULL) return 0; + + pgstatus = PQresultStatus(curs->pgres); + Dprintf("pq_fetch: pgstatus = %s", PQresStatus(pgstatus)); + + /* backend status message */ + Py_CLEAR(curs->pgstatus); + if (!(curs->pgstatus = conn_text_from_chars( + curs->conn, PQcmdStatus(curs->pgres)))) { + ex = -1; + return ex; + } + + switch(pgstatus) { + + case PGRES_COMMAND_OK: + Dprintf("pq_fetch: command returned OK (no tuples)"); + _read_rowcount(curs); + curs->lastoid = PQoidValue(curs->pgres); + CLEARPGRES(curs->pgres); + ex = 1; + break; + + case PGRES_COPY_OUT: + Dprintf("pq_fetch: data from a COPY TO (no tuples)"); + curs->rowcount = -1; + ex = _pq_copy_out_v3(curs); + /* error caught by our glorious notice handler */ + if (PyErr_Occurred()) ex = -1; + CLEARPGRES(curs->pgres); + break; + + case PGRES_COPY_IN: + Dprintf("pq_fetch: data from a COPY FROM (no tuples)"); + curs->rowcount = -1; + ex = _pq_copy_in_v3(curs); + /* error caught by our glorious notice handler */ + if (PyErr_Occurred()) ex = -1; + CLEARPGRES(curs->pgres); + break; + + case PGRES_COPY_BOTH: + Dprintf("pq_fetch: data from a streaming 
replication slot (no tuples)"); + curs->rowcount = -1; + ex = 0; + /* Nothing to do here: pq_copy_both will be called separately. + + Also don't clear the result status: it's checked in + consume_stream. */ + /*CLEARPGRES(curs->pgres);*/ + break; + + case PGRES_TUPLES_OK: + if (!no_result) { + Dprintf("pq_fetch: got tuples"); + curs->rowcount = PQntuples(curs->pgres); + if (0 == _pq_fetch_tuples(curs)) { ex = 0; } + /* don't clear curs->pgres, because it contains the results! */ + } + else { + Dprintf("pq_fetch: got tuples, discarding them"); + /* TODO: is there any case in which PQntuples == PQcmdTuples? */ + _read_rowcount(curs); + CLEARPGRES(curs->pgres); + ex = 0; + } + break; + + case PGRES_EMPTY_QUERY: + PyErr_SetString(ProgrammingError, + "can't execute an empty query"); + CLEARPGRES(curs->pgres); + ex = -1; + break; + + case PGRES_BAD_RESPONSE: + case PGRES_NONFATAL_ERROR: + case PGRES_FATAL_ERROR: + Dprintf("pq_fetch: uh-oh, something FAILED: status = %d pgconn = %p", + pgstatus, curs->conn); + pq_raise(curs->conn, curs, NULL); + ex = -1; + break; + + default: + /* PGRES_SINGLE_TUPLE, future statuses */ + Dprintf("pq_fetch: got unsupported result: status = %d pgconn = %p", + pgstatus, curs->conn); + PyErr_Format(NotSupportedError, + "got server response with unsupported status %s", + PQresStatus(curs->pgres == NULL ? + PQstatus(curs->conn->pgconn) : PQresultStatus(curs->pgres))); + CLEARPGRES(curs->pgres); + ex = -1; + break; + } + + return ex; +} diff --git a/psycopg/pqpath.h b/psycopg/pqpath.h new file mode 100644 index 0000000000000000000000000000000000000000..d5ba4d14f68a9a35b3cfb2ba4e48e270aeca426c --- /dev/null +++ b/psycopg/pqpath.h @@ -0,0 +1,74 @@ +/* pqpath.h - definitions for pqpath.c + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_PQPATH_H +#define PSYCOPG_PQPATH_H 1 + +#include "psycopg/cursor.h" +#include "psycopg/connection.h" +#include "psycopg/replication_cursor.h" +#include "psycopg/replication_message.h" + +/* macro to clean the pg result */ +#define CLEARPGRES(pgres) do { PQclear(pgres); pgres = NULL; } while (0) + +/* exported functions */ +RAISES_NEG HIDDEN int pq_fetch(cursorObject *curs, int no_result); +RAISES_NEG HIDDEN int pq_execute(cursorObject *curs, const char *query, + int async, int no_result, int no_begin); +HIDDEN int pq_send_query(connectionObject *conn, const char *query); +HIDDEN int pq_begin_locked(connectionObject *conn, PyThreadState **tstate); +HIDDEN int pq_commit(connectionObject *conn); +RAISES_NEG HIDDEN int pq_abort_locked(connectionObject *conn, + PyThreadState **tstate); +RAISES_NEG HIDDEN int pq_abort(connectionObject *conn); +HIDDEN int pq_reset_locked(connectionObject *conn, PyThreadState **tstate); +RAISES_NEG HIDDEN int pq_reset(connectionObject *conn); +HIDDEN char *pq_get_guc_locked(connectionObject *conn, const char *param, + 
PyThreadState **tstate); +HIDDEN int pq_set_guc_locked(connectionObject *conn, const char *param, + const char *value, PyThreadState **tstate); +HIDDEN int pq_tpc_command_locked(connectionObject *conn, + const char *cmd, const char *tid, + PyThreadState **tstate); +RAISES_NEG HIDDEN int pq_get_result_async(connectionObject *conn); +HIDDEN int pq_flush(connectionObject *conn); +HIDDEN void pq_clear_async(connectionObject *conn); +RAISES_NEG HIDDEN int pq_set_non_blocking(connectionObject *conn, int arg); + +HIDDEN void pq_set_critical(connectionObject *conn, const char *msg); + +HIDDEN int pq_execute_command_locked(connectionObject *conn, const char *query, + PyThreadState **tstate); +RAISES HIDDEN void pq_complete_error(connectionObject *conn); + +/* replication protocol support */ +HIDDEN int pq_copy_both(replicationCursorObject *repl, PyObject *consumer); +HIDDEN int pq_read_replication_message(replicationCursorObject *repl, + replicationMessageObject **msg); +HIDDEN int pq_send_replication_feedback(replicationCursorObject *repl, int reply_requested); + +#endif /* !defined(PSYCOPG_PQPATH_H) */ diff --git a/psycopg/psycopg.h b/psycopg/psycopg.h new file mode 100644 index 0000000000000000000000000000000000000000..afda00f47ff265d26ec1190122b59c9bb08ffb76 --- /dev/null +++ b/psycopg/psycopg.h @@ -0,0 +1,107 @@ +/* psycopg.h - definitions for the psycopg python module + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_H +#define PSYCOPG_H 1 + +#if PG_VERSION_NUM < 90100 +#error "Psycopg requires PostgreSQL client library (libpq) >= 9.1" +#endif + +#define PY_SSIZE_T_CLEAN +#include +#include + +#include "psycopg/config.h" +#include "psycopg/python.h" +#include "psycopg/utils.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* DBAPI compliance parameters */ +#define APILEVEL "2.0" +#define THREADSAFETY 2 +#define PARAMSTYLE "pyformat" + +/* global exceptions */ +extern HIDDEN PyObject *Error, *Warning, *InterfaceError, *DatabaseError, + *InternalError, *OperationalError, *ProgrammingError, + *IntegrityError, *DataError, *NotSupportedError; +extern HIDDEN PyObject *QueryCanceledError, *TransactionRollbackError; + +/* sqlstate -> exception map */ +extern HIDDEN PyObject *sqlstate_errors; + +/* postgresql<->python encoding map */ +extern HIDDEN PyObject *psycoEncodings; + +/* SQL NULL */ +extern HIDDEN PyObject *psyco_null; + +/* Exceptions docstrings */ +#define Error_doc \ +"Base class for error exceptions." + +#define Warning_doc \ +"A database warning." + +#define InterfaceError_doc \ +"Error related to the database interface." + +#define DatabaseError_doc \ +"Error related to the database engine." + +#define InternalError_doc \ +"The database encountered an internal error." 
+ +#define OperationalError_doc \ +"Error related to database operation (disconnect, memory allocation etc)." + +#define ProgrammingError_doc \ +"Error related to database programming (SQL error, table not found etc)." + +#define IntegrityError_doc \ +"Error related to database integrity." + +#define DataError_doc \ +"Error related to problems with the processed data." + +#define NotSupportedError_doc \ +"A method or database API was used which is not supported by the database." + +#define QueryCanceledError_doc \ +"Error related to SQL query cancellation." + +#define TransactionRollbackError_doc \ +"Error causing transaction rollback (deadlocks, serialization failures, etc)." + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_H) */ diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c new file mode 100644 index 0000000000000000000000000000000000000000..5ab6f5fdd156b1add3aa9b698bb5ca2da39495ef --- /dev/null +++ b/psycopg/psycopgmodule.c @@ -0,0 +1,1030 @@ +/* psycopgmodule.c - psycopg module (will import other C classes) + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. 
+ * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/connection.h" +#include "psycopg/cursor.h" +#include "psycopg/replication_connection.h" +#include "psycopg/replication_cursor.h" +#include "psycopg/replication_message.h" +#include "psycopg/green.h" +#include "psycopg/column.h" +#include "psycopg/lobject.h" +#include "psycopg/notify.h" +#include "psycopg/xid.h" +#include "psycopg/typecast.h" +#include "psycopg/microprotocols.h" +#include "psycopg/microprotocols_proto.h" +#include "psycopg/conninfo.h" +#include "psycopg/diagnostics.h" + +#include "psycopg/adapter_qstring.h" +#include "psycopg/adapter_binary.h" +#include "psycopg/adapter_pboolean.h" +#include "psycopg/adapter_pint.h" +#include "psycopg/adapter_pfloat.h" +#include "psycopg/adapter_pdecimal.h" +#include "psycopg/adapter_asis.h" +#include "psycopg/adapter_list.h" +#include "psycopg/typecast_binary.h" + +/* some module-level variables, like the datetime module */ +#include +#include "psycopg/adapter_datetime.h" + +HIDDEN PyObject *psycoEncodings = NULL; +HIDDEN PyObject *sqlstate_errors = NULL; + +#ifdef PSYCOPG_DEBUG +HIDDEN int psycopg_debug_enabled = 0; +#endif + +/* Python representation of SQL NULL */ +HIDDEN PyObject *psyco_null = NULL; + +/* macro trick to stringify a macro expansion */ +#define xstr(s) str(s) +#define str(s) #s + +/** connect module-level function **/ +#define psyco_connect_doc \ +"_connect(dsn, [connection_factory], [async]) -- New database connection.\n\n" + +static PyObject * +psyco_connect(PyObject *self, PyObject *args, PyObject *keywds) +{ + PyObject *conn = NULL; + PyObject *factory = NULL; + const char *dsn = NULL; + int async = 0, async_ = 0; + + static char *kwlist[] = {"dsn", 
"connection_factory", "async", "async_", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, keywds, "s|Oii", kwlist, + &dsn, &factory, &async, &async_)) { + return NULL; + } + + if (async_) { async = async_; } + + Dprintf("psyco_connect: dsn = '%s', async = %d", dsn, async); + + /* allocate connection, fill with errors and return it */ + if (factory == NULL || factory == Py_None) { + factory = (PyObject *)&connectionType; + } + + /* Here we are breaking the connection.__init__ interface defined + * by psycopg2. So, if not requiring an async conn, avoid passing + * the async parameter. */ + /* TODO: would it be possible to avoid an additional parameter + * to the conn constructor? A subclass? (but it would require mixins + * to further subclass) Another dsn parameter (but is not really + * a connection parameter that can be configured) */ + if (!async) { + conn = PyObject_CallFunction(factory, "s", dsn); + } else { + conn = PyObject_CallFunction(factory, "si", dsn, async); + } + + return conn; +} + + +#define parse_dsn_doc \ +"parse_dsn(dsn) -> dict -- parse a connection string into parameters" + +static PyObject * +parse_dsn(PyObject *self, PyObject *args, PyObject *kwargs) +{ + char *err = NULL; + PQconninfoOption *options = NULL; + PyObject *res = NULL, *dsn; + + static char *kwlist[] = {"dsn", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O", kwlist, &dsn)) { + return NULL; + } + + Py_INCREF(dsn); /* for ensure_bytes */ + if (!(dsn = psyco_ensure_bytes(dsn))) { goto exit; } + + options = PQconninfoParse(Bytes_AS_STRING(dsn), &err); + if (options == NULL) { + if (err != NULL) { + PyErr_Format(ProgrammingError, "invalid dsn: %s", err); + PQfreemem(err); + } else { + PyErr_SetString(OperationalError, "PQconninfoParse() failed"); + } + goto exit; + } + + res = psyco_dict_from_conninfo_options(options, /* include_password = */ 1); + +exit: + PQconninfoFree(options); /* safe on null */ + Py_XDECREF(dsn); + + return res; +} + + +#define quote_ident_doc \ 
+"quote_ident(str, conn_or_curs) -> str -- wrapper around PQescapeIdentifier\n\n" \ +":Parameters:\n" \ +" * `str`: A bytes or unicode object\n" \ +" * `conn_or_curs`: A connection or cursor, required" + +static PyObject * +quote_ident(PyObject *self, PyObject *args, PyObject *kwargs) +{ + PyObject *ident = NULL, *obj = NULL, *result = NULL; + connectionObject *conn; + char *quoted = NULL; + + static char *kwlist[] = {"ident", "scope", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO", kwlist, &ident, &obj)) { + return NULL; + } + + if (PyObject_TypeCheck(obj, &cursorType)) { + conn = ((cursorObject*)obj)->conn; + } + else if (PyObject_TypeCheck(obj, &connectionType)) { + conn = (connectionObject*)obj; + } + else { + PyErr_SetString(PyExc_TypeError, + "argument 2 must be a connection or a cursor"); + return NULL; + } + + Py_INCREF(ident); /* for ensure_bytes */ + if (!(ident = psyco_ensure_bytes(ident))) { goto exit; } + + if (!(quoted = psyco_escape_identifier(conn, + Bytes_AS_STRING(ident), Bytes_GET_SIZE(ident)))) { goto exit; } + + result = conn_text_from_chars(conn, quoted); + +exit: + PQfreemem(quoted); + Py_XDECREF(ident); + + return result; +} + +/** type registration **/ +#define register_type_doc \ +"register_type(obj, conn_or_curs) -> None -- register obj with psycopg type system\n\n" \ +":Parameters:\n" \ +" * `obj`: A type adapter created by `new_type()`\n" \ +" * `conn_or_curs`: A connection, cursor or None" + +#define typecast_from_python_doc \ +"new_type(oids, name, castobj) -> new type object\n\n" \ +"Create a new binding object. 
The object can be used with the\n" \ +"`register_type()` function to bind PostgreSQL objects to python objects.\n\n" \ +":Parameters:\n" \ +" * `oids`: Tuple of ``oid`` of the PostgreSQL types to convert.\n" \ +" * `name`: Name for the new type\n" \ +" * `adapter`: Callable to perform type conversion.\n" \ +" It must have the signature ``fun(value, cur)`` where ``value`` is\n" \ +" the string representation returned by PostgreSQL (`!None` if ``NULL``)\n" \ +" and ``cur`` is the cursor from which data are read." + +#define typecast_array_from_python_doc \ +"new_array_type(oids, name, baseobj) -> new type object\n\n" \ +"Create a new binding object to parse an array.\n\n" \ +"The object can be used with `register_type()`.\n\n" \ +":Parameters:\n" \ +" * `oids`: Tuple of ``oid`` of the PostgreSQL types to convert.\n" \ +" * `name`: Name for the new type\n" \ +" * `baseobj`: Adapter to perform type conversion of a single array item." + +static PyObject * +register_type(PyObject *self, PyObject *args) +{ + PyObject *type, *obj = NULL; + + if (!PyArg_ParseTuple(args, "O!|O", &typecastType, &type, &obj)) { + return NULL; + } + + if (obj != NULL && obj != Py_None) { + if (PyObject_TypeCheck(obj, &cursorType)) { + PyObject **dict = &(((cursorObject*)obj)->string_types); + if (*dict == NULL) { + if (!(*dict = PyDict_New())) { return NULL; } + } + if (0 > typecast_add(type, *dict, 0)) { return NULL; } + } + else if (PyObject_TypeCheck(obj, &connectionType)) { + if (0 > typecast_add(type, ((connectionObject*)obj)->string_types, 0)) { + return NULL; + } + } + else { + PyErr_SetString(PyExc_TypeError, + "argument 2 must be a connection, cursor or None"); + return NULL; + } + } + else { + if (0 > typecast_add(type, NULL, 0)) { return NULL; } + } + + Py_RETURN_NONE; +} + + + +/* Make sure libcrypto thread callbacks are set up. 
*/ +static void +libcrypto_threads_init(void) +{ + PyObject *m; + + Dprintf("psycopgmodule: configuring libpq libcrypto callbacks "); + + /* importing the ssl module sets up Python's libcrypto callbacks */ + if ((m = PyImport_ImportModule("ssl"))) { + /* disable libcrypto setup in libpq, so it won't stomp on the callbacks + that have already been set up */ + PQinitOpenSSL(1, 0); + Py_DECREF(m); + } + else { + /* might mean that Python has been compiled without OpenSSL support, + fall back to relying on libpq's libcrypto locking */ + PyErr_Clear(); + } +} + +/* Initialize the default adapters map + * + * Return 0 on success, else -1 and set an exception. + */ +RAISES_NEG static int +adapters_init(PyObject *module) +{ + PyObject *dict = NULL, *obj = NULL; + int rv = -1; + + if (0 > microprotocols_init(module)) { goto exit; } + + Dprintf("psycopgmodule: initializing adapters"); + + if (0 > microprotocols_add(&PyFloat_Type, NULL, (PyObject*)&pfloatType)) { + goto exit; + } + if (0 > microprotocols_add(&PyLong_Type, NULL, (PyObject*)&pintType)) { + goto exit; + } + if (0 > microprotocols_add(&PyBool_Type, NULL, (PyObject*)&pbooleanType)) { + goto exit; + } + + /* strings */ + if (0 > microprotocols_add(&PyUnicode_Type, NULL, (PyObject*)&qstringType)) { + goto exit; + } + + /* binary */ + if (0 > microprotocols_add(&PyBytes_Type, NULL, (PyObject*)&binaryType)) { + goto exit; + } + + if (0 > microprotocols_add(&PyByteArray_Type, NULL, (PyObject*)&binaryType)) { + goto exit; + } + + if (0 > microprotocols_add(&PyMemoryView_Type, NULL, (PyObject*)&binaryType)) { + goto exit; + } + + if (0 > microprotocols_add(&PyList_Type, NULL, (PyObject*)&listType)) { + goto exit; + } + + /* the module has already been initialized, so we can obtain the callable + objects directly from its dictionary :) */ + if (!(dict = PyModule_GetDict(module))) { goto exit; } + + if (!(obj = PyMapping_GetItemString(dict, "DateFromPy"))) { goto exit; } + if (0 > 
microprotocols_add(PyDateTimeAPI->DateType, NULL, obj)) { goto exit; } + Py_CLEAR(obj); + + if (!(obj = PyMapping_GetItemString(dict, "TimeFromPy"))) { goto exit; } + if (0 > microprotocols_add(PyDateTimeAPI->TimeType, NULL, obj)) { goto exit; } + Py_CLEAR(obj); + + if (!(obj = PyMapping_GetItemString(dict, "TimestampFromPy"))) { goto exit; } + if (0 > microprotocols_add(PyDateTimeAPI->DateTimeType, NULL, obj)) { goto exit; } + Py_CLEAR(obj); + + if (!(obj = PyMapping_GetItemString(dict, "IntervalFromPy"))) { goto exit; } + if (0 > microprotocols_add(PyDateTimeAPI->DeltaType, NULL, obj)) { goto exit; } + Py_CLEAR(obj); + + /* Success! */ + rv = 0; + +exit: + Py_XDECREF(obj); + + return rv; +} + +#define libpq_version_doc "Query actual libpq version loaded." + +static PyObject* +libpq_version(PyObject *self, PyObject *dummy) +{ + return PyInt_FromLong(PQlibVersion()); +} + +/* encrypt_password - Prepare the encrypted password form */ +#define encrypt_password_doc \ +"encrypt_password(password, user, [scope], [algorithm]) -- Prepares the encrypted form of a PostgreSQL password.\n\n" + +static PyObject * +encrypt_password(PyObject *self, PyObject *args, PyObject *kwargs) +{ + char *encrypted = NULL; + PyObject *password = NULL, *user = NULL; + PyObject *scope = Py_None, *algorithm = Py_None; + PyObject *res = NULL; + connectionObject *conn = NULL; + + static char *kwlist[] = {"password", "user", "scope", "algorithm", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|OO", kwlist, + &password, &user, &scope, &algorithm)) { + return NULL; + } + + /* for ensure_bytes */ + Py_INCREF(user); + Py_INCREF(password); + Py_INCREF(algorithm); + + if (scope != Py_None) { + if (PyObject_TypeCheck(scope, &cursorType)) { + conn = ((cursorObject*)scope)->conn; + } + else if (PyObject_TypeCheck(scope, &connectionType)) { + conn = (connectionObject*)scope; + } + else { + PyErr_SetString(PyExc_TypeError, + "the scope must be a connection or a cursor"); + goto exit; + } + } + 
+ if (!(user = psyco_ensure_bytes(user))) { goto exit; } + if (!(password = psyco_ensure_bytes(password))) { goto exit; } + if (algorithm != Py_None) { + if (!(algorithm = psyco_ensure_bytes(algorithm))) { + goto exit; + } + } + + /* If we have to encrypt md5 we can use the libpq < 10 API */ + if (algorithm != Py_None && + strcmp(Bytes_AS_STRING(algorithm), "md5") == 0) { + encrypted = PQencryptPassword( + Bytes_AS_STRING(password), Bytes_AS_STRING(user)); + } + + /* If the algorithm is not md5 we have to use the API available from + * libpq 10. */ + else { +#if PG_VERSION_NUM >= 100000 + if (!conn) { + PyErr_SetString(ProgrammingError, + "password encryption (other than 'md5' algorithm)" + " requires a connection or cursor"); + goto exit; + } + + /* TODO: algo = None will block: forbid on async/green conn? */ + encrypted = PQencryptPasswordConn(conn->pgconn, + Bytes_AS_STRING(password), Bytes_AS_STRING(user), + algorithm != Py_None ? Bytes_AS_STRING(algorithm) : NULL); +#else + PyErr_SetString(NotSupportedError, + "password encryption (other than 'md5' algorithm)" + " requires libpq 10"); + goto exit; +#endif + } + + if (encrypted) { + res = Text_FromUTF8(encrypted); + } + else { + const char *msg = PQerrorMessage(conn->pgconn); + PyErr_Format(ProgrammingError, + "password encryption failed: %s", msg ? 
msg : "no reason given"); + goto exit; + } + +exit: + if (encrypted) { + PQfreemem(encrypted); + } + Py_XDECREF(user); + Py_XDECREF(password); + Py_XDECREF(algorithm); + + return res; +} + + +/* Fill the module's postgresql<->python encoding table */ +static struct { + char *pgenc; + char *pyenc; +} enctable[] = { + {"ABC", "cp1258"}, + {"ALT", "cp866"}, + {"BIG5", "big5"}, + {"EUC_CN", "euccn"}, + {"EUC_JIS_2004", "euc_jis_2004"}, + {"EUC_JP", "euc_jp"}, + {"EUC_KR", "euc_kr"}, + {"GB18030", "gb18030"}, + {"GBK", "gbk"}, + {"ISO_8859_1", "iso8859_1"}, + {"ISO_8859_2", "iso8859_2"}, + {"ISO_8859_3", "iso8859_3"}, + {"ISO_8859_5", "iso8859_5"}, + {"ISO_8859_6", "iso8859_6"}, + {"ISO_8859_7", "iso8859_7"}, + {"ISO_8859_8", "iso8859_8"}, + {"ISO_8859_9", "iso8859_9"}, + {"ISO_8859_10", "iso8859_10"}, + {"ISO_8859_13", "iso8859_13"}, + {"ISO_8859_14", "iso8859_14"}, + {"ISO_8859_15", "iso8859_15"}, + {"ISO_8859_16", "iso8859_16"}, + {"JOHAB", "johab"}, + {"KOI8", "koi8_r"}, + {"KOI8R", "koi8_r"}, + {"KOI8U", "koi8_u"}, + {"LATIN1", "iso8859_1"}, + {"LATIN2", "iso8859_2"}, + {"LATIN3", "iso8859_3"}, + {"LATIN4", "iso8859_4"}, + {"LATIN5", "iso8859_9"}, + {"LATIN6", "iso8859_10"}, + {"LATIN7", "iso8859_13"}, + {"LATIN8", "iso8859_14"}, + {"LATIN9", "iso8859_15"}, + {"LATIN10", "iso8859_16"}, + {"Mskanji", "cp932"}, + {"ShiftJIS", "cp932"}, + {"SHIFT_JIS_2004", "shift_jis_2004"}, + {"SJIS", "cp932"}, + {"SQL_ASCII", "ascii"}, /* XXX this is wrong: SQL_ASCII means "no + * encoding" we should fix the unicode + * typecaster to return a str or bytes in Py3 + */ + {"TCVN", "cp1258"}, + {"TCVN5712", "cp1258"}, + {"UHC", "cp949"}, + {"UNICODE", "utf_8"}, /* Not valid in 8.2, backward compatibility */ + {"UTF8", "utf_8"}, + {"VSCII", "cp1258"}, + {"WIN", "cp1251"}, + {"WIN866", "cp866"}, + {"WIN874", "cp874"}, + {"WIN932", "cp932"}, + {"WIN936", "gbk"}, + {"WIN949", "cp949"}, + {"WIN950", "cp950"}, + {"WIN1250", "cp1250"}, + {"WIN1251", "cp1251"}, + {"WIN1252", "cp1252"}, + 
{"WIN1253", "cp1253"}, + {"WIN1254", "cp1254"}, + {"WIN1255", "cp1255"}, + {"WIN1256", "cp1256"}, + {"WIN1257", "cp1257"}, + {"WIN1258", "cp1258"}, + {"Windows932", "cp932"}, + {"Windows936", "gbk"}, + {"Windows949", "cp949"}, + {"Windows950", "cp950"}, + +/* those are missing from Python: */ +/* {"EUC_TW", "?"}, */ +/* {"MULE_INTERNAL", "?"}, */ + {NULL, NULL} +}; + +/* Initialize the encodings table. + * + * Return 0 on success, else -1 and set an exception. + */ +RAISES_NEG static int +encodings_init(PyObject *module) +{ + PyObject *value = NULL; + int i; + int rv = -1; + + Dprintf("psycopgmodule: initializing encodings table"); + if (psycoEncodings) { + Dprintf("encodings_init(): already called"); + return 0; + } + + if (!(psycoEncodings = PyDict_New())) { goto exit; } + Py_INCREF(psycoEncodings); + if (0 > PyModule_AddObject(module, "encodings", psycoEncodings)) { + Py_DECREF(psycoEncodings); + goto exit; + } + + for (i = 0; enctable[i].pgenc != NULL; i++) { + if (!(value = Text_FromUTF8(enctable[i].pyenc))) { goto exit; } + if (0 > PyDict_SetItemString( + psycoEncodings, enctable[i].pgenc, value)) { + goto exit; + } + Py_CLEAR(value); + } + rv = 0; + +exit: + Py_XDECREF(value); + + return rv; +} + +/* Initialize the module's exceptions and after that a dictionary with a full + set of exceptions. 
*/ + +PyObject *Error, *Warning, *InterfaceError, *DatabaseError, + *InternalError, *OperationalError, *ProgrammingError, + *IntegrityError, *DataError, *NotSupportedError; +PyObject *QueryCanceledError, *TransactionRollbackError; + +/* mapping between exception names and their PyObject */ +static struct { + char *name; + PyObject **exc; + PyObject **base; + const char *docstr; +} exctable[] = { + { "psycopg2.Error", &Error, NULL, Error_doc }, + { "psycopg2.Warning", &Warning, NULL, Warning_doc }, + { "psycopg2.InterfaceError", &InterfaceError, &Error, InterfaceError_doc }, + { "psycopg2.DatabaseError", &DatabaseError, &Error, DatabaseError_doc }, + { "psycopg2.InternalError", &InternalError, &DatabaseError, InternalError_doc }, + { "psycopg2.OperationalError", &OperationalError, &DatabaseError, + OperationalError_doc }, + { "psycopg2.ProgrammingError", &ProgrammingError, &DatabaseError, + ProgrammingError_doc }, + { "psycopg2.IntegrityError", &IntegrityError, &DatabaseError, + IntegrityError_doc }, + { "psycopg2.DataError", &DataError, &DatabaseError, DataError_doc }, + { "psycopg2.NotSupportedError", &NotSupportedError, &DatabaseError, + NotSupportedError_doc }, + { "psycopg2.extensions.QueryCanceledError", &QueryCanceledError, + &OperationalError, QueryCanceledError_doc }, + { "psycopg2.extensions.TransactionRollbackError", + &TransactionRollbackError, &OperationalError, + TransactionRollbackError_doc }, + {NULL} /* Sentinel */ +}; + + +RAISES_NEG static int +basic_errors_init(PyObject *module) +{ + /* the names of the exceptions here reflect the organization of the + psycopg2 module and not the fact the original error objects live in + _psycopg */ + + int i; + PyObject *dict = NULL; + PyObject *str = NULL; + PyObject *errmodule = NULL; + int rv = -1; + + Dprintf("psycopgmodule: initializing basic exceptions"); + + /* 'Error' has been defined elsewhere: only init the other classes */ + Error = (PyObject *)&errorType; + + for (i = 1; exctable[i].name; i++) { + if 
(!(dict = PyDict_New())) { goto exit; } + + if (exctable[i].docstr) { + if (!(str = Text_FromUTF8(exctable[i].docstr))) { goto exit; } + if (0 > PyDict_SetItemString(dict, "__doc__", str)) { goto exit; } + Py_CLEAR(str); + } + + /* can't put PyExc_StandardError in the static exctable: + * windows build will fail */ + if (!(*exctable[i].exc = PyErr_NewException( + exctable[i].name, + exctable[i].base ? *exctable[i].base : PyExc_StandardError, + dict))) { + goto exit; + } + Py_CLEAR(dict); + } + + if (!(errmodule = PyImport_ImportModule("psycopg2.errors"))) { + /* don't inject the exceptions into the errors module */ + PyErr_Clear(); + } + + for (i = 0; exctable[i].name; i++) { + char *name; + if (NULL == exctable[i].exc) { continue; } + + /* the name is the part after the last dot */ + name = strrchr(exctable[i].name, '.'); + name = name ? name + 1 : exctable[i].name; + + Py_INCREF(*exctable[i].exc); + if (0 > PyModule_AddObject(module, name, *exctable[i].exc)) { + Py_DECREF(*exctable[i].exc); + goto exit; + } + if (errmodule) { + Py_INCREF(*exctable[i].exc); + if (0 > PyModule_AddObject(errmodule, name, *exctable[i].exc)) { + Py_DECREF(*exctable[i].exc); + goto exit; + } + } + } + + rv = 0; + +exit: + Py_XDECREF(errmodule); + Py_XDECREF(str); + Py_XDECREF(dict); + return rv; +} + + +/* mapping between sqlstate and exception name */ +static struct { + char *sqlstate; + char *name; +} sqlstate_table[] = { +#include "sqlstate_errors.h" + {NULL} /* Sentinel */ +}; + + +RAISES_NEG static int +sqlstate_errors_init(PyObject *module) +{ + int i; + char namebuf[120]; + char prefix[] = "psycopg2.errors."; + char *suffix; + size_t bufsize; + PyObject *exc = NULL; + PyObject *errmodule = NULL; + int rv = -1; + + Dprintf("psycopgmodule: initializing sqlstate exceptions"); + + if (sqlstate_errors) { + Dprintf("sqlstate_errors_init(): already called"); + return 0; + } + if (!(errmodule = PyImport_ImportModule("psycopg2.errors"))) { + /* don't inject the exceptions into the errors 
module */ + PyErr_Clear(); + } + if (!(sqlstate_errors = PyDict_New())) { + goto exit; + } + Py_INCREF(sqlstate_errors); + if (0 > PyModule_AddObject(module, "sqlstate_errors", sqlstate_errors)) { + Py_DECREF(sqlstate_errors); + return -1; + } + + strcpy(namebuf, prefix); + suffix = namebuf + sizeof(prefix) - 1; + bufsize = sizeof(namebuf) - sizeof(prefix) - 1; + /* If this 0 gets deleted the buffer was too small. */ + namebuf[sizeof(namebuf) - 1] = '\0'; + + for (i = 0; sqlstate_table[i].sqlstate; i++) { + PyObject *base; + + base = base_exception_from_sqlstate(sqlstate_table[i].sqlstate); + strncpy(suffix, sqlstate_table[i].name, bufsize); + if (namebuf[sizeof(namebuf) - 1] != '\0') { + PyErr_SetString( + PyExc_SystemError, "sqlstate_errors_init(): buffer too small"); + goto exit; + } + if (!(exc = PyErr_NewException(namebuf, base, NULL))) { + goto exit; + } + if (0 > PyDict_SetItemString( + sqlstate_errors, sqlstate_table[i].sqlstate, exc)) { + goto exit; + } + + /* Expose the exceptions to psycopg2.errors */ + if (errmodule) { + if (0 > PyModule_AddObject( + errmodule, sqlstate_table[i].name, exc)) { + goto exit; + } + else { + exc = NULL; /* ref stolen by the module */ + } + } + else { + Py_CLEAR(exc); + } + } + + rv = 0; + +exit: + Py_XDECREF(errmodule); + Py_XDECREF(exc); + return rv; +} + + +RAISES_NEG static int +add_module_constants(PyObject *module) +{ + PyObject *tmp; + Dprintf("psycopgmodule: initializing module constants"); + + if (0 > PyModule_AddStringConstant(module, + "__version__", xstr(PSYCOPG_VERSION))) + { return -1; } + + if (0 > PyModule_AddStringConstant(module, + "__doc__", "psycopg2 PostgreSQL driver")) + { return -1; } + + if (0 > PyModule_AddIntConstant(module, + "__libpq_version__", PG_VERSION_NUM)) + { return -1; } + + if (0 > PyModule_AddObject(module, + "apilevel", tmp = Text_FromUTF8(APILEVEL))) + { + Py_XDECREF(tmp); + return -1; + } + + if (0 > PyModule_AddObject(module, + "threadsafety", tmp = PyInt_FromLong(THREADSAFETY))) + { 
+ Py_XDECREF(tmp); + return -1; + } + + if (0 > PyModule_AddObject(module, + "paramstyle", tmp = Text_FromUTF8(PARAMSTYLE))) + { + Py_XDECREF(tmp); + return -1; + } + + if (0 > PyModule_AddIntMacro(module, REPLICATION_PHYSICAL)) { return -1; } + if (0 > PyModule_AddIntMacro(module, REPLICATION_LOGICAL)) { return -1; } + + return 0; +} + + +static struct { + char *name; + PyTypeObject *type; +} typetable[] = { + { "connection", &connectionType }, + { "cursor", &cursorType }, + { "ReplicationConnection", &replicationConnectionType }, + { "ReplicationCursor", &replicationCursorType }, + { "ReplicationMessage", &replicationMessageType }, + { "ISQLQuote", &isqlquoteType }, + { "Column", &columnType }, + { "Notify", ¬ifyType }, + { "Xid", &xidType }, + { "ConnectionInfo", &connInfoType }, + { "Diagnostics", &diagnosticsType }, + { "AsIs", &asisType }, + { "Binary", &binaryType }, + { "Boolean", &pbooleanType }, + { "Decimal", &pdecimalType }, + { "Int", &pintType }, + { "Float", &pfloatType }, + { "List", &listType }, + { "QuotedString", &qstringType }, + { "lobject", &lobjectType }, + {NULL} /* Sentinel */ +}; + +RAISES_NEG static int +add_module_types(PyObject *module) +{ + int i; + + Dprintf("psycopgmodule: initializing module types"); + + for (i = 0; typetable[i].name; i++) { + PyObject *type = (PyObject *)typetable[i].type; + + Py_SET_TYPE(typetable[i].type, &PyType_Type); + if (0 > PyType_Ready(typetable[i].type)) { return -1; } + + Py_INCREF(type); + if (0 > PyModule_AddObject(module, typetable[i].name, type)) { + Py_DECREF(type); + return -1; + } + } + return 0; +} + + +RAISES_NEG static int +datetime_init(void) +{ + PyObject *dt = NULL; + + Dprintf("psycopgmodule: initializing datetime module"); + + /* import python builtin datetime module, if available */ + if (!(dt = PyImport_ImportModule("datetime"))) { + return -1; + } + Py_DECREF(dt); + + /* Initialize the PyDateTimeAPI everywhere is used */ + PyDateTime_IMPORT; + if (0 > adapter_datetime_init()) { return 
-1; } + if (0 > repl_curs_datetime_init()) { return -1; } + if (0 > replmsg_datetime_init()) { return -1; } + + Py_SET_TYPE(&pydatetimeType, &PyType_Type); + if (0 > PyType_Ready(&pydatetimeType)) { return -1; } + + return 0; +} + +/** method table and module initialization **/ + +static PyMethodDef psycopgMethods[] = { + {"_connect", (PyCFunction)psyco_connect, + METH_VARARGS|METH_KEYWORDS, psyco_connect_doc}, + {"parse_dsn", (PyCFunction)parse_dsn, + METH_VARARGS|METH_KEYWORDS, parse_dsn_doc}, + {"quote_ident", (PyCFunction)quote_ident, + METH_VARARGS|METH_KEYWORDS, quote_ident_doc}, + {"adapt", (PyCFunction)psyco_microprotocols_adapt, + METH_VARARGS, psyco_microprotocols_adapt_doc}, + + {"register_type", (PyCFunction)register_type, + METH_VARARGS, register_type_doc}, + {"new_type", (PyCFunction)typecast_from_python, + METH_VARARGS|METH_KEYWORDS, typecast_from_python_doc}, + {"new_array_type", (PyCFunction)typecast_array_from_python, + METH_VARARGS|METH_KEYWORDS, typecast_array_from_python_doc}, + {"libpq_version", (PyCFunction)libpq_version, + METH_NOARGS, libpq_version_doc}, + + {"Date", (PyCFunction)psyco_Date, + METH_VARARGS, psyco_Date_doc}, + {"Time", (PyCFunction)psyco_Time, + METH_VARARGS, psyco_Time_doc}, + {"Timestamp", (PyCFunction)psyco_Timestamp, + METH_VARARGS, psyco_Timestamp_doc}, + {"DateFromTicks", (PyCFunction)psyco_DateFromTicks, + METH_VARARGS, psyco_DateFromTicks_doc}, + {"TimeFromTicks", (PyCFunction)psyco_TimeFromTicks, + METH_VARARGS, psyco_TimeFromTicks_doc}, + {"TimestampFromTicks", (PyCFunction)psyco_TimestampFromTicks, + METH_VARARGS, psyco_TimestampFromTicks_doc}, + + {"DateFromPy", (PyCFunction)psyco_DateFromPy, + METH_VARARGS, psyco_DateFromPy_doc}, + {"TimeFromPy", (PyCFunction)psyco_TimeFromPy, + METH_VARARGS, psyco_TimeFromPy_doc}, + {"TimestampFromPy", (PyCFunction)psyco_TimestampFromPy, + METH_VARARGS, psyco_TimestampFromPy_doc}, + {"IntervalFromPy", (PyCFunction)psyco_IntervalFromPy, + METH_VARARGS, psyco_IntervalFromPy_doc}, 
+ + {"set_wait_callback", (PyCFunction)psyco_set_wait_callback, + METH_O, psyco_set_wait_callback_doc}, + {"get_wait_callback", (PyCFunction)psyco_get_wait_callback, + METH_NOARGS, psyco_get_wait_callback_doc}, + {"encrypt_password", (PyCFunction)encrypt_password, + METH_VARARGS|METH_KEYWORDS, encrypt_password_doc}, + + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + +static struct PyModuleDef psycopgmodule = { + PyModuleDef_HEAD_INIT, + "_psycopg", + NULL, + -1, + psycopgMethods, + NULL, + NULL, + NULL, + NULL +}; + +#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ +#define PyMODINIT_FUNC void +#endif +PyMODINIT_FUNC +INIT_MODULE(_psycopg)(void) +{ + PyObject *module = NULL; + +#ifdef PSYCOPG_DEBUG + if (getenv("PSYCOPG_DEBUG")) + psycopg_debug_enabled = 1; +#endif + + Dprintf("psycopgmodule: initializing psycopg %s", xstr(PSYCOPG_VERSION)); + + /* initialize libcrypto threading callbacks */ + libcrypto_threads_init(); + + /* initialize types and objects not exposed to the module */ + Py_SET_TYPE(&typecastType, &PyType_Type); + if (0 > PyType_Ready(&typecastType)) { goto exit; } + + Py_SET_TYPE(&chunkType, &PyType_Type); + if (0 > PyType_Ready(&chunkType)) { goto exit; } + + Py_SET_TYPE(&errorType, &PyType_Type); + errorType.tp_base = (PyTypeObject *)PyExc_StandardError; + if (0 > PyType_Ready(&errorType)) { goto exit; } + + if (!(psyco_null = Bytes_FromString("NULL"))) { goto exit; } + + /* initialize the module */ + module = PyModule_Create(&psycopgmodule); + if (!module) { goto exit; } + + if (0 > add_module_constants(module)) { goto exit; } + if (0 > add_module_types(module)) { goto exit; } + if (0 > datetime_init()) { goto exit; } + if (0 > encodings_init(module)) { goto exit; } + if (0 > typecast_init(module)) { goto exit; } + if (0 > adapters_init(module)) { goto exit; } + if (0 > basic_errors_init(module)) { goto exit; } + if (0 > sqlstate_errors_init(module)) { goto exit; } + + Dprintf("psycopgmodule: module initialization complete"); + +exit: + 
return module; +} diff --git a/psycopg/python.h b/psycopg/python.h new file mode 100644 index 0000000000000000000000000000000000000000..491d285f0ac43dfdf932ecf89920db32c4ea2b5d --- /dev/null +++ b/psycopg/python.h @@ -0,0 +1,99 @@ +/* python.h - python version compatibility stuff + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_PYTHON_H +#define PSYCOPG_PYTHON_H 1 + +#if PY_VERSION_HEX < 0x03060000 +#error "psycopg requires Python 3.6" +#endif + +#include + +/* Since Py_TYPE() is changed to the inline static function, + * Py_TYPE(obj) = new_type must be replaced with Py_SET_TYPE(obj, new_type) + * https://docs.python.org/3.10/whatsnew/3.10.html#id2 + */ +#if PY_VERSION_HEX < 0x030900A4 + #define Py_SET_TYPE(obj, type) ((Py_TYPE(obj) = (type)), (void)0) +#endif + +/* FORMAT_CODE_PY_SSIZE_T is for Py_ssize_t: */ +#define FORMAT_CODE_PY_SSIZE_T "%" PY_FORMAT_SIZE_T "d" + +/* FORMAT_CODE_SIZE_T is for plain size_t, not for Py_ssize_t: */ +#ifdef _MSC_VER + /* For MSVC: */ + #define FORMAT_CODE_SIZE_T "%Iu" +#else + /* C99 standard format code: */ + #define FORMAT_CODE_SIZE_T "%zu" +#endif + +#define Text_Type PyUnicode_Type +#define Text_Check(s) PyUnicode_Check(s) +#define Text_Format(f,a) PyUnicode_Format(f,a) +#define Text_FromUTF8(s) PyUnicode_FromString(s) +#define Text_FromUTF8AndSize(s,n) PyUnicode_FromStringAndSize(s,n) + +#define PyInt_Type PyLong_Type +#define PyInt_Check PyLong_Check +#define PyInt_AsLong PyLong_AsLong +#define PyInt_FromLong PyLong_FromLong +#define PyInt_FromString PyLong_FromString +#define PyInt_FromSsize_t PyLong_FromSsize_t +#define PyExc_StandardError PyExc_Exception +#define PyString_FromFormat PyUnicode_FromFormat +#define Py_TPFLAGS_HAVE_ITER 0L +#define Py_TPFLAGS_HAVE_RICHCOMPARE 0L +#define Py_TPFLAGS_HAVE_WEAKREFS 0L + +#ifndef PyNumber_Int +#define PyNumber_Int PyNumber_Long +#endif + +#define Bytes_Type PyBytes_Type +#define Bytes_Check PyBytes_Check +#define Bytes_CheckExact PyBytes_CheckExact +#define Bytes_AS_STRING PyBytes_AS_STRING +#define Bytes_GET_SIZE PyBytes_GET_SIZE +#define Bytes_Size PyBytes_Size +#define Bytes_AsString PyBytes_AsString +#define Bytes_AsStringAndSize PyBytes_AsStringAndSize +#define Bytes_FromString PyBytes_FromString +#define Bytes_FromStringAndSize PyBytes_FromStringAndSize +#define 
Bytes_FromFormat PyBytes_FromFormat +#define Bytes_ConcatAndDel PyBytes_ConcatAndDel +#define _Bytes_Resize _PyBytes_Resize + +#define INIT_MODULE(m) PyInit_ ## m + +#define PyLong_FromOid(x) (PyLong_FromUnsignedLong((unsigned long)(x))) + +/* expose Oid attributes in Python C objects */ +#define T_OID T_UINT + +#endif /* !defined(PSYCOPG_PYTHON_H) */ diff --git a/psycopg/replication_connection.h b/psycopg/replication_connection.h new file mode 100644 index 0000000000000000000000000000000000000000..bf3c91c8ffea0b584a18e4e144ea604fac1cd242 --- /dev/null +++ b/psycopg/replication_connection.h @@ -0,0 +1,53 @@ +/* replication_connection.h - definition for the psycopg replication connection type + * + * Copyright (C) 2015-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_REPLICATION_CONNECTION_H +#define PSYCOPG_REPLICATION_CONNECTION_H 1 + +#include "psycopg/connection.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject replicationConnectionType; + +typedef struct replicationConnectionObject { + connectionObject conn; + + long int type; +} replicationConnectionObject; + +/* The funny constant values should help to avoid mixups with some + commonly used numbers like 1 and 2. */ +#define REPLICATION_PHYSICAL 12345678 +#define REPLICATION_LOGICAL 87654321 + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_REPLICATION_CONNECTION_H) */ diff --git a/psycopg/replication_connection_type.c b/psycopg/replication_connection_type.c new file mode 100644 index 0000000000000000000000000000000000000000..b8c1d2debd8e45c270c70dbfd882263c17cb09e9 --- /dev/null +++ b/psycopg/replication_connection_type.c @@ -0,0 +1,193 @@ +/* replication_connection_type.c - python interface to replication connection objects + * + * Copyright (C) 2015-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/replication_connection.h" +#include "psycopg/replication_message.h" +#include "psycopg/green.h" +#include "psycopg/pqpath.h" + +#include +#include + + +#define psyco_repl_conn_type_doc \ +"replication_type -- the replication connection type" + +static PyObject * +psyco_repl_conn_get_type(replicationConnectionObject *self) +{ + return PyInt_FromLong(self->type); +} + + +static int +replicationConnection_init(replicationConnectionObject *self, + PyObject *args, PyObject *kwargs) +{ + PyObject *dsn = NULL, *async = Py_False, + *item = NULL, *extras = NULL, *cursor = NULL, + *newdsn = NULL, *newargs = NULL, *dsnopts = NULL; + int ret = -1; + long int replication_type; + + /* 'replication_type' is not actually optional, but there's no + good way to put it before 'async' in the list */ + static char *kwlist[] = {"dsn", "async", "replication_type", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|Ol", kwlist, + &dsn, &async, &replication_type)) { + return ret; + } + + /* + We have to call make_dsn() to add replication-specific + connection parameters, because the DSN might be an URI (if there + were no keyword arguments to connect() it is passed unchanged). 
+ */ + if (!(dsnopts = PyDict_New())) { return ret; } + + /* all the nice stuff is located in python-level ReplicationCursor class */ + if (!(extras = PyImport_ImportModule("psycopg2.extras"))) { goto exit; } + if (!(cursor = PyObject_GetAttrString(extras, "ReplicationCursor"))) { goto exit; } + + if (replication_type == REPLICATION_PHYSICAL) { + self->type = REPLICATION_PHYSICAL; + +#define SET_ITEM(k, v) \ + if (!(item = Text_FromUTF8(#v))) { goto exit; } \ + if (PyDict_SetItemString(dsnopts, #k, item) != 0) { goto exit; } \ + Py_DECREF(item); \ + item = NULL; + + SET_ITEM(replication, true); + SET_ITEM(dbname, replication); /* required for .pgpass lookup */ + } else if (replication_type == REPLICATION_LOGICAL) { + self->type = REPLICATION_LOGICAL; + + SET_ITEM(replication, database); +#undef SET_ITEM + } else { + PyErr_SetString(PyExc_TypeError, + "replication_type must be either " + "REPLICATION_PHYSICAL or REPLICATION_LOGICAL"); + goto exit; + } + + if (!(newdsn = psyco_make_dsn(dsn, dsnopts))) { goto exit; } + if (!(newargs = PyTuple_Pack(2, newdsn, async))) { goto exit; } + + /* only attempt the connection once we've handled all possible errors */ + if ((ret = connectionType.tp_init((PyObject *)self, newargs, NULL)) < 0) { + goto exit; + } + + self->conn.autocommit = 1; + Py_INCREF(cursor); + self->conn.cursor_factory = cursor; + +exit: + Py_XDECREF(item); + Py_XDECREF(extras); + Py_XDECREF(cursor); + Py_XDECREF(newdsn); + Py_XDECREF(newargs); + Py_XDECREF(dsnopts); + + return ret; +} + +static PyObject * +replicationConnection_repr(replicationConnectionObject *self) +{ + return PyString_FromFormat( + "", + self, self->conn.dsn, self->conn.closed); +} + + +/* object calculated member list */ + +static struct PyGetSetDef replicationConnectionObject_getsets[] = { + /* override to prevent user tweaking these: */ + { "autocommit", NULL, NULL, NULL }, + { "isolation_level", NULL, NULL, NULL }, + { "set_session", NULL, NULL, NULL }, + { "set_isolation_level", 
NULL, NULL, NULL }, + { "reset", NULL, NULL, NULL }, + /* an actual getter */ + { "replication_type", + (getter)psyco_repl_conn_get_type, NULL, + psyco_repl_conn_type_doc, NULL }, + {NULL} +}; + +/* object type */ + +#define replicationConnectionType_doc \ +"A replication connection." + +PyTypeObject replicationConnectionType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.ReplicationConnection", + sizeof(replicationConnectionObject), 0, + 0, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)replicationConnection_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + (reprfunc)replicationConnection_repr, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + replicationConnectionType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + replicationConnectionObject_getsets, /*tp_getset*/ + &connectionType, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)replicationConnection_init, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ +}; diff --git a/psycopg/replication_cursor.h b/psycopg/replication_cursor.h new file mode 100644 index 0000000000000000000000000000000000000000..d102d734ff57a4400257a3d19fc43fbe3a8be58d --- /dev/null +++ b/psycopg/replication_cursor.h @@ -0,0 +1,66 @@ +/* replication_cursor.h - definition for the psycopg replication cursor type + * + * Copyright (C) 2015-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_REPLICATION_CURSOR_H +#define PSYCOPG_REPLICATION_CURSOR_H 1 + +#include "psycopg/cursor.h" +#include "libpq_support.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject replicationCursorType; + +typedef struct replicationCursorObject { + cursorObject cur; + + int consuming:1; /* if running the consume loop */ + int decode:1; /* if we should use character decoding on the messages */ + + struct timeval last_io; /* timestamp of the last exchange with the server */ + struct timeval status_interval; /* time between status packets sent to the server */ + + XLogRecPtr write_lsn; /* LSNs for replication feedback messages */ + XLogRecPtr flush_lsn; + XLogRecPtr apply_lsn; + + XLogRecPtr wal_end; /* WAL end pointer from the last exchange with the server */ + + XLogRecPtr last_msg_data_start; /* WAL pointer to the last non-keepalive message from the server */ + struct timeval last_feedback; /* timestamp of the last feedback message to the server */ + XLogRecPtr explicitly_flushed_lsn; /* the flush LSN explicitly set by the 
send_feedback call */ +} replicationCursorObject; + + +RAISES_NEG HIDDEN int repl_curs_datetime_init(void); + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_REPLICATION_CURSOR_H) */ diff --git a/psycopg/replication_cursor_type.c b/psycopg/replication_cursor_type.c new file mode 100644 index 0000000000000000000000000000000000000000..689a131cf05a5707ddac0fae6078b9d84571fa92 --- /dev/null +++ b/psycopg/replication_cursor_type.c @@ -0,0 +1,394 @@ +/* replication_cursor_type.c - python interface to replication cursor objects + * + * Copyright (C) 2015-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/replication_cursor.h" +#include "psycopg/replication_message.h" +#include "psycopg/green.h" +#include "psycopg/pqpath.h" + +#include +#include + +/* python */ +#include "datetime.h" + + +static void set_status_interval(replicationCursorObject *self, double status_interval) +{ + self->status_interval.tv_sec = (int)status_interval; + self->status_interval.tv_usec = (long)((status_interval - self->status_interval.tv_sec)*1.0e6); +} + +#define start_replication_expert_doc \ +"start_replication_expert(command, decode=False, status_interval=10) -- Start replication with a given command." + +static PyObject * +start_replication_expert(replicationCursorObject *self, + PyObject *args, PyObject *kwargs) +{ + cursorObject *curs = &self->cur; + connectionObject *conn = self->cur.conn; + PyObject *res = NULL; + PyObject *command = NULL; + double status_interval = 10; + long int decode = 0; + static char *kwlist[] = {"command", "decode", "status_interval", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|ld", kwlist, + &command, &decode, &status_interval)) { + return NULL; + } + + EXC_IF_CURS_CLOSED(curs); + EXC_IF_GREEN(start_replication_expert); + EXC_IF_TPC_PREPARED(conn, start_replication_expert); + + if (!(command = curs_validate_sql_basic((cursorObject *)self, command))) { + goto exit; + } + + if (status_interval < 1.0) { + psyco_set_error(ProgrammingError, curs, "status_interval must be >= 1 (sec)"); + return NULL; + } + + Dprintf("start_replication_expert: '%s'; decode: %ld", + Bytes_AS_STRING(command), decode); + + if (pq_execute(curs, Bytes_AS_STRING(command), conn->async, + 1 /* no_result */, 1 /* no_begin */) >= 0) { + res = Py_None; + Py_INCREF(res); + + set_status_interval(self, status_interval); + self->decode = decode; + gettimeofday(&self->last_io, NULL); + } + +exit: + Py_XDECREF(command); + return res; +} + +#define consume_stream_doc \ +"consume_stream(consumer, 
keepalive_interval=None) -- Consume replication stream." + +static PyObject * +consume_stream(replicationCursorObject *self, + PyObject *args, PyObject *kwargs) +{ + cursorObject *curs = &self->cur; + PyObject *consume = NULL, *interval = NULL, *res = NULL; + double keepalive_interval = 0; + static char *kwlist[] = {"consume", "keepalive_interval", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O", kwlist, + &consume, &interval)) { + return NULL; + } + + EXC_IF_CURS_CLOSED(curs); + EXC_IF_CURS_ASYNC(curs, consume_stream); + EXC_IF_GREEN(consume_stream); + EXC_IF_TPC_PREPARED(self->cur.conn, consume_stream); + + Dprintf("consume_stream"); + + if (interval && interval != Py_None) { + + if (PyFloat_Check(interval)) { + keepalive_interval = PyFloat_AsDouble(interval); + } else if (PyLong_Check(interval)) { + keepalive_interval = PyLong_AsDouble(interval); + } else if (PyInt_Check(interval)) { + keepalive_interval = PyInt_AsLong(interval); + } else { + psyco_set_error(ProgrammingError, curs, "keepalive_interval must be int or float"); + return NULL; + } + + if (keepalive_interval < 1.0) { + psyco_set_error(ProgrammingError, curs, "keepalive_interval must be >= 1 (sec)"); + return NULL; + } + } + + if (self->consuming) { + PyErr_SetString(ProgrammingError, + "consume_stream cannot be used when already in the consume loop"); + return NULL; + } + + if (curs->pgres == NULL || PQresultStatus(curs->pgres) != PGRES_COPY_BOTH) { + PyErr_SetString(ProgrammingError, + "consume_stream: not replicating, call start_replication first"); + return NULL; + } + CLEARPGRES(curs->pgres); + + self->consuming = 1; + if (keepalive_interval > 0) { + set_status_interval(self, keepalive_interval); + } + + if (pq_copy_both(self, consume) >= 0) { + res = Py_None; + Py_INCREF(res); + } + + self->consuming = 0; + + return res; +} + +#define read_message_doc \ +"read_message() -- Try reading a replication message from the server (non-blocking)." 
+ +static PyObject * +read_message(replicationCursorObject *self, PyObject *dummy) +{ + cursorObject *curs = &self->cur; + replicationMessageObject *msg = NULL; + + EXC_IF_CURS_CLOSED(curs); + EXC_IF_GREEN(read_message); + EXC_IF_TPC_PREPARED(self->cur.conn, read_message); + + if (pq_read_replication_message(self, &msg) < 0) { + return NULL; + } + if (msg) { + return (PyObject *)msg; + } + + Py_RETURN_NONE; +} + +#define send_feedback_doc \ +"send_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False, force=False) -- Update a replication feedback, optionally request a reply or force sending a feedback message regardless of the timeout." + +static PyObject * +send_feedback(replicationCursorObject *self, + PyObject *args, PyObject *kwargs) +{ + cursorObject *curs = &self->cur; + XLogRecPtr write_lsn = 0, flush_lsn = 0, apply_lsn = 0; + int reply = 0, force = 0; + static char* kwlist[] = {"write_lsn", "flush_lsn", "apply_lsn", "reply", "force", NULL}; + + EXC_IF_CURS_CLOSED(curs); + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|KKKii", kwlist, + &write_lsn, &flush_lsn, &apply_lsn, &reply, &force)) { + return NULL; + } + + if (write_lsn > self->write_lsn) + self->write_lsn = write_lsn; + + if (flush_lsn > self->explicitly_flushed_lsn) + self->explicitly_flushed_lsn = flush_lsn; + + if (flush_lsn > self->flush_lsn) + self->flush_lsn = flush_lsn; + + if (apply_lsn > self->apply_lsn) + self->apply_lsn = apply_lsn; + + if ((force || reply) && pq_send_replication_feedback(self, reply) < 0) { + return NULL; + } + + Py_RETURN_NONE; +} + + +RAISES_NEG int +repl_curs_datetime_init(void) +{ + PyDateTime_IMPORT; + + if (!PyDateTimeAPI) { + PyErr_SetString(PyExc_ImportError, "datetime initialization failed"); + return -1; + } + return 0; +} + +#define repl_curs_io_timestamp_doc \ +"io_timestamp -- the timestamp of latest IO with the server" + +static PyObject * +repl_curs_get_io_timestamp(replicationCursorObject *self) +{ + cursorObject *curs = &self->cur; + PyObject 
*tval, *res = NULL; + double seconds; + + EXC_IF_CURS_CLOSED(curs); + + seconds = self->last_io.tv_sec + self->last_io.tv_usec / 1.0e6; + + tval = Py_BuildValue("(d)", seconds); + if (tval) { + res = PyDateTime_FromTimestamp(tval); + Py_DECREF(tval); + } + return res; +} + +#define repl_curs_feedback_timestamp_doc \ +"feedback_timestamp -- the timestamp of the latest feedback message sent to the server" + +static PyObject * +repl_curs_get_feedback_timestamp(replicationCursorObject *self) +{ + cursorObject *curs = &self->cur; + PyObject *tval, *res = NULL; + double seconds; + + EXC_IF_CURS_CLOSED(curs); + + seconds = self->last_feedback.tv_sec + self->last_feedback.tv_usec / 1.0e6; + + tval = Py_BuildValue("(d)", seconds); + if (tval) { + res = PyDateTime_FromTimestamp(tval); + Py_DECREF(tval); + } + return res; +} + +/* object member list */ + +#define OFFSETOF(x) offsetof(replicationCursorObject, x) + +static struct PyMemberDef replicationCursorObject_members[] = { + {"wal_end", T_ULONGLONG, OFFSETOF(wal_end), READONLY, + "LSN position of the current end of WAL on the server."}, + {NULL} +}; + + +/* object method list */ + +static struct PyMethodDef replicationCursorObject_methods[] = { + {"start_replication_expert", (PyCFunction)start_replication_expert, + METH_VARARGS|METH_KEYWORDS, start_replication_expert_doc}, + {"consume_stream", (PyCFunction)consume_stream, + METH_VARARGS|METH_KEYWORDS, consume_stream_doc}, + {"read_message", (PyCFunction)read_message, + METH_NOARGS, read_message_doc}, + {"send_feedback", (PyCFunction)send_feedback, + METH_VARARGS|METH_KEYWORDS, send_feedback_doc}, + {NULL} +}; + +/* object calculated member list */ + +static struct PyGetSetDef replicationCursorObject_getsets[] = { + { "io_timestamp", + (getter)repl_curs_get_io_timestamp, NULL, + repl_curs_io_timestamp_doc, NULL }, + { "feedback_timestamp", + (getter)repl_curs_get_feedback_timestamp, NULL, + repl_curs_feedback_timestamp_doc, NULL }, + {NULL} +}; + +static int 
+replicationCursor_init(PyObject *obj, PyObject *args, PyObject *kwargs) +{ + replicationCursorObject *self = (replicationCursorObject *)obj; + + self->consuming = 0; + self->decode = 0; + + self->wal_end = 0; + + self->write_lsn = 0; + self->flush_lsn = 0; + self->apply_lsn = 0; + + return cursorType.tp_init(obj, args, kwargs); +} + +static PyObject * +replicationCursor_repr(replicationCursorObject *self) +{ + return PyString_FromFormat( + "", self, self->cur.closed); +} + + +/* object type */ + +#define replicationCursorType_doc \ +"A database replication cursor." + +PyTypeObject replicationCursorType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.ReplicationCursor", + sizeof(replicationCursorObject), 0, + 0, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)replicationCursor_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + (reprfunc)replicationCursor_repr, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + replicationCursorType_doc, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + replicationCursorObject_methods, /*tp_methods*/ + replicationCursorObject_members, /*tp_members*/ + replicationCursorObject_getsets, /*tp_getset*/ + &cursorType, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + replicationCursor_init, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ +}; diff --git a/psycopg/replication_message.h b/psycopg/replication_message.h new file mode 100644 index 0000000000000000000000000000000000000000..c03e60686a48a00921e3dd1924b5e98c54601fb8 --- /dev/null +++ b/psycopg/replication_message.h @@ -0,0 +1,58 @@ +/* replication_message.h - definition for the 
psycopg ReplicationMessage type + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef PSYCOPG_REPLICATION_MESSAGE_H +#define PSYCOPG_REPLICATION_MESSAGE_H 1 + +#include "cursor.h" +#include "libpq_support.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern HIDDEN PyTypeObject replicationMessageType; + +/* the typedef is forward-declared in psycopg.h */ +struct replicationMessageObject { + PyObject_HEAD + + cursorObject *cursor; + PyObject *payload; + + int data_size; + XLogRecPtr data_start; + XLogRecPtr wal_end; + int64_t send_time; +}; + +RAISES_NEG HIDDEN int replmsg_datetime_init(void); + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_REPLICATION_MESSAGE_H) */ diff --git a/psycopg/replication_message_type.c b/psycopg/replication_message_type.c new file mode 100644 index 0000000000000000000000000000000000000000..a137f84557d32687de0432f90dcb5ea72b63129c --- /dev/null +++ b/psycopg/replication_message_type.c @@ -0,0 +1,195 @@ +/* replication_message_type.c - python interface to ReplicationMessage objects + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/replication_message.h" + +#include "datetime.h" + +RAISES_NEG int +replmsg_datetime_init(void) +{ + PyDateTime_IMPORT; + + if (!PyDateTimeAPI) { + PyErr_SetString(PyExc_ImportError, "datetime initialization failed"); + return -1; + } + return 0; +} + + +static PyObject * +replmsg_repr(replicationMessageObject *self) +{ + return PyString_FromFormat( + "", + self, self->data_size, XLOGFMTARGS(self->data_start), XLOGFMTARGS(self->wal_end), + (long int)self->send_time); +} + +static int +replmsg_init(PyObject *obj, PyObject *args, PyObject *kwargs) +{ + PyObject *cur = NULL; + replicationMessageObject *self = (replicationMessageObject *)obj; + + if (!PyArg_ParseTuple( + args, "O!O", &cursorType, &cur, &self->payload)) { + return -1; + } + + Py_INCREF(cur); + self->cursor = (cursorObject *)cur; + Py_INCREF(self->payload); + + self->data_size = 0; + self->data_start = 0; + self->wal_end = 0; + self->send_time = 0; + + return 0; +} + +static int +replmsg_traverse(replicationMessageObject *self, visitproc visit, void *arg) +{ + Py_VISIT((PyObject *)self->cursor); + Py_VISIT(self->payload); + return 0; +} + +static int +replmsg_clear(replicationMessageObject *self) +{ + Py_CLEAR(self->cursor); + Py_CLEAR(self->payload); + return 0; +} + +static void +replmsg_dealloc(PyObject* obj) +{ + PyObject_GC_UnTrack(obj); + + replmsg_clear((replicationMessageObject*) obj); + + Py_TYPE(obj)->tp_free(obj); +} + +#define replmsg_send_time_doc \ +"send_time - Timestamp of the replication message departure from the server." 
+ +static PyObject * +replmsg_get_send_time(replicationMessageObject *self) +{ + PyObject *tval, *res = NULL; + double t; + + t = (double)self->send_time / USECS_PER_SEC + + ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); + + tval = Py_BuildValue("(d)", t); + if (tval) { + res = PyDateTime_FromTimestamp(tval); + Py_DECREF(tval); + } + + return res; +} + +#define OFFSETOF(x) offsetof(replicationMessageObject, x) + +/* object member list */ + +static struct PyMemberDef replicationMessageObject_members[] = { + {"cursor", T_OBJECT, OFFSETOF(cursor), READONLY, + "Related ReplcationCursor object."}, + {"payload", T_OBJECT, OFFSETOF(payload), READONLY, + "The actual message data."}, + {"data_size", T_INT, OFFSETOF(data_size), READONLY, + "Raw size of the message data in bytes."}, + {"data_start", T_ULONGLONG, OFFSETOF(data_start), READONLY, + "LSN position of the start of this message."}, + {"wal_end", T_ULONGLONG, OFFSETOF(wal_end), READONLY, + "LSN position of the current end of WAL on the server."}, + {NULL} +}; + +static struct PyGetSetDef replicationMessageObject_getsets[] = { + { "send_time", (getter)replmsg_get_send_time, NULL, + replmsg_send_time_doc, NULL }, + {NULL} +}; + +/* object type */ + +#define replicationMessageType_doc \ +"A replication protocol message." 
+ +PyTypeObject replicationMessageType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2.extensions.ReplicationMessage", + sizeof(replicationMessageObject), 0, + replmsg_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (reprfunc)replmsg_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + replicationMessageType_doc, /*tp_doc*/ + (traverseproc)replmsg_traverse, /*tp_traverse*/ + (inquiry)replmsg_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + replicationMessageObject_members, /*tp_members*/ + replicationMessageObject_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + replmsg_init, /*tp_init*/ + 0, /*tp_alloc*/ + PyType_GenericNew, /*tp_new*/ +}; diff --git a/psycopg/solaris_support.c b/psycopg/solaris_support.c new file mode 100644 index 0000000000000000000000000000000000000000..da95b3821e2ea9ee251ff2767709a958c82ad27a --- /dev/null +++ b/psycopg/solaris_support.c @@ -0,0 +1,58 @@ +/* solaris_support.c - emulate functions missing on Solaris + * + * Copyright (C) 2017 My Karlsson + * Copyright (c) 2018, Joyent, Inc. + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
 *
 * In addition, as a special exception, the copyright holders give
 * permission to link this program with the OpenSSL library (or with
 * modified versions of OpenSSL that use the same license as OpenSSL),
 * and distribute linked combinations including the two.
 *
 * You must obey the GNU Lesser General Public License in all respects for
 * all of the code used other than OpenSSL.
 *
 * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
 * License for more details.
 */

#define PSYCOPG_MODULE
#include "psycopg/psycopg.h"
#include "psycopg/solaris_support.h"

#if defined(__sun) && defined(__SVR4)
/* timeradd is missing on Solaris 10 */
#ifndef timeradd
/* timeradd - store the sum a + b into *c.
 *
 * A single carry adjustment is sufficient: with both operands'
 * tv_usec already normalised (< 1000000) the sum stays below 2000000.
 */
void
timeradd(struct timeval *a, struct timeval *b, struct timeval *c)
{
    c->tv_sec = a->tv_sec + b->tv_sec;
    c->tv_usec = a->tv_usec + b->tv_usec;
    if (c->tv_usec >= 1000000) {
        c->tv_usec -= 1000000;
        c->tv_sec += 1;
    }
}

/* timersub is missing on Solaris */
/* timersub - store the difference a - b into *c, borrowing one second
 * when needed to keep tv_usec in the [0, 1000000) range. */
void
timersub(struct timeval *a, struct timeval *b, struct timeval *c)
{
    c->tv_sec = a->tv_sec - b->tv_sec;
    c->tv_usec = a->tv_usec - b->tv_usec;
    if (c->tv_usec < 0) {
        c->tv_usec += 1000000;
        c->tv_sec -= 1;
    }
}
#endif /* timeradd */
#endif /* defined(__sun) && defined(__SVR4) */
diff --git a/psycopg/solaris_support.h b/psycopg/solaris_support.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba9a565bf78a1c9dd990e077864eeafd8325906f
--- /dev/null
+++ b/psycopg/solaris_support.h
@@ -0,0 +1,48 @@
+/* solaris_support.h - definitions for solaris_support.c
+ *
+ * Copyright (C) 2017 My Karlsson
+ * Copyright (c) 2018-2019, Joyent, Inc.
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ +#ifndef PSYCOPG_SOLARIS_SUPPORT_H +#define PSYCOPG_SOLARIS_SUPPORT_H + +#include "psycopg/config.h" + +#if defined(__sun) && defined(__SVR4) +#include + +#ifndef timeradd +extern HIDDEN void timeradd(struct timeval *a, struct timeval *b, struct timeval *c); +extern HIDDEN void timersub(struct timeval *a, struct timeval *b, struct timeval *c); +#endif + +#ifndef timercmp +#define timercmp(a, b, cmp) \ + (((a)->tv_sec == (b)->tv_sec) ? \ + ((a)->tv_usec cmp (b)->tv_usec) : \ + ((a)->tv_sec cmp (b)->tv_sec)) +#endif +#endif + +#endif /* !defined(PSYCOPG_SOLARIS_SUPPORT_H) */ diff --git a/psycopg/sqlstate_errors.h b/psycopg/sqlstate_errors.h new file mode 100644 index 0000000000000000000000000000000000000000..0b85dd5a30e37603864e390051c2db2a8921217a --- /dev/null +++ b/psycopg/sqlstate_errors.h @@ -0,0 +1,335 @@ +/* + * Autogenerated by 'scripts/make_errors.py'. 
+ */ + + +/* Class 02 - No Data (this is also a warning class per the SQL standard) */ +{"02000", "NoData"}, +{"02001", "NoAdditionalDynamicResultSetsReturned"}, + +/* Class 03 - SQL Statement Not Yet Complete */ +{"03000", "SqlStatementNotYetComplete"}, + +/* Class 08 - Connection Exception */ +{"08000", "ConnectionException"}, +{"08001", "SqlclientUnableToEstablishSqlconnection"}, +{"08003", "ConnectionDoesNotExist"}, +{"08004", "SqlserverRejectedEstablishmentOfSqlconnection"}, +{"08006", "ConnectionFailure"}, +{"08007", "TransactionResolutionUnknown"}, +{"08P01", "ProtocolViolation"}, + +/* Class 09 - Triggered Action Exception */ +{"09000", "TriggeredActionException"}, + +/* Class 0A - Feature Not Supported */ +{"0A000", "FeatureNotSupported"}, + +/* Class 0B - Invalid Transaction Initiation */ +{"0B000", "InvalidTransactionInitiation"}, + +/* Class 0F - Locator Exception */ +{"0F000", "LocatorException"}, +{"0F001", "InvalidLocatorSpecification"}, + +/* Class 0L - Invalid Grantor */ +{"0L000", "InvalidGrantor"}, +{"0LP01", "InvalidGrantOperation"}, + +/* Class 0P - Invalid Role Specification */ +{"0P000", "InvalidRoleSpecification"}, + +/* Class 0Z - Diagnostics Exception */ +{"0Z000", "DiagnosticsException"}, +{"0Z002", "StackedDiagnosticsAccessedWithoutActiveHandler"}, + +/* Class 20 - Case Not Found */ +{"20000", "CaseNotFound"}, + +/* Class 21 - Cardinality Violation */ +{"21000", "CardinalityViolation"}, + +/* Class 22 - Data Exception */ +{"22000", "DataException"}, +{"22001", "StringDataRightTruncation"}, +{"22002", "NullValueNoIndicatorParameter"}, +{"22003", "NumericValueOutOfRange"}, +{"22004", "NullValueNotAllowed"}, +{"22005", "ErrorInAssignment"}, +{"22007", "InvalidDatetimeFormat"}, +{"22008", "DatetimeFieldOverflow"}, +{"22009", "InvalidTimeZoneDisplacementValue"}, +{"2200B", "EscapeCharacterConflict"}, +{"2200C", "InvalidUseOfEscapeCharacter"}, +{"2200D", "InvalidEscapeOctet"}, +{"2200F", "ZeroLengthCharacterString"}, +{"2200G", 
"MostSpecificTypeMismatch"}, +{"2200H", "SequenceGeneratorLimitExceeded"}, +{"2200L", "NotAnXmlDocument"}, +{"2200M", "InvalidXmlDocument"}, +{"2200N", "InvalidXmlContent"}, +{"2200S", "InvalidXmlComment"}, +{"2200T", "InvalidXmlProcessingInstruction"}, +{"22010", "InvalidIndicatorParameterValue"}, +{"22011", "SubstringError"}, +{"22012", "DivisionByZero"}, +{"22013", "InvalidPrecedingOrFollowingSize"}, +{"22014", "InvalidArgumentForNtileFunction"}, +{"22015", "IntervalFieldOverflow"}, +{"22016", "InvalidArgumentForNthValueFunction"}, +{"22018", "InvalidCharacterValueForCast"}, +{"22019", "InvalidEscapeCharacter"}, +{"2201B", "InvalidRegularExpression"}, +{"2201E", "InvalidArgumentForLogarithm"}, +{"2201F", "InvalidArgumentForPowerFunction"}, +{"2201G", "InvalidArgumentForWidthBucketFunction"}, +{"2201W", "InvalidRowCountInLimitClause"}, +{"2201X", "InvalidRowCountInResultOffsetClause"}, +{"22021", "CharacterNotInRepertoire"}, +{"22022", "IndicatorOverflow"}, +{"22023", "InvalidParameterValue"}, +{"22024", "UnterminatedCString"}, +{"22025", "InvalidEscapeSequence"}, +{"22026", "StringDataLengthMismatch"}, +{"22027", "TrimError"}, +{"2202E", "ArraySubscriptError"}, +{"2202G", "InvalidTablesampleRepeat"}, +{"2202H", "InvalidTablesampleArgument"}, +{"22030", "DuplicateJsonObjectKeyValue"}, +{"22031", "InvalidArgumentForSqlJsonDatetimeFunction"}, +{"22032", "InvalidJsonText"}, +{"22033", "InvalidSqlJsonSubscript"}, +{"22034", "MoreThanOneSqlJsonItem"}, +{"22035", "NoSqlJsonItem"}, +{"22036", "NonNumericSqlJsonItem"}, +{"22037", "NonUniqueKeysInAJsonObject"}, +{"22038", "SingletonSqlJsonItemRequired"}, +{"22039", "SqlJsonArrayNotFound"}, +{"2203A", "SqlJsonMemberNotFound"}, +{"2203B", "SqlJsonNumberNotFound"}, +{"2203C", "SqlJsonObjectNotFound"}, +{"2203D", "TooManyJsonArrayElements"}, +{"2203E", "TooManyJsonObjectMembers"}, +{"2203F", "SqlJsonScalarRequired"}, +{"22P01", "FloatingPointException"}, +{"22P02", "InvalidTextRepresentation"}, +{"22P03", 
"InvalidBinaryRepresentation"}, +{"22P04", "BadCopyFileFormat"}, +{"22P05", "UntranslatableCharacter"}, +{"22P06", "NonstandardUseOfEscapeCharacter"}, + +/* Class 23 - Integrity Constraint Violation */ +{"23000", "IntegrityConstraintViolation"}, +{"23001", "RestrictViolation"}, +{"23502", "NotNullViolation"}, +{"23503", "ForeignKeyViolation"}, +{"23505", "UniqueViolation"}, +{"23514", "CheckViolation"}, +{"23P01", "ExclusionViolation"}, + +/* Class 24 - Invalid Cursor State */ +{"24000", "InvalidCursorState"}, + +/* Class 25 - Invalid Transaction State */ +{"25000", "InvalidTransactionState"}, +{"25001", "ActiveSqlTransaction"}, +{"25002", "BranchTransactionAlreadyActive"}, +{"25003", "InappropriateAccessModeForBranchTransaction"}, +{"25004", "InappropriateIsolationLevelForBranchTransaction"}, +{"25005", "NoActiveSqlTransactionForBranchTransaction"}, +{"25006", "ReadOnlySqlTransaction"}, +{"25007", "SchemaAndDataStatementMixingNotSupported"}, +{"25008", "HeldCursorRequiresSameIsolationLevel"}, +{"25P01", "NoActiveSqlTransaction"}, +{"25P02", "InFailedSqlTransaction"}, +{"25P03", "IdleInTransactionSessionTimeout"}, + +/* Class 26 - Invalid SQL Statement Name */ +{"26000", "InvalidSqlStatementName"}, + +/* Class 27 - Triggered Data Change Violation */ +{"27000", "TriggeredDataChangeViolation"}, + +/* Class 28 - Invalid Authorization Specification */ +{"28000", "InvalidAuthorizationSpecification"}, +{"28P01", "InvalidPassword"}, + +/* Class 2B - Dependent Privilege Descriptors Still Exist */ +{"2B000", "DependentPrivilegeDescriptorsStillExist"}, +{"2BP01", "DependentObjectsStillExist"}, + +/* Class 2D - Invalid Transaction Termination */ +{"2D000", "InvalidTransactionTermination"}, + +/* Class 2F - SQL Routine Exception */ +{"2F000", "SqlRoutineException"}, +{"2F002", "ModifyingSqlDataNotPermitted"}, +{"2F003", "ProhibitedSqlStatementAttempted"}, +{"2F004", "ReadingSqlDataNotPermitted"}, +{"2F005", "FunctionExecutedNoReturnStatement"}, + +/* Class 34 - Invalid Cursor 
Name */ +{"34000", "InvalidCursorName"}, + +/* Class 38 - External Routine Exception */ +{"38000", "ExternalRoutineException"}, +{"38001", "ContainingSqlNotPermitted"}, +{"38002", "ModifyingSqlDataNotPermittedExt"}, +{"38003", "ProhibitedSqlStatementAttemptedExt"}, +{"38004", "ReadingSqlDataNotPermittedExt"}, + +/* Class 39 - External Routine Invocation Exception */ +{"39000", "ExternalRoutineInvocationException"}, +{"39001", "InvalidSqlstateReturned"}, +{"39004", "NullValueNotAllowedExt"}, +{"39P01", "TriggerProtocolViolated"}, +{"39P02", "SrfProtocolViolated"}, +{"39P03", "EventTriggerProtocolViolated"}, + +/* Class 3B - Savepoint Exception */ +{"3B000", "SavepointException"}, +{"3B001", "InvalidSavepointSpecification"}, + +/* Class 3D - Invalid Catalog Name */ +{"3D000", "InvalidCatalogName"}, + +/* Class 3F - Invalid Schema Name */ +{"3F000", "InvalidSchemaName"}, + +/* Class 40 - Transaction Rollback */ +{"40000", "TransactionRollback"}, +{"40001", "SerializationFailure"}, +{"40002", "TransactionIntegrityConstraintViolation"}, +{"40003", "StatementCompletionUnknown"}, +{"40P01", "DeadlockDetected"}, + +/* Class 42 - Syntax Error or Access Rule Violation */ +{"42000", "SyntaxErrorOrAccessRuleViolation"}, +{"42501", "InsufficientPrivilege"}, +{"42601", "SyntaxError"}, +{"42602", "InvalidName"}, +{"42611", "InvalidColumnDefinition"}, +{"42622", "NameTooLong"}, +{"42701", "DuplicateColumn"}, +{"42702", "AmbiguousColumn"}, +{"42703", "UndefinedColumn"}, +{"42704", "UndefinedObject"}, +{"42710", "DuplicateObject"}, +{"42712", "DuplicateAlias"}, +{"42723", "DuplicateFunction"}, +{"42725", "AmbiguousFunction"}, +{"42803", "GroupingError"}, +{"42804", "DatatypeMismatch"}, +{"42809", "WrongObjectType"}, +{"42830", "InvalidForeignKey"}, +{"42846", "CannotCoerce"}, +{"42883", "UndefinedFunction"}, +{"428C9", "GeneratedAlways"}, +{"42939", "ReservedName"}, +{"42P01", "UndefinedTable"}, +{"42P02", "UndefinedParameter"}, +{"42P03", "DuplicateCursor"}, +{"42P04", 
"DuplicateDatabase"}, +{"42P05", "DuplicatePreparedStatement"}, +{"42P06", "DuplicateSchema"}, +{"42P07", "DuplicateTable"}, +{"42P08", "AmbiguousParameter"}, +{"42P09", "AmbiguousAlias"}, +{"42P10", "InvalidColumnReference"}, +{"42P11", "InvalidCursorDefinition"}, +{"42P12", "InvalidDatabaseDefinition"}, +{"42P13", "InvalidFunctionDefinition"}, +{"42P14", "InvalidPreparedStatementDefinition"}, +{"42P15", "InvalidSchemaDefinition"}, +{"42P16", "InvalidTableDefinition"}, +{"42P17", "InvalidObjectDefinition"}, +{"42P18", "IndeterminateDatatype"}, +{"42P19", "InvalidRecursion"}, +{"42P20", "WindowingError"}, +{"42P21", "CollationMismatch"}, +{"42P22", "IndeterminateCollation"}, + +/* Class 44 - WITH CHECK OPTION Violation */ +{"44000", "WithCheckOptionViolation"}, + +/* Class 53 - Insufficient Resources */ +{"53000", "InsufficientResources"}, +{"53100", "DiskFull"}, +{"53200", "OutOfMemory"}, +{"53300", "TooManyConnections"}, +{"53400", "ConfigurationLimitExceeded"}, + +/* Class 54 - Program Limit Exceeded */ +{"54000", "ProgramLimitExceeded"}, +{"54001", "StatementTooComplex"}, +{"54011", "TooManyColumns"}, +{"54023", "TooManyArguments"}, + +/* Class 55 - Object Not In Prerequisite State */ +{"55000", "ObjectNotInPrerequisiteState"}, +{"55006", "ObjectInUse"}, +{"55P02", "CantChangeRuntimeParam"}, +{"55P03", "LockNotAvailable"}, +{"55P04", "UnsafeNewEnumValueUsage"}, + +/* Class 57 - Operator Intervention */ +{"57000", "OperatorIntervention"}, +{"57014", "QueryCanceled"}, +{"57P01", "AdminShutdown"}, +{"57P02", "CrashShutdown"}, +{"57P03", "CannotConnectNow"}, +{"57P04", "DatabaseDropped"}, + +/* Class 58 - System Error (errors external to PostgreSQL itself) */ +{"58000", "SystemError"}, +{"58030", "IoError"}, +{"58P01", "UndefinedFile"}, +{"58P02", "DuplicateFile"}, + +/* Class 72 - Snapshot Failure */ +{"72000", "SnapshotTooOld"}, + +/* Class F0 - Configuration File Error */ +{"F0000", "ConfigFileError"}, +{"F0001", "LockFileExists"}, + +/* Class HV - Foreign Data 
Wrapper Error (SQL/MED) */ +{"HV000", "FdwError"}, +{"HV001", "FdwOutOfMemory"}, +{"HV002", "FdwDynamicParameterValueNeeded"}, +{"HV004", "FdwInvalidDataType"}, +{"HV005", "FdwColumnNameNotFound"}, +{"HV006", "FdwInvalidDataTypeDescriptors"}, +{"HV007", "FdwInvalidColumnName"}, +{"HV008", "FdwInvalidColumnNumber"}, +{"HV009", "FdwInvalidUseOfNullPointer"}, +{"HV00A", "FdwInvalidStringFormat"}, +{"HV00B", "FdwInvalidHandle"}, +{"HV00C", "FdwInvalidOptionIndex"}, +{"HV00D", "FdwInvalidOptionName"}, +{"HV00J", "FdwOptionNameNotFound"}, +{"HV00K", "FdwReplyHandle"}, +{"HV00L", "FdwUnableToCreateExecution"}, +{"HV00M", "FdwUnableToCreateReply"}, +{"HV00N", "FdwUnableToEstablishConnection"}, +{"HV00P", "FdwNoSchemas"}, +{"HV00Q", "FdwSchemaNotFound"}, +{"HV00R", "FdwTableNotFound"}, +{"HV010", "FdwFunctionSequenceError"}, +{"HV014", "FdwTooManyHandles"}, +{"HV021", "FdwInconsistentDescriptorInformation"}, +{"HV024", "FdwInvalidAttributeValue"}, +{"HV090", "FdwInvalidStringLengthOrBufferLength"}, +{"HV091", "FdwInvalidDescriptorFieldIdentifier"}, + +/* Class P0 - PL/pgSQL Error */ +{"P0000", "PlpgsqlError"}, +{"P0001", "RaiseException"}, +{"P0002", "NoDataFound"}, +{"P0003", "TooManyRows"}, +{"P0004", "AssertFailure"}, + +/* Class XX - Internal Error */ +{"XX000", "InternalError_"}, +{"XX001", "DataCorrupted"}, +{"XX002", "IndexCorrupted"}, diff --git a/psycopg/typecast.c b/psycopg/typecast.c new file mode 100644 index 0000000000000000000000000000000000000000..c1facc49e0097f8c376b68506386d2c9a8d9bf85 --- /dev/null +++ b/psycopg/typecast.c @@ -0,0 +1,620 @@ +/* typecast.c - basic utility functions related to typecasting + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
 *
 * psycopg2 is free software: you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * In addition, as a special exception, the copyright holders give
 * permission to link this program with the OpenSSL library (or with
 * modified versions of OpenSSL that use the same license as OpenSSL),
 * and distribute linked combinations including the two.
 *
 * You must obey the GNU Lesser General Public License in all respects for
 * all of the code used other than OpenSSL.
 *
 * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
 * License for more details.
 */

#define PSYCOPG_MODULE
#include "psycopg/psycopg.h"

#include "psycopg/typecast.h"
#include "psycopg/cursor.h"

/* useful function used by some typecasters */

/* skip_until_space2 - advance s to the next space (or end of input),
 * decrementing *len by the number of characters consumed. */
static const char *
skip_until_space2(const char *s, Py_ssize_t *len)
{
    while (*len > 0 && *s && *s != ' ') {
        s++; (*len)--;
    }
    return s;
}

/* typecast_parse_date - parse the date part of a Postgres text value.
 *
 * Accumulates runs of digits and flushes them into *year, *month, *day
 * at each separator ('-', ' ' or 'T').  On return *t (if not NULL)
 * points just past the consumed characters and *len has been reduced
 * accordingly.  Returns the number of fields filled in (3 on a full
 * date); no validation of the digit characters themselves is done.
 */
static int
typecast_parse_date(const char* s, const char** t, Py_ssize_t* len,
                    int* year, int* month, int* day)
{
    int acc = -1, cz = 0;   /* acc: current digit run (-1 = empty); cz: fields flushed */

    Dprintf("typecast_parse_date: len = " FORMAT_CODE_PY_SSIZE_T ", s = %s",
            *len, s);

    while (cz < 3 && *len > 0 && *s) {
        switch (*s) {
        case '-':
        case ' ':
        case 'T':
            if (cz == 0) *year = acc;
            else if (cz == 1) *month = acc;
            else if (cz == 2) *day = acc;
            acc = -1; cz++;
            break;
        default:
            acc = (acc == -1 ? 0 : acc*10) + ((int)*s - (int)'0');
            break;
        }

        s++; (*len)--;
    }

    /* flush a trailing digit run not followed by a separator */
    if (acc != -1) {
        *day = acc;
        cz += 1;
    }

    /* Is this a BC date? If so, adjust the year value. However
     * Python datetime module does not support BC dates, so this will raise
     * an exception downstream. */
    if (*len >= 2 && s[*len-2] == 'B' && s[*len-1] == 'C')
        *year = -(*year);

    if (t != NULL) *t = s;

    return cz;
}

/* typecast_parse_time - parse the time (and timezone) part of a value.
 *
 * Same accumulator scheme as typecast_parse_date; cz counts the fields
 * hh, mm, ss, us, tzhh, tzmm, tzss in order.  *us and *tz are zeroed up
 * front because they may be missing from the input.
 */
static int
typecast_parse_time(const char* s, const char** t, Py_ssize_t* len,
                    int* hh, int* mm, int* ss, int* us, int* tz)
{
    int acc = -1, cz = 0;
    int tzsign = 1, tzhh = 0, tzmm = 0, tzss = 0;
    int usd = 0;    /* number of fractional-second digits seen */

    /* sets microseconds and timezone to 0 because they may be missing */
    *us = *tz = 0;

    Dprintf("typecast_parse_time: len = " FORMAT_CODE_PY_SSIZE_T ", s = %s",
            *len, s);

    while (cz < 7 && *len > 0 && *s) {
        switch (*s) {
        case ':':
            if (cz == 0) *hh = acc;
            else if (cz == 1) *mm = acc;
            else if (cz == 2) *ss = acc;
            else if (cz == 3) *us = acc;
            else if (cz == 4) tzhh = acc;
            else if (cz == 5) tzmm = acc;
            acc = -1; cz++;
            break;
        case '.':
            /* we expect seconds and if we don't get them we return an error */
            if (cz != 2) return -1;
            *ss = acc;
            acc = -1; cz++;
            break;
        case '+':
        case '-':
            /* seconds or microseconds here, anything else is an error */
            if (cz < 2 || cz > 3) return -1;
            if (*s == '-') tzsign = -1;
            if (cz == 2) *ss = acc;
            else if (cz == 3) *us = acc;
            acc = -1; cz = 4;   /* jump straight to the timezone fields */
            break;
        case ' ':
        case 'B':
        case 'C':
            /* Ignore the " BC" suffix, if passed -- it is handled
             * when parsing the date portion. */
            break;
        default:
            acc = (acc == -1 ?
0 : acc*10) + ((int)*s - (int)'0'); + if (cz == 3) usd += 1; + break; + } + + s++; (*len)--; + } + + if (acc != -1) { + if (cz == 0) { *hh = acc; cz += 1; } + else if (cz == 1) { *mm = acc; cz += 1; } + else if (cz == 2) { *ss = acc; cz += 1; } + else if (cz == 3) { *us = acc; cz += 1; } + else if (cz == 4) { tzhh = acc; cz += 1; } + else if (cz == 5) { tzmm = acc; cz += 1; } + else if (cz == 6) tzss = acc; + } + if (t != NULL) *t = s; + + *tz = tzsign * (3600 * tzhh + 60 * tzmm + tzss); + + if (*us != 0) { + while (usd++ < 6) *us *= 10; + } + + /* 24:00:00 -> 00:00:00 (ticket #278) */ + if (*hh == 24) { *hh = 0; } + + return cz; +} + +/** include casting objects **/ +#include "psycopg/typecast_basic.c" +#include "psycopg/typecast_binary.c" +#include "psycopg/typecast_datetime.c" +#include "psycopg/typecast_array.c" + +static long int typecast_default_DEFAULT[] = {0}; +static typecastObject_initlist typecast_default = { + "DEFAULT", typecast_default_DEFAULT, typecast_STRING_cast}; + +static PyObject * +typecast_UNKNOWN_cast(const char *str, Py_ssize_t len, PyObject *curs) +{ + Dprintf("typecast_UNKNOWN_cast: str = '%s'," + " len = " FORMAT_CODE_PY_SSIZE_T, str, len); + + return typecast_default.cast(str, len, curs); +} + +#include "psycopg/typecast_builtins.c" + +#define typecast_PYDATETIMEARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_PYDATETIMETZARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_PYDATEARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_PYTIMEARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_PYINTERVALARRAY_cast typecast_GENERIC_ARRAY_cast + +/* a list of initializers, used to make the typecasters accessible anyway */ +static typecastObject_initlist typecast_pydatetime[] = { + {"PYDATETIME", typecast_DATETIME_types, typecast_PYDATETIME_cast}, + {"PYDATETIMETZ", typecast_DATETIMETZ_types, typecast_PYDATETIMETZ_cast}, + {"PYTIME", typecast_TIME_types, typecast_PYTIME_cast}, + {"PYDATE", typecast_DATE_types, 
typecast_PYDATE_cast}, + {"PYINTERVAL", typecast_INTERVAL_types, typecast_PYINTERVAL_cast}, + {"PYDATETIMEARRAY", typecast_DATETIMEARRAY_types, typecast_PYDATETIMEARRAY_cast, "PYDATETIME"}, + {"PYDATETIMETZARRAY", typecast_DATETIMETZARRAY_types, typecast_PYDATETIMETZARRAY_cast, "PYDATETIMETZ"}, + {"PYTIMEARRAY", typecast_TIMEARRAY_types, typecast_PYTIMEARRAY_cast, "PYTIME"}, + {"PYDATEARRAY", typecast_DATEARRAY_types, typecast_PYDATEARRAY_cast, "PYDATE"}, + {"PYINTERVALARRAY", typecast_INTERVALARRAY_types, typecast_PYINTERVALARRAY_cast, "PYINTERVAL"}, + {NULL, NULL, NULL} +}; + + +/** the type dictionary and associated functions **/ + +PyObject *psyco_types; +PyObject *psyco_default_cast; +PyObject *psyco_binary_types; +PyObject *psyco_default_binary_cast; + + +/* typecast_init - initialize the dictionary and create default types */ + +RAISES_NEG int +typecast_init(PyObject *module) +{ + int i; + int rv = -1; + typecastObject *t = NULL; + PyObject *dict = NULL; + + if (!(dict = PyModule_GetDict(module))) { goto exit; } + + /* create type dictionary and put it in module namespace */ + if (!(psyco_types = PyDict_New())) { goto exit; } + PyDict_SetItemString(dict, "string_types", psyco_types); + + if (!(psyco_binary_types = PyDict_New())) { goto exit; } + PyDict_SetItemString(dict, "binary_types", psyco_binary_types); + + /* insert the cast types into the 'types' dictionary and register them in + the module dictionary */ + for (i = 0; typecast_builtins[i].name != NULL; i++) { + t = (typecastObject *)typecast_from_c(&(typecast_builtins[i]), dict); + if (t == NULL) { goto exit; } + if (typecast_add((PyObject *)t, NULL, 0) < 0) { goto exit; } + + PyDict_SetItem(dict, t->name, (PyObject *)t); + + /* export binary object */ + if (typecast_builtins[i].values == typecast_BINARY_types) { + Py_INCREF((PyObject *)t); + psyco_default_binary_cast = (PyObject *)t; + } + Py_DECREF((PyObject *)t); + t = NULL; + } + + /* create and save a default cast object (but do not register it) 
*/ + psyco_default_cast = typecast_from_c(&typecast_default, dict); + + /* register the date/time typecasters with their original names */ + if (0 > typecast_datetime_init()) { goto exit; } + for (i = 0; typecast_pydatetime[i].name != NULL; i++) { + t = (typecastObject *)typecast_from_c(&(typecast_pydatetime[i]), dict); + if (t == NULL) { goto exit; } + PyDict_SetItem(dict, t->name, (PyObject *)t); + Py_DECREF((PyObject *)t); + t = NULL; + } + + rv = 0; + +exit: + Py_XDECREF((PyObject *)t); + return rv; +} + +/* typecast_add - add a type object to the dictionary */ +RAISES_NEG int +typecast_add(PyObject *obj, PyObject *dict, int binary) +{ + PyObject *val; + Py_ssize_t len, i; + + typecastObject *type = (typecastObject *)obj; + + if (dict == NULL) + dict = (binary ? psyco_binary_types : psyco_types); + + len = PyTuple_Size(type->values); + for (i = 0; i < len; i++) { + val = PyTuple_GetItem(type->values, i); + PyDict_SetItem(dict, val, obj); + } + + return 0; +} + + +/** typecast type **/ + +#define OFFSETOF(x) offsetof(typecastObject, x) + +static int +typecast_cmp(PyObject *obj1, PyObject* obj2) +{ + typecastObject *self = (typecastObject*)obj1; + typecastObject *other = NULL; + PyObject *number = NULL; + Py_ssize_t i, j; + int res = -1; + + if (PyObject_TypeCheck(obj2, &typecastType)) { + other = (typecastObject*)obj2; + } + else { + number = PyNumber_Int(obj2); + } + + Dprintf("typecast_cmp: other = %p, number = %p", other, number); + + for (i=0; i < PyObject_Length(self->values) && res == -1; i++) { + long int val = PyInt_AsLong(PyTuple_GET_ITEM(self->values, i)); + + if (other != NULL) { + for (j=0; j < PyObject_Length(other->values); j++) { + if (PyInt_AsLong(PyTuple_GET_ITEM(other->values, j)) == val) { + res = 0; break; + } + } + } + + else if (number != NULL) { + if (PyInt_AsLong(number) == val) { + res = 0; break; + } + } + } + + Py_XDECREF(number); + return res; +} + +static PyObject* +typecast_richcompare(PyObject *obj1, PyObject* obj2, int opid) +{ + 
int res = typecast_cmp(obj1, obj2); + + if (PyErr_Occurred()) return NULL; + + return PyBool_FromLong((opid == Py_EQ && res == 0) || (opid != Py_EQ && res != 0)); +} + +static struct PyMemberDef typecastObject_members[] = { + {"name", T_OBJECT, OFFSETOF(name), READONLY}, + {"values", T_OBJECT, OFFSETOF(values), READONLY}, + {NULL} +}; + +static int +typecast_clear(typecastObject *self) +{ + Py_CLEAR(self->values); + Py_CLEAR(self->name); + Py_CLEAR(self->pcast); + Py_CLEAR(self->bcast); + return 0; +} + +static void +typecast_dealloc(typecastObject *self) +{ + PyObject_GC_UnTrack(self); + typecast_clear(self); + Py_TYPE(self)->tp_free((PyObject *)self); +} + +static int +typecast_traverse(typecastObject *self, visitproc visit, void *arg) +{ + Py_VISIT(self->values); + Py_VISIT(self->name); + Py_VISIT(self->pcast); + Py_VISIT(self->bcast); + return 0; +} + +static PyObject * +typecast_repr(PyObject *self) +{ + PyObject *name = ((typecastObject *)self)->name; + PyObject *rv; + + Py_INCREF(name); + if (!(name = psyco_ensure_bytes(name))) { + return NULL; + } + + rv = PyString_FromFormat("<%s '%s' at %p>", + Py_TYPE(self)->tp_name, Bytes_AS_STRING(name), self); + + Py_DECREF(name); + return rv; +} + +static PyObject * +typecast_call(PyObject *obj, PyObject *args, PyObject *kwargs) +{ + const char *string; + Py_ssize_t length; + PyObject *cursor; + + if (!PyArg_ParseTuple(args, "z#O", &string, &length, &cursor)) { + return NULL; + } + + // If the string is not a string but a None value we're being called + // from a Python-defined caster. 
+ if (!string) { + Py_RETURN_NONE; + } + + return typecast_cast(obj, string, length, cursor); +} + +PyTypeObject typecastType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2._psycopg.type", + sizeof(typecastObject), 0, + (destructor)typecast_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_reserved*/ + typecast_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + typecast_call, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_RICHCOMPARE | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + "psycopg type-casting object", /*tp_doc*/ + (traverseproc)typecast_traverse, /*tp_traverse*/ + (inquiry)typecast_clear, /*tp_clear*/ + typecast_richcompare, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + typecastObject_members, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ +}; + +static PyObject * +typecast_new(PyObject *name, PyObject *values, PyObject *cast, PyObject *base) +{ + typecastObject *obj; + + obj = PyObject_GC_New(typecastObject, &typecastType); + if (obj == NULL) return NULL; + + Py_INCREF(values); + obj->values = values; + + if (name) { + Py_INCREF(name); + obj->name = name; + } + else { + Py_INCREF(Py_None); + obj->name = Py_None; + } + + obj->pcast = NULL; + obj->ccast = NULL; + obj->bcast = base; + + if (obj->bcast) Py_INCREF(obj->bcast); + + /* FIXME: raise an exception when None is passed as Python caster */ + if (cast && cast != Py_None) { + Py_INCREF(cast); + obj->pcast = cast; + } + + PyObject_GC_Track(obj); + + return (PyObject *)obj; +} + +PyObject * +typecast_from_python(PyObject *self, PyObject *args, PyObject *keywds) +{ + PyObject *v, *name = NULL, *cast = NULL, *base = NULL; + + static char 
*kwlist[] = {"values", "name", "castobj", "baseobj", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, keywds, "O!|O!OO", kwlist, + &PyTuple_Type, &v, + &Text_Type, &name, + &cast, &base)) { + return NULL; + } + + return typecast_new(name, v, cast, base); +} + +PyObject * +typecast_array_from_python(PyObject *self, PyObject *args, PyObject *keywds) +{ + PyObject *values, *name = NULL, *base = NULL; + typecastObject *obj = NULL; + + static char *kwlist[] = {"values", "name", "baseobj", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, keywds, "O!O!O!", kwlist, + &PyTuple_Type, &values, + &Text_Type, &name, + &typecastType, &base)) { + return NULL; + } + + if ((obj = (typecastObject *)typecast_new(name, values, NULL, base))) { + obj->ccast = typecast_GENERIC_ARRAY_cast; + obj->pcast = NULL; + } + + return (PyObject *)obj; +} + +PyObject * +typecast_from_c(typecastObject_initlist *type, PyObject *dict) +{ + PyObject *name = NULL, *values = NULL, *base = NULL; + typecastObject *obj = NULL; + Py_ssize_t i, len = 0; + + /* before doing anything else we look for the base */ + if (type->base) { + /* NOTE: base is a borrowed reference! 
*/ + base = PyDict_GetItemString(dict, type->base); + if (!base) { + PyErr_Format(Error, "typecast base not found: %s", type->base); + goto end; + } + } + + name = Text_FromUTF8(type->name); + if (!name) goto end; + + while (type->values[len] != 0) len++; + + values = PyTuple_New(len); + if (!values) goto end; + + for (i = 0; i < len ; i++) { + PyTuple_SET_ITEM(values, i, PyInt_FromLong(type->values[i])); + } + + obj = (typecastObject *)typecast_new(name, values, NULL, base); + + if (obj) { + obj->ccast = type->cast; + obj->pcast = NULL; + } + + end: + Py_XDECREF(values); + Py_XDECREF(name); + return (PyObject *)obj; +} + +PyObject * +typecast_cast(PyObject *obj, const char *str, Py_ssize_t len, PyObject *curs) +{ + PyObject *old, *res = NULL; + typecastObject *self = (typecastObject *)obj; + + Py_INCREF(obj); + old = ((cursorObject*)curs)->caster; + ((cursorObject*)curs)->caster = obj; + + if (self->ccast) { + res = self->ccast(str, len, curs); + } + else if (self->pcast) { + PyObject *s; + /* XXX we have bytes in the adapters and strings in the typecasters. + * are you sure this is ok? + * Notice that this way it is about impossible to create a python + * typecaster on a binary type. */ + if (str) { + s = conn_decode(((cursorObject *)curs)->conn, str, len); + } + else { + Py_INCREF(Py_None); + s = Py_None; + } + if (s) { + res = PyObject_CallFunctionObjArgs(self->pcast, s, curs, NULL); + Py_DECREF(s); + } + } + else { + PyErr_SetString(Error, "internal error: no casting function found"); + } + + ((cursorObject*)curs)->caster = old; + Py_DECREF(obj); + + return res; +} diff --git a/psycopg/typecast.h b/psycopg/typecast.h new file mode 100644 index 0000000000000000000000000000000000000000..050345f4384dff5d43946d244e5736874602103f --- /dev/null +++ b/psycopg/typecast.h @@ -0,0 +1,91 @@ +/* typecast.h - definitions for typecasters + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_TYPECAST_H +#define PSYCOPG_TYPECAST_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* type of type-casting functions (both C and Python) */ +typedef PyObject *(*typecast_function)(const char *str, Py_ssize_t len, + PyObject *cursor); + +/** typecast type **/ + +extern HIDDEN PyTypeObject typecastType; + +typedef struct { + PyObject_HEAD + + PyObject *name; /* the name of this type */ + PyObject *values; /* the different types this instance can match */ + + typecast_function ccast; /* the C casting function */ + PyObject *pcast; /* the python casting function */ + PyObject *bcast; /* base cast, used by array typecasters */ +} typecastObject; + +/* the initialization values are stored here */ + +typedef struct { + char *name; + long int *values; + typecast_function cast; + + /* base is the base typecaster for arrays */ + char *base; +} typecastObject_initlist; + +/* the type dictionary, much faster to access it globally */ +extern HIDDEN PyObject *psyco_types; +extern HIDDEN PyObject *psyco_binary_types; + +/* the default casting objects, 
used when no other objects are available */ +extern HIDDEN PyObject *psyco_default_cast; +extern HIDDEN PyObject *psyco_default_binary_cast; + +/** exported functions **/ + +/* used by module.c to init the type system and register types */ +RAISES_NEG HIDDEN int typecast_init(PyObject *dict); +RAISES_NEG HIDDEN int typecast_add(PyObject *obj, PyObject *dict, int binary); + +/* the C callable typecastObject creator function */ +HIDDEN PyObject *typecast_from_c(typecastObject_initlist *type, PyObject *d); + +/* the python callable typecast creator functions */ +HIDDEN PyObject *typecast_from_python( + PyObject *self, PyObject *args, PyObject *keywds); +HIDDEN PyObject *typecast_array_from_python( + PyObject *self, PyObject *args, PyObject *keywds); + +/* the function used to dispatch typecasting calls */ +HIDDEN PyObject *typecast_cast( + PyObject *self, const char *str, Py_ssize_t len, PyObject *curs); + +#endif /* !defined(PSYCOPG_TYPECAST_H) */ diff --git a/psycopg/typecast_array.c b/psycopg/typecast_array.c new file mode 100644 index 0000000000000000000000000000000000000000..7eac99d9dc553e4a7e4a5b2dc74ee2ef90ca3681 --- /dev/null +++ b/psycopg/typecast_array.c @@ -0,0 +1,298 @@ +/* typecast_array.c - array typecasters + * + * Copyright (C) 2005-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. 
 *
 * You must obey the GNU Lesser General Public License in all respects for
 * all of the code used other than OpenSSL.
 *
 * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 */

/* maximum nesting depth of arrays we are willing to parse */
#define MAX_DIMENSIONS 16

/** typecast_array_cleanup - remove the horrible [...]= stuff **/

/* PostgreSQL may prefix an array literal with explicit bounds decoration,
 * e.g. "[0:1]={1,2}".  Skip everything up to and including the '=' so the
 * scanner only ever sees the braced body.
 *
 * On success advance *str past the '=', shrink *len accordingly and
 * return 0; return -1 if the decoration is malformed.
 */
static int
typecast_array_cleanup(const char **str, Py_ssize_t *len)
{
    Py_ssize_t i, depth = 1;

    if ((*str)[0] != '[') return -1;

    /* walk to the ']' matching the opening '[', tracking nesting */
    for (i=1 ; depth > 0 && i < *len ; i++) {
        if ((*str)[i] == '[')
            depth += 1;
        else if ((*str)[i] == ']')
            depth -= 1;
    }
    /* the bounds decoration must be immediately followed by '=' */
    if ((*str)[i] != '=') return -1;

    *str = &((*str)[i+1]);
    *len = *len - i - 1;
    return 0;
}

/** typecast_array_scan - scan a string looking for array items **/

/* return codes of typecast_array_tokenize() */
#define ASCAN_ERROR -1
#define ASCAN_EOF 0
#define ASCAN_BEGIN 1
#define ASCAN_END 2
#define ASCAN_TOKEN 3
#define ASCAN_QUOTED 4

/* Extract the next token from the array literal `str` (length `strlength`),
 * starting at *pos.
 *
 * Outputs: *token points at the token text (a freshly PyMem_Malloc'ed
 * buffer when the token contained backslash escapes, i.e. on ASCAN_QUOTED;
 * otherwise a pointer into `str`), *length its length, *quotes whether the
 * token was double-quoted.  *pos is advanced past the token and any
 * trailing ','.  Returns one of the ASCAN_* codes above.
 */
static int
typecast_array_tokenize(const char *str, Py_ssize_t strlength,
                        Py_ssize_t *pos, char** token,
                        Py_ssize_t *length, int *quotes)
{
    /* FORTRAN glory */
    Py_ssize_t i, l;
    int q, b, res;

    Dprintf("typecast_array_tokenize: '%s', "
            FORMAT_CODE_PY_SSIZE_T "/" FORMAT_CODE_PY_SSIZE_T,
            &str[*pos], *pos, strlength);

    /* we always get called with pos pointing at the start of a token, so a
       fast check is enough for ASCAN_EOF, ASCAN_BEGIN and ASCAN_END */
    if (*pos == strlength) {
        return ASCAN_EOF;
    }
    else if (str[*pos] == '{') {
        *pos += 1;
        return ASCAN_BEGIN;
    }
    else if (str[*pos] == '}') {
        *pos += 1;
        if (str[*pos] == ',')
            *pos += 1;
        return ASCAN_END;
    }

    /* now we start looking for the first unquoted ',' or '}', the only two
       tokens that can limit an array element */
    q = 0; /* if q is odd we're inside quotes */
    b = 0; /* if b is 1 we just encountered a backslash */
res = ASCAN_TOKEN; + + for (i = *pos ; i < strlength ; i++) { + switch (str[i]) { + case '"': + if (b == 0) + q += 1; + else + b = 0; + break; + + case '\\': + res = ASCAN_QUOTED; + if (b == 0) + b = 1; + else + /* we're backslashing a backslash */ + b = 0; + break; + + case '}': + case ',': + if (b == 0 && ((q&1) == 0)) + goto tokenize; + break; + + default: + /* reset the backslash counter */ + b = 0; + break; + } + } + + tokenize: + /* remove initial quoting character and calculate raw length */ + *quotes = 0; + l = i - *pos; + if (str[*pos] == '"') { + *pos += 1; + l -= 2; + *quotes = 1; + } + + if (res == ASCAN_QUOTED) { + const char *j, *jj; + char *buffer = PyMem_Malloc(l+1); + if (buffer == NULL) { + PyErr_NoMemory(); + return ASCAN_ERROR; + } + + *token = buffer; + + for (j = str + *pos, jj = j + l; j < jj; ++j) { + if (*j == '\\') { ++j; } + *(buffer++) = *j; + } + + *buffer = '\0'; + /* The variable that was used to indicate the size of buffer is of type + * Py_ssize_t, so a subsegment of buffer couldn't possibly exceed + * PY_SSIZE_T_MAX: */ + *length = (Py_ssize_t) (buffer - *token); + } + else { + *token = (char *)&str[*pos]; + *length = l; + } + + *pos = i; + + /* skip the comma and set position to the start of next token */ + if (str[i] == ',') *pos += 1; + + return res; +} + +RAISES_NEG static int +typecast_array_scan(const char *str, Py_ssize_t strlength, + PyObject *curs, PyObject *base, PyObject *array) +{ + int state, quotes = 0; + Py_ssize_t length = 0, pos = 0; + char *token; + + PyObject *stack[MAX_DIMENSIONS]; + size_t stack_index = 0; + + while (1) { + token = NULL; + state = typecast_array_tokenize(str, strlength, + &pos, &token, &length, "es); + Dprintf("typecast_array_scan: state = %d," + " length = " FORMAT_CODE_PY_SSIZE_T ", token = '%s'", + state, length, token); + if (state == ASCAN_TOKEN || state == ASCAN_QUOTED) { + PyObject *obj; + if (!quotes && length == 4 + && (token[0] == 'n' || token[0] == 'N') + && (token[1] == 'u' || 
token[1] == 'U') + && (token[2] == 'l' || token[2] == 'L') + && (token[3] == 'l' || token[3] == 'L')) + { + obj = typecast_cast(base, NULL, 0, curs); + } else { + obj = typecast_cast(base, token, length, curs); + } + + /* before anything else we free the memory */ + if (state == ASCAN_QUOTED) PyMem_Free(token); + if (obj == NULL) return -1; + + PyList_Append(array, obj); + Py_DECREF(obj); + } + + else if (state == ASCAN_BEGIN) { + PyObject *sub = PyList_New(0); + if (sub == NULL) return -1; + + PyList_Append(array, sub); + Py_DECREF(sub); + + if (stack_index == MAX_DIMENSIONS) { + PyErr_SetString(DataError, "excessive array dimensions"); + return -1; + } + + stack[stack_index++] = array; + array = sub; + } + + else if (state == ASCAN_ERROR) { + return -1; + } + + else if (state == ASCAN_END) { + if (stack_index == 0) { + PyErr_SetString(DataError, "unbalanced braces in array"); + return -1; + } + array = stack[--stack_index]; + } + + else if (state == ASCAN_EOF) + break; + } + + return 0; +} + + +/** GENERIC - a generic typecaster that can be used when no special actions + have to be taken on the single items **/ + +static PyObject * +typecast_GENERIC_ARRAY_cast(const char *str, Py_ssize_t len, PyObject *curs) +{ + PyObject *obj = NULL; + PyObject *base = ((typecastObject*)((cursorObject*)curs)->caster)->bcast; + + Dprintf("typecast_GENERIC_ARRAY_cast: str = '%s'," + " len = " FORMAT_CODE_PY_SSIZE_T, str, len); + + if (str == NULL) { Py_RETURN_NONE; } + if (str[0] == '[') + typecast_array_cleanup(&str, &len); + if (str[0] != '{') { + PyErr_SetString(DataError, "array does not start with '{'"); + return NULL; + } + if (str[1] == '\0') { + PyErr_SetString(DataError, "malformed array: '{'"); + return NULL; + } + + Dprintf("typecast_GENERIC_ARRAY_cast: str = '%s'," + " len = " FORMAT_CODE_PY_SSIZE_T, str, len); + + if (!(obj = PyList_New(0))) { return NULL; } + + /* scan the array skipping the first level of {} */ + if (typecast_array_scan(&str[1], len-2, curs, base, 
obj) < 0) { + Py_CLEAR(obj); + } + + return obj; +} + +/** almost all the basic array typecasters are derived from GENERIC **/ + +#define typecast_LONGINTEGERARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_INTEGERARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_FLOATARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_DECIMALARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_STRINGARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_UNICODEARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_BYTESARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_BOOLEANARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_DATETIMEARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_DATETIMETZARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_DATEARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_TIMEARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_INTERVALARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_BINARYARRAY_cast typecast_GENERIC_ARRAY_cast +#define typecast_ROWIDARRAY_cast typecast_GENERIC_ARRAY_cast diff --git a/psycopg/typecast_basic.c b/psycopg/typecast_basic.c new file mode 100644 index 0000000000000000000000000000000000000000..f73f60bca92282fa56c22d65dfe5f9d2e7a2684c --- /dev/null +++ b/psycopg/typecast_basic.c @@ -0,0 +1,150 @@ +/* pgcasts_basic.c - basic typecasting functions to python types + * + * Copyright (C) 2001-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +/** INTEGER - cast normal integers (4 bytes) to python int **/ + +#define typecast_INTEGER_cast typecast_LONGINTEGER_cast + +/** LONGINTEGER - cast long integers (8 bytes) to python long **/ + +static PyObject * +typecast_LONGINTEGER_cast(const char *s, Py_ssize_t len, PyObject *curs) +{ + char buffer[24]; + + if (s == NULL) { Py_RETURN_NONE; } + if (s[len] != '\0') { + strncpy(buffer, s, (size_t) len); buffer[len] = '\0'; + s = buffer; + } + return PyLong_FromString((char *)s, NULL, 0); +} + +/** FLOAT - cast floating point numbers to python float **/ + +static PyObject * +typecast_FLOAT_cast(const char *s, Py_ssize_t len, PyObject *curs) +{ + PyObject *str = NULL, *flo = NULL; + + if (s == NULL) { Py_RETURN_NONE; } + if (!(str = Text_FromUTF8AndSize(s, len))) { return NULL; } + flo = PyFloat_FromString(str); + Py_DECREF(str); + return flo; +} + + +/** BYTES - cast strings of any type to python bytes **/ + +static PyObject * +typecast_BYTES_cast(const char *s, Py_ssize_t len, PyObject *curs) +{ + if (s == NULL) { Py_RETURN_NONE; } + return Bytes_FromStringAndSize(s, len); +} + + +/** UNICODE - cast strings of any type to a python unicode object **/ + +static PyObject * +typecast_UNICODE_cast(const char *s, Py_ssize_t len, PyObject *curs) +{ + connectionObject *conn; + + if (s == NULL) { Py_RETURN_NONE; } + + conn = 
((cursorObject*)curs)->conn; + return conn_decode(conn, s, len); +} + + +/** STRING - cast strings of any type to python string **/ + +#define typecast_STRING_cast typecast_UNICODE_cast + + +/** BOOLEAN - cast boolean value into right python object **/ + +static PyObject * +typecast_BOOLEAN_cast(const char *s, Py_ssize_t len, PyObject *curs) +{ + PyObject *res = NULL; + + if (s == NULL) { Py_RETURN_NONE; } + + switch (s[0]) { + case 't': + case 'T': + res = Py_True; + break; + + case 'f': + case 'F': + res = Py_False; + break; + + default: + PyErr_Format(InterfaceError, "can't parse boolean: '%s'", s); + break; + } + + Py_XINCREF(res); + return res; +} + +/** DECIMAL - cast any kind of number into a Python Decimal object **/ + +static PyObject * +typecast_DECIMAL_cast(const char *s, Py_ssize_t len, PyObject *curs) +{ + PyObject *res = NULL; + PyObject *decimalType; + char *buffer; + + if (s == NULL) { Py_RETURN_NONE; } + + if ((buffer = PyMem_Malloc(len+1)) == NULL) + return PyErr_NoMemory(); + strncpy(buffer, s, (size_t) len); buffer[len] = '\0'; + decimalType = psyco_get_decimal_type(); + /* Fall back on float if decimal is not available */ + if (decimalType != NULL) { + res = PyObject_CallFunction(decimalType, "s", buffer); + Py_DECREF(decimalType); + } + else { + PyErr_Clear(); + res = PyObject_CallFunction((PyObject*)&PyFloat_Type, "s", buffer); + } + PyMem_Free(buffer); + + return res; +} + +/* some needed aliases */ +#define typecast_NUMBER_cast typecast_FLOAT_cast +#define typecast_ROWID_cast typecast_INTEGER_cast diff --git a/psycopg/typecast_binary.c b/psycopg/typecast_binary.c new file mode 100644 index 0000000000000000000000000000000000000000..e255581cc74cc057bae7a1bc348027244479b9a8 --- /dev/null +++ b/psycopg/typecast_binary.c @@ -0,0 +1,275 @@ +/* typecast_binary.c - binary typecasting functions to python types + * + * Copyright (C) 2001-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#include "typecast_binary.h" + +#include + + +/* Python object holding a memory chunk. The memory is deallocated when + the object is destroyed. This type is used to let users directly access + memory chunks holding unescaped binary data through the buffer interface. 
+ */ + +static void +chunk_dealloc(chunkObject *self) +{ + Dprintf("chunk_dealloc: deallocating memory at %p, size " + FORMAT_CODE_PY_SSIZE_T, + self->base, self->len + ); + PyMem_Free(self->base); + Py_TYPE(self)->tp_free((PyObject *)self); +} + +static PyObject * +chunk_repr(chunkObject *self) +{ + return PyString_FromFormat( + "", + self->base, self->len + ); +} + +/* 3.0 buffer interface */ +int chunk_getbuffer(PyObject *_self, Py_buffer *view, int flags) +{ + int rv; + chunkObject *self = (chunkObject*)_self; + rv = PyBuffer_FillInfo(view, _self, self->base, self->len, 1, flags); + if (rv == 0) { + view->format = "c"; + } + return rv; +} + +static PyBufferProcs chunk_as_buffer = +{ + chunk_getbuffer, + NULL, +}; + +#define chunk_doc "memory chunk" + +PyTypeObject chunkType = { + PyVarObject_HEAD_INIT(NULL, 0) + "psycopg2._psycopg.chunk", + sizeof(chunkObject), 0, + (destructor) chunk_dealloc, /* tp_dealloc*/ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + (reprfunc) chunk_repr, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + &chunk_as_buffer, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /* tp_flags */ + chunk_doc /* tp_doc */ +}; + + +static char *parse_hex( + const char *bufin, Py_ssize_t sizein, Py_ssize_t *sizeout); +static char *parse_escape( + const char *bufin, Py_ssize_t sizein, Py_ssize_t *sizeout); + +/* The function is not static and not hidden as we use ctypes to test it. 
*/ +PyObject * +typecast_BINARY_cast(const char *s, Py_ssize_t l, PyObject *curs) +{ + chunkObject *chunk = NULL; + PyObject *res = NULL; + char *buffer = NULL; + Py_ssize_t len; + + if (s == NULL) { Py_RETURN_NONE; } + + if (s[0] == '\\' && s[1] == 'x') { + /* This is a buffer escaped in hex format: libpq before 9.0 can't + * parse it and we can't detect reliably the libpq version at runtime. + * So the only robust option is to parse it ourselves - luckily it's + * an easy format. + */ + if (NULL == (buffer = parse_hex(s, l, &len))) { + goto exit; + } + } + else { + /* This is a buffer in the classic bytea format. So we can handle it + * to the PQunescapeBytea to have it parsed, right? ...Wrong. We + * could, but then we'd have to record whether buffer was allocated by + * Python or by the libpq to dispose it properly. Furthermore the + * PQunescapeBytea interface is not the most brilliant as it wants a + * null-terminated string even if we have known its length thus + * requiring a useless memcpy and strlen. + * So we'll just have our better integrated parser, let's finish this + * story. 
+ */ + if (NULL == (buffer = parse_escape(s, l, &len))) { + goto exit; + } + } + + chunk = (chunkObject *) PyObject_New(chunkObject, &chunkType); + if (chunk == NULL) goto exit; + + /* **Transfer** ownership of buffer's memory to the chunkObject: */ + chunk->base = buffer; + buffer = NULL; + chunk->len = (Py_ssize_t)len; + + if ((res = PyMemoryView_FromObject((PyObject*)chunk)) == NULL) + goto exit; + +exit: + Py_XDECREF((PyObject *)chunk); + PyMem_Free(buffer); + + return res; +} + + +static const char hex_lut[128] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, +}; + +/* Parse a bytea output buffer encoded in 'hex' format. + * + * the format is described in + * https://www.postgresql.org/docs/current/static/datatype-binary.html + * + * Parse the buffer in 'bufin', whose length is 'sizein'. + * Return a new buffer allocated by PyMem_Malloc and set 'sizeout' to its size. + * In case of error set an exception and return NULL. + */ +static char * +parse_hex(const char *bufin, Py_ssize_t sizein, Py_ssize_t *sizeout) +{ + char *ret = NULL; + const char *bufend = bufin + sizein; + const char *pi = bufin + 2; /* past the \x */ + char *bufout; + char *po; + + po = bufout = PyMem_Malloc((sizein - 2) >> 1); /* output size upper bound */ + if (NULL == bufout) { + PyErr_NoMemory(); + goto exit; + } + + /* Implementation note: we call this function upon database response, not + * user input (because we are parsing the output format of a buffer) so we + * don't expect errors. 
 On bad input we reserve the right to return a bad
 * output, not an error.
 */
    while (pi < bufend) {
        char c;
        /* skip any non-hex byte (e.g. whitespace) between digits */
        while (-1 == (c = hex_lut[*pi++ & '\x7f'])) {
            if (pi >= bufend) { goto endloop; }
        }
        *po = c << 4;   /* high nibble */

        while (-1 == (c = hex_lut[*pi++ & '\x7f'])) {
            if (pi >= bufend) { goto endloop; }
        }
        *po++ |= c;     /* low nibble completes the output byte */
    }
endloop:

    ret = bufout;
    *sizeout = po - bufout;

exit:
    return ret;
}

/* Parse a bytea output buffer encoded in 'escape' format.
 *
 * the format is described in
 * https://www.postgresql.org/docs/current/static/datatype-binary.html
 *
 * Parse the buffer in 'bufin', whose length is 'sizein'.
 * Return a new buffer allocated by PyMem_Malloc and set 'sizeout' to its size.
 * In case of error set an exception and return NULL.
 */
static char *
parse_escape(const char *bufin, Py_ssize_t sizein, Py_ssize_t *sizeout)
{
    char *ret = NULL;
    const char *bufend = bufin + sizein;
    const char *pi = bufin;
    char *bufout;
    char *po;

    po = bufout = PyMem_Malloc(sizein); /* output size upper bound */
    if (NULL == bufout) {
        PyErr_NoMemory();
        goto exit;
    }

    while (pi < bufend) {
        if (*pi != '\\') {
            /* Unescaped char */
            *po++ = *pi++;
            continue;
        }
        if ((pi[1] >= '0' && pi[1] <= '3') &&
            (pi[2] >= '0' && pi[2] <= '7') &&
            (pi[3] >= '0' && pi[3] <= '7'))
        {
            /* Escaped octal value: \ooo -> single byte */
            *po++ = ((pi[1] - '0') << 6) |
                    ((pi[2] - '0') << 3) |
                    ((pi[3] - '0'));
            pi += 4;
        }
        else {
            /* Escaped char: also covers the doubled backslash "\\\\" */
            *po++ = pi[1];
            pi += 2;
        }
    }

    ret = bufout;
    *sizeout = po - bufout;

exit:
    return ret;
}
diff --git a/psycopg/typecast_binary.h b/psycopg/typecast_binary.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6773ed875b54f90bab13b9e20fd39dfdab9d057
--- /dev/null
+++ b/psycopg/typecast_binary.h
@@ -0,0 +1,50 @@
/* typecast_binary.h - definitions for binary typecaster
 *
 * Copyright (C) 2003-2019 Federico Di Gregorio
 * Copyright (C) 2020-2021 The Psycopg Team
 *
 *
This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_TYPECAST_BINARY_H +#define PSYCOPG_TYPECAST_BINARY_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/** chunk type **/ + +extern HIDDEN PyTypeObject chunkType; + +typedef struct { + PyObject_HEAD + + void *base; /* Pointer to the memory chunk. */ + Py_ssize_t len; /* Size in bytes of the memory chunk. 
*/ + +} chunkObject; + +#ifdef __cplusplus +} +#endif + +#endif /* !defined(PSYCOPG_TYPECAST_BINARY_H) */ diff --git a/psycopg/typecast_builtins.c b/psycopg/typecast_builtins.c new file mode 100644 index 0000000000000000000000000000000000000000..0e4901d106b9932c57a5b4f260f71fdd8e90fac4 --- /dev/null +++ b/psycopg/typecast_builtins.c @@ -0,0 +1,71 @@ +static long int typecast_NUMBER_types[] = {20, 23, 21, 701, 700, 1700, 0}; +static long int typecast_LONGINTEGER_types[] = {20, 0}; +static long int typecast_INTEGER_types[] = {23, 21, 0}; +static long int typecast_FLOAT_types[] = {701, 700, 0}; +static long int typecast_DECIMAL_types[] = {1700, 0}; +static long int typecast_STRING_types[] = {19, 18, 25, 1042, 1043, 0}; +static long int typecast_BOOLEAN_types[] = {16, 0}; +static long int typecast_DATETIME_types[] = {1114, 0}; +static long int typecast_DATETIMETZ_types[] = {1184, 0}; +static long int typecast_TIME_types[] = {1083, 1266, 0}; +static long int typecast_DATE_types[] = {1082, 0}; +static long int typecast_INTERVAL_types[] = {704, 1186, 0}; +static long int typecast_BINARY_types[] = {17, 0}; +static long int typecast_ROWID_types[] = {26, 0}; +static long int typecast_LONGINTEGERARRAY_types[] = {1016, 0}; +static long int typecast_INTEGERARRAY_types[] = {1005, 1006, 1007, 0}; +static long int typecast_FLOATARRAY_types[] = {1021, 1022, 0}; +static long int typecast_DECIMALARRAY_types[] = {1231, 0}; +static long int typecast_STRINGARRAY_types[] = {1002, 1003, 1009, 1014, 1015, 0}; +static long int typecast_BOOLEANARRAY_types[] = {1000, 0}; +static long int typecast_DATETIMEARRAY_types[] = {1115, 0}; +static long int typecast_DATETIMETZARRAY_types[] = {1185, 0}; +static long int typecast_TIMEARRAY_types[] = {1183, 1270, 0}; +static long int typecast_DATEARRAY_types[] = {1182, 0}; +static long int typecast_INTERVALARRAY_types[] = {1187, 0}; +static long int typecast_BINARYARRAY_types[] = {1001, 0}; +static long int typecast_ROWIDARRAY_types[] = {1028, 1013, 0}; 
+static long int typecast_INETARRAY_types[] = {1041, 0}; +static long int typecast_CIDRARRAY_types[] = {651, 0}; +static long int typecast_MACADDRARRAY_types[] = {1040, 0}; +static long int typecast_UNKNOWN_types[] = {705, 0}; + + +static typecastObject_initlist typecast_builtins[] = { + {"NUMBER", typecast_NUMBER_types, typecast_NUMBER_cast, NULL}, + {"LONGINTEGER", typecast_LONGINTEGER_types, typecast_LONGINTEGER_cast, NULL}, + {"INTEGER", typecast_INTEGER_types, typecast_INTEGER_cast, NULL}, + {"FLOAT", typecast_FLOAT_types, typecast_FLOAT_cast, NULL}, + {"DECIMAL", typecast_DECIMAL_types, typecast_DECIMAL_cast, NULL}, + {"UNICODE", typecast_STRING_types, typecast_UNICODE_cast, NULL}, + {"BYTES", typecast_STRING_types, typecast_BYTES_cast, NULL}, + {"STRING", typecast_STRING_types, typecast_STRING_cast, NULL}, + {"BOOLEAN", typecast_BOOLEAN_types, typecast_BOOLEAN_cast, NULL}, + {"DATETIME", typecast_DATETIME_types, typecast_DATETIME_cast, NULL}, + {"DATETIMETZ", typecast_DATETIMETZ_types, typecast_DATETIMETZ_cast, NULL}, + {"TIME", typecast_TIME_types, typecast_TIME_cast, NULL}, + {"DATE", typecast_DATE_types, typecast_DATE_cast, NULL}, + {"INTERVAL", typecast_INTERVAL_types, typecast_INTERVAL_cast, NULL}, + {"BINARY", typecast_BINARY_types, typecast_BINARY_cast, NULL}, + {"ROWID", typecast_ROWID_types, typecast_ROWID_cast, NULL}, + {"LONGINTEGERARRAY", typecast_LONGINTEGERARRAY_types, typecast_LONGINTEGERARRAY_cast, "LONGINTEGER"}, + {"INTEGERARRAY", typecast_INTEGERARRAY_types, typecast_INTEGERARRAY_cast, "INTEGER"}, + {"FLOATARRAY", typecast_FLOATARRAY_types, typecast_FLOATARRAY_cast, "FLOAT"}, + {"DECIMALARRAY", typecast_DECIMALARRAY_types, typecast_DECIMALARRAY_cast, "DECIMAL"}, + {"UNICODEARRAY", typecast_STRINGARRAY_types, typecast_UNICODEARRAY_cast, "UNICODE"}, + {"BYTESARRAY", typecast_STRINGARRAY_types, typecast_BYTESARRAY_cast, "BYTES"}, + {"STRINGARRAY", typecast_STRINGARRAY_types, typecast_STRINGARRAY_cast, "STRING"}, + {"BOOLEANARRAY", 
typecast_BOOLEANARRAY_types, typecast_BOOLEANARRAY_cast, "BOOLEAN"}, + {"DATETIMEARRAY", typecast_DATETIMEARRAY_types, typecast_DATETIMEARRAY_cast, "DATETIME"}, + {"DATETIMETZARRAY", typecast_DATETIMETZARRAY_types, typecast_DATETIMETZARRAY_cast, "DATETIMETZ"}, + {"TIMEARRAY", typecast_TIMEARRAY_types, typecast_TIMEARRAY_cast, "TIME"}, + {"DATEARRAY", typecast_DATEARRAY_types, typecast_DATEARRAY_cast, "DATE"}, + {"INTERVALARRAY", typecast_INTERVALARRAY_types, typecast_INTERVALARRAY_cast, "INTERVAL"}, + {"BINARYARRAY", typecast_BINARYARRAY_types, typecast_BINARYARRAY_cast, "BINARY"}, + {"ROWIDARRAY", typecast_ROWIDARRAY_types, typecast_ROWIDARRAY_cast, "ROWID"}, + {"UNKNOWN", typecast_UNKNOWN_types, typecast_UNKNOWN_cast, NULL}, + {"INETARRAY", typecast_INETARRAY_types, typecast_STRINGARRAY_cast, "STRING"}, + {"CIDRARRAY", typecast_CIDRARRAY_types, typecast_STRINGARRAY_cast, "STRING"}, + {"MACADDRARRAY", typecast_MACADDRARRAY_types, typecast_STRINGARRAY_cast, "STRING"}, + {NULL, NULL, NULL, NULL} +}; diff --git a/psycopg/typecast_datetime.c b/psycopg/typecast_datetime.c new file mode 100644 index 0000000000000000000000000000000000000000..e5e5110da4b49ecad8c19a0ee3ff92403a9c33cd --- /dev/null +++ b/psycopg/typecast_datetime.c @@ -0,0 +1,486 @@ +/* typecast_datetime.c - date and time typecasting functions to python types + * + * Copyright (C) 2001-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
 *
 * In addition, as a special exception, the copyright holders give
 * permission to link this program with the OpenSSL library (or with
 * modified versions of OpenSSL that use the same license as OpenSSL),
 * and distribute linked combinations including the two.
 *
 * You must obey the GNU Lesser General Public License in all respects for
 * all of the code used other than OpenSSL.
 *
 * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 */

/* NOTE(review): the header name after this #include was lost in extraction
 * (angle-bracketed text stripped) — presumably <math.h> for the round()
 * calls further down; confirm against upstream. */
#include 
#include "datetime.h"

/* Import the Python datetime C API.
 *
 * Must run once before any use of PyDateTimeAPI below.
 * Returns 0 on success, -1 with ImportError set on failure. */
RAISES_NEG static int
typecast_datetime_init(void)
{
    PyDateTime_IMPORT;

    if (!PyDateTimeAPI) {
        PyErr_SetString(PyExc_ImportError, "datetime initialization failed");
        return -1;
    }
    return 0;
}

/** DATE - cast a date into a date python object **/

static PyObject *
typecast_PYDATE_cast(const char *str, Py_ssize_t len, PyObject *curs)
{
    PyObject* obj = NULL;
    int n, y=0, m=0, d=0;

    if (str == NULL) { Py_RETURN_NONE; }

    /* map PostgreSQL's special +/-infinity dates onto date.max/date.min */
    if (!strcmp(str, "infinity") || !strcmp(str, "-infinity")) {
        if (str[0] == '-') {
            obj = PyObject_GetAttrString(
                (PyObject*)PyDateTimeAPI->DateType, "min");
        }
        else {
            obj = PyObject_GetAttrString(
                (PyObject*)PyDateTimeAPI->DateType, "max");
        }
    }

    else {
        n = typecast_parse_date(str, NULL, &len, &y, &m, &d);
        Dprintf("typecast_PYDATE_cast: "
            "n = %d, len = " FORMAT_CODE_PY_SSIZE_T ", "
            "y = %d, m = %d, d = %d",
            n, len, y, m, d);
        /* a valid date parses into exactly 3 components */
        if (n != 3) {
            PyErr_SetString(DataError, "unable to parse date");
            return NULL;
        }
        else {
            /* clamp years beyond datetime.date's range to its maximum */
            if (y > 9999) y = 9999;
            obj = PyObject_CallFunction(
                (PyObject*)PyDateTimeAPI->DateType, "iii", y, m, d);
        }
    }
    return obj;
}

/* convert the strings -infinity and infinity into a datetime with timezone */
static PyObject *
_parse_inftz(const char *str, PyObject *curs)
{
PyObject *rv = NULL; + PyObject *m = NULL; + PyObject *tzinfo_factory = NULL; + PyObject *tzinfo = NULL; + PyObject *args = NULL; + PyObject *kwargs = NULL; + PyObject *replace = NULL; + + if (!(m = PyObject_GetAttrString( + (PyObject*)PyDateTimeAPI->DateTimeType, + (str[0] == '-' ? "min" : "max")))) { + goto exit; + } + + tzinfo_factory = ((cursorObject *)curs)->tzinfo_factory; + if (tzinfo_factory == Py_None) { + rv = m; + m = NULL; + goto exit; + } + +#if PY_VERSION_HEX < 0x03070000 + { + PyObject *tzoff; + if (!(tzoff = PyDelta_FromDSU(0, 0, 0))) { goto exit; } + tzinfo = PyObject_CallFunctionObjArgs(tzinfo_factory, tzoff, NULL); + Py_DECREF(tzoff); + if (!tzinfo) { goto exit; } + } +#else + tzinfo = PyDateTime_TimeZone_UTC; + Py_INCREF(tzinfo); +#endif + + /* m.replace(tzinfo=tzinfo) */ + if (!(args = PyTuple_New(0))) { goto exit; } + if (!(kwargs = PyDict_New())) { goto exit; } + if (0 != PyDict_SetItemString(kwargs, "tzinfo", tzinfo)) { goto exit; } + if (!(replace = PyObject_GetAttrString(m, "replace"))) { goto exit; } + rv = PyObject_Call(replace, args, kwargs); + +exit: + Py_XDECREF(replace); + Py_XDECREF(args); + Py_XDECREF(kwargs); + Py_XDECREF(tzinfo); + Py_XDECREF(m); + + return rv; +} + +static PyObject * +_parse_noninftz(const char *str, Py_ssize_t len, PyObject *curs) +{ + PyObject* rv = NULL; + PyObject *tzoff = NULL; + PyObject *tzinfo = NULL; + PyObject *tzinfo_factory; + int n, y=0, m=0, d=0; + int hh=0, mm=0, ss=0, us=0, tzsec=0; + const char *tp = NULL; + + Dprintf("typecast_PYDATETIMETZ_cast: s = %s", str); + n = typecast_parse_date(str, &tp, &len, &y, &m, &d); + Dprintf("typecast_PYDATE_cast: tp = %p " + "n = %d, len = " FORMAT_CODE_PY_SSIZE_T "," + " y = %d, m = %d, d = %d", + tp, n, len, y, m, d); + if (n != 3) { + PyErr_SetString(DataError, "unable to parse date"); + goto exit; + } + + if (len > 0) { + n = typecast_parse_time(tp, NULL, &len, &hh, &mm, &ss, &us, &tzsec); + Dprintf("typecast_PYDATETIMETZ_cast: n = %d," + " len = " 
FORMAT_CODE_PY_SSIZE_T "," + " hh = %d, mm = %d, ss = %d, us = %d, tzsec = %d", + n, len, hh, mm, ss, us, tzsec); + if (n < 3 || n > 6) { + PyErr_SetString(DataError, "unable to parse time"); + goto exit; + } + } + + if (ss > 59) { + mm += 1; + ss -= 60; + } + if (y > 9999) + y = 9999; + + tzinfo_factory = ((cursorObject *)curs)->tzinfo_factory; + if (n >= 5 && tzinfo_factory != Py_None) { + /* we have a time zone, calculate minutes and create + appropriate tzinfo object calling the factory */ + Dprintf("typecast_PYDATETIMETZ_cast: UTC offset = %ds", tzsec); + +#if PY_VERSION_HEX < 0x03070000 + /* Before Python 3.7 the timezone offset had to be a whole number + * of minutes, so round the seconds to the closest minute */ + tzsec = 60 * (int)round(tzsec / 60.0); +#endif + if (!(tzoff = PyDelta_FromDSU(0, tzsec, 0))) { goto exit; } + if (!(tzinfo = PyObject_CallFunctionObjArgs( + tzinfo_factory, tzoff, NULL))) { + goto exit; + } + } + else { + Py_INCREF(Py_None); + tzinfo = Py_None; + } + + Dprintf("typecast_PYDATETIMETZ_cast: tzinfo: %p, refcnt = " + FORMAT_CODE_PY_SSIZE_T, + tzinfo, Py_REFCNT(tzinfo)); + rv = PyObject_CallFunction( + (PyObject*)PyDateTimeAPI->DateTimeType, "iiiiiiiO", + y, m, d, hh, mm, ss, us, tzinfo); + +exit: + Py_XDECREF(tzoff); + Py_XDECREF(tzinfo); + return rv; +} + +/** DATETIME - cast a timestamp into a datetime python object **/ + +static PyObject * +typecast_PYDATETIME_cast(const char *str, Py_ssize_t len, PyObject *curs) +{ + if (str == NULL) { Py_RETURN_NONE; } + + /* check for infinity */ + if (!strcmp(str, "infinity") || !strcmp(str, "-infinity")) { + return PyObject_GetAttrString( + (PyObject*)PyDateTimeAPI->DateTimeType, + (str[0] == '-' ? 
"min" : "max")); + } + + return _parse_noninftz(str, len, curs); +} + +/** DATETIMETZ - cast a timestamptz into a datetime python object **/ + +static PyObject * +typecast_PYDATETIMETZ_cast(const char *str, Py_ssize_t len, PyObject *curs) +{ + if (str == NULL) { Py_RETURN_NONE; } + + if (!strcmp(str, "infinity") || !strcmp(str, "-infinity")) { + return _parse_inftz(str, curs); + } + + return _parse_noninftz(str, len, curs); +} + +/** TIME - parse time into a time object **/ + +static PyObject * +typecast_PYTIME_cast(const char *str, Py_ssize_t len, PyObject *curs) +{ + PyObject* rv = NULL; + PyObject *tzoff = NULL; + PyObject *tzinfo = NULL; + PyObject *tzinfo_factory; + int n, hh=0, mm=0, ss=0, us=0, tzsec=0; + + if (str == NULL) { Py_RETURN_NONE; } + + n = typecast_parse_time(str, NULL, &len, &hh, &mm, &ss, &us, &tzsec); + Dprintf("typecast_PYTIME_cast: n = %d, len = " FORMAT_CODE_PY_SSIZE_T ", " + "hh = %d, mm = %d, ss = %d, us = %d, tzsec = %d", + n, len, hh, mm, ss, us, tzsec); + + if (n < 3 || n > 6) { + PyErr_SetString(DataError, "unable to parse time"); + return NULL; + } + if (ss > 59) { + mm += 1; + ss -= 60; + } + tzinfo_factory = ((cursorObject *)curs)->tzinfo_factory; + if (n >= 5 && tzinfo_factory != Py_None) { + /* we have a time zone, calculate seconds and create + appropriate tzinfo object calling the factory */ + Dprintf("typecast_PYTIME_cast: UTC offset = %ds", tzsec); + +#if PY_VERSION_HEX < 0x03070000 + /* Before Python 3.7 the timezone offset had to be a whole number + * of minutes, so round the seconds to the closest minute */ + tzsec = 60 * (int)round(tzsec / 60.0); +#endif + if (!(tzoff = PyDelta_FromDSU(0, tzsec, 0))) { goto exit; } + if (!(tzinfo = PyObject_CallFunctionObjArgs(tzinfo_factory, tzoff, NULL))) { + goto exit; + } + } + else { + Py_INCREF(Py_None); + tzinfo = Py_None; + } + + rv = PyObject_CallFunction((PyObject*)PyDateTimeAPI->TimeType, "iiiiO", + hh, mm, ss, us, tzinfo); + +exit: + Py_XDECREF(tzoff); + Py_XDECREF(tzinfo); + 
return rv; +} + + +/* Attempt parsing a number as microseconds + * Redshift is reported returning this stuff, see #558 + * + * Return a new `timedelta()` object in case of success or NULL and set an error + */ +static PyObject * +interval_from_usecs(const char *str) +{ + PyObject *us = NULL; + char *pend; + PyObject *rv = NULL; + + Dprintf("interval_from_usecs: %s", str); + + if (!(us = PyLong_FromString((char *)str, &pend, 0))) { + Dprintf("interval_from_usecs: parsing long failed"); + goto exit; + } + + if (*pend != '\0') { + /* there are trailing chars, it's not just micros. Barf. */ + Dprintf("interval_from_usecs: spurious chars %s", pend); + PyErr_Format(PyExc_ValueError, + "expected number of microseconds, got %s", str); + goto exit; + } + + rv = PyObject_CallFunction( + (PyObject*)PyDateTimeAPI->DeltaType, "iiO", 0, 0, us); + +exit: + Py_XDECREF(us); + return rv; +} + + +/** INTERVAL - parse an interval into a timedelta object **/ + +static PyObject * +typecast_PYINTERVAL_cast(const char *str, Py_ssize_t len, PyObject *curs) +{ + long v = 0, years = 0, months = 0, hours = 0, minutes = 0, micros = 0; + PY_LONG_LONG days = 0, seconds = 0; + int sign = 1, denom = 1, part = 0; + const char *orig = str; + + if (str == NULL) { Py_RETURN_NONE; } + + Dprintf("typecast_PYINTERVAL_cast: s = %s", str); + + while (len-- > 0 && *str) { + switch (*str) { + + case '-': + sign = -1; + break; + + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + { + long v1; + v1 = v * 10 + (*str - '0'); + /* detect either a rollover, happening if v is really too short, + * or too big value. On Win where long == int the 2nd check + * is useless. */ + if (v1 < v || v1 > (long)INT_MAX) { + /* uhm, oops... but before giving up, maybe it's redshift + * returning microseconds? 
See #558 */ + PyObject *rv; + if ((rv = interval_from_usecs(orig))) { + return rv; + } + else { + PyErr_Clear(); + } + + PyErr_SetString( + PyExc_OverflowError, "interval component too big"); + return NULL; + } + v = v1; + } + if (part == 6) { + denom *= 10; + } + break; + + case 'y': + if (part == 0) { + years = v * sign; + v = 0; sign = 1; part = 1; + str = skip_until_space2(str, &len); + } + break; + + case 'm': + if (part <= 1) { + months = v * sign; + v = 0; sign = 1; part = 2; + str = skip_until_space2(str, &len); + } + break; + + case 'd': + if (part <= 2) { + days = v * sign; + v = 0; sign = 1; part = 3; + str = skip_until_space2(str, &len); + } + break; + + case ':': + if (part <= 3) { + hours = v; + v = 0; part = 4; + } + else if (part == 4) { + minutes = v; + v = 0; part = 5; + } + break; + + case '.': + if (part == 5) { + seconds = v; + v = 0; part = 6; + } + break; + + case 'P': + PyErr_SetString(NotSupportedError, + "iso_8601 intervalstyle currently not supported"); + return NULL; + + default: + break; + } + + str++; + } + + /* manage last value, be it minutes or seconds or microseconds */ + if (part == 4) { + minutes = v; + } + else if (part == 5) { + seconds = v; + } + else if (part == 6) { + micros = v; + if (denom < 1000000L) { + do { + micros *= 10; + denom *= 10; + } while (denom < 1000000L); + } + else if (denom > 1000000L) { + micros = (long)round((double)micros / denom * 1000000.0); + } + } + else if (part == 0) { + /* Parsing failed, maybe it's just an integer? 
Assume usecs */ + return interval_from_usecs(orig); + } + + /* add hour, minutes, seconds together and include the sign */ + seconds += 60 * (PY_LONG_LONG)minutes + 3600 * (PY_LONG_LONG)hours; + if (sign < 0) { + seconds = -seconds; + micros = -micros; + } + + /* add the days, months years together - they already include a sign */ + days += 30 * (PY_LONG_LONG)months + 365 * (PY_LONG_LONG)years; + + return PyObject_CallFunction((PyObject*)PyDateTimeAPI->DeltaType, "LLl", + days, seconds, micros); +} + +/* psycopg defaults to using python datetime types */ + +#define typecast_DATE_cast typecast_PYDATE_cast +#define typecast_TIME_cast typecast_PYTIME_cast +#define typecast_INTERVAL_cast typecast_PYINTERVAL_cast +#define typecast_DATETIME_cast typecast_PYDATETIME_cast +#define typecast_DATETIMETZ_cast typecast_PYDATETIMETZ_cast diff --git a/psycopg/utils.c b/psycopg/utils.c new file mode 100644 index 0000000000000000000000000000000000000000..16be9062ad52540d4b9d8eabd86893ce5f5e02fa --- /dev/null +++ b/psycopg/utils.c @@ -0,0 +1,456 @@ +/* utils.c - miscellaneous utility functions + * + * Copyright (C) 2008-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. 
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#define PSYCOPG_MODULE
+#include "psycopg/psycopg.h"
+
+#include "psycopg/connection.h"
+#include "psycopg/cursor.h"
+#include "psycopg/pgtypes.h"
+#include "psycopg/error.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+/* Escape a string for sql inclusion.
+ *
+ * The function must be called holding the GIL.
+ *
+ * Return a pointer to a new string on the Python heap on success, else NULL
+ * and set an exception. The returned string includes quotes and leading E if
+ * needed.
+ *
+ * `len` is optional: if < 0 it will be calculated.
+ *
+ * If tolen is set, it will contain the length of the escaped string,
+ * including quotes.
+ */
+char *
+psyco_escape_string(connectionObject *conn, const char *from, Py_ssize_t len,
+    char *to, Py_ssize_t *tolen)
+{
+    Py_ssize_t ql;
+    int eq = (conn && (conn->equote)) ? 1 : 0;
+
+    if (len < 0) {
+        len = strlen(from);
+    } else if (strchr(from, '\0') != from + len) {
+        PyErr_Format(PyExc_ValueError,
+            "A string literal cannot contain NUL (0x00) characters.");
+        return NULL;
+    }
+
+    if (to == NULL) {
+        to = (char *)PyMem_Malloc((len * 2 + 4) * sizeof(char));
+        if (to == NULL) {
+            PyErr_NoMemory();
+            return NULL;
+        }
+    }
+
+    {
+        int err;
+        if (conn && conn->pgconn)
+            ql = PQescapeStringConn(conn->pgconn, to+eq+1, from, len, &err);
+        else
+            ql = PQescapeString(to+eq+1, from, len);
+    }
+
+    if (eq) {
+        to[0] = 'E';
+        to[1] = to[ql+2] = '\'';
+        to[ql+3] = '\0';
+    }
+    else {
+        to[0] = to[ql+1] = '\'';
+        to[ql+2] = '\0';
+    }
+
+    if (tolen)
+        *tolen = ql+eq+2;
+
+    return to;
+}
+
+/* Escape a string for inclusion in a query as identifier.
+ *
+ * 'len' is optional: if < 0 it will be calculated.
+ * + * Return a string allocated by Postgres: free it using PQfreemem + * In case of error set a Python exception. + */ +char * +psyco_escape_identifier(connectionObject *conn, const char *str, Py_ssize_t len) +{ + char *rv = NULL; + + if (!conn || !conn->pgconn) { + PyErr_SetString(InterfaceError, "connection not valid"); + goto exit; + } + + if (len < 0) { len = strlen(str); } + + rv = PQescapeIdentifier(conn->pgconn, str, len); + if (!rv) { + char *msg; + msg = PQerrorMessage(conn->pgconn); + if (!msg || !msg[0]) { + msg = "no message provided"; + } + PyErr_Format(InterfaceError, "failed to escape identifier: %s", msg); + } + +exit: + return rv; +} + + +/* Duplicate a string. + * + * Allocate a new buffer on the Python heap containing the new string. + * 'len' is optional: if < 0 the length is calculated. + * + * Store the return in 'to' and return 0 in case of success, else return -1 + * and raise an exception. + * + * If from is null, store null into to. + */ +RAISES_NEG int +psyco_strdup(char **to, const char *from, Py_ssize_t len) +{ + if (!from) { + *to = NULL; + return 0; + } + if (len < 0) { len = strlen(from); } + if (!(*to = PyMem_Malloc(len + 1))) { + PyErr_NoMemory(); + return -1; + } + strcpy(*to, from); + return 0; +} + +/* Ensure a Python object is a bytes string. + * + * Useful when a char * is required out of it. + * + * The function is ref neutral: steals a ref from obj and adds one to the + * return value. This also means that you shouldn't call the function on a + * borrowed ref, if having the object unallocated is not what you want. + * + * It is safe to call the function on NULL. 
+ */ +STEALS(1) PyObject * +psyco_ensure_bytes(PyObject *obj) +{ + PyObject *rv = NULL; + if (!obj) { return NULL; } + + if (PyUnicode_Check(obj)) { + rv = PyUnicode_AsUTF8String(obj); + Py_DECREF(obj); + } + else if (Bytes_Check(obj)) { + rv = obj; + } + else { + PyErr_Format(PyExc_TypeError, + "Expected bytes or unicode string, got %s instead", + Py_TYPE(obj)->tp_name); + Py_DECREF(obj); /* steal the ref anyway */ + } + + return rv; +} + +/* Take a Python object and return text from it. + * + * This means converting bytes to unicode. + * + * The function is ref neutral: steals a ref from obj and adds one to the + * return value. It is safe to call it on NULL. + */ +STEALS(1) PyObject * +psyco_ensure_text(PyObject *obj) +{ + if (obj) { + /* bytes to unicode in Py3 */ + PyObject *rv = PyUnicode_FromEncodedObject(obj, "utf8", "replace"); + Py_DECREF(obj); + return rv; + } + else { + return NULL; + } +} + +/* Check if a file derives from TextIOBase. + * + * Return 1 if it does, else 0, -1 on errors. + */ +int +psyco_is_text_file(PyObject *f) +{ + /* NULL before any call. + * then io.TextIOBase if exists, else None. 
*/ + static PyObject *base; + + /* Try to import os.TextIOBase */ + if (NULL == base) { + PyObject *m; + Dprintf("psyco_is_text_file: importing io.TextIOBase"); + if (!(m = PyImport_ImportModule("io"))) { + Dprintf("psyco_is_text_file: io module not found"); + PyErr_Clear(); + Py_INCREF(Py_None); + base = Py_None; + } + else { + if (!(base = PyObject_GetAttrString(m, "TextIOBase"))) { + Dprintf("psyco_is_text_file: io.TextIOBase not found"); + PyErr_Clear(); + Py_INCREF(Py_None); + base = Py_None; + } + } + Py_XDECREF(m); + } + + if (base != Py_None) { + return PyObject_IsInstance(f, base); + } else { + return 0; + } +} + +/* Make a dict out of PQconninfoOption array */ +PyObject * +psyco_dict_from_conninfo_options(PQconninfoOption *options, int include_password) +{ + PyObject *dict, *res = NULL; + PQconninfoOption *o; + + if (!(dict = PyDict_New())) { goto exit; } + for (o = options; o->keyword != NULL; o++) { + if (o->val != NULL && + (include_password || strcmp(o->keyword, "password") != 0)) { + PyObject *value; + if (!(value = Text_FromUTF8(o->val))) { goto exit; } + if (PyDict_SetItemString(dict, o->keyword, value) != 0) { + Py_DECREF(value); + goto exit; + } + Py_DECREF(value); + } + } + + res = dict; + dict = NULL; + +exit: + Py_XDECREF(dict); + + return res; +} + + +/* Make a connection string out of a string and a dictionary of arguments. + * + * Helper to call psycopg2.extensions.make_dsn() + */ +PyObject * +psyco_make_dsn(PyObject *dsn, PyObject *kwargs) +{ + PyObject *ext = NULL, *make_dsn = NULL; + PyObject *args = NULL, *rv = NULL; + + if (!(ext = PyImport_ImportModule("psycopg2.extensions"))) { goto exit; } + if (!(make_dsn = PyObject_GetAttrString(ext, "make_dsn"))) { goto exit; } + + if (!(args = PyTuple_Pack(1, dsn))) { goto exit; } + rv = PyObject_Call(make_dsn, args, kwargs); + +exit: + Py_XDECREF(args); + Py_XDECREF(make_dsn); + Py_XDECREF(ext); + + return rv; +} + +/* Convert a C string into Python Text using a specified codec. 
+ * + * The codec is the python function codec.getdecoder(enc). + * + * len is optional: use -1 to have it calculated by the function. + */ +PyObject * +psyco_text_from_chars_safe(const char *str, Py_ssize_t len, PyObject *decoder) +{ + static PyObject *replace = NULL; + PyObject *rv = NULL; + PyObject *b = NULL; + PyObject *t = NULL; + + if (!str) { Py_RETURN_NONE; } + + if (len < 0) { len = strlen(str); } + + if (decoder) { + if (!replace) { + if (!(replace = PyUnicode_FromString("replace"))) { goto exit; } + } + if (!(b = PyBytes_FromStringAndSize(str, len))) { goto exit; } + if (!(t = PyObject_CallFunctionObjArgs(decoder, b, replace, NULL))) { + goto exit; + } + + if (!(rv = PyTuple_GetItem(t, 0))) { goto exit; } + Py_INCREF(rv); + } + else { + rv = PyUnicode_DecodeASCII(str, len, "replace"); + } + +exit: + Py_XDECREF(t); + Py_XDECREF(b); + return rv; +} + + +/* psyco_set_error + * + * Create a new error of the given type with extra attributes. + */ + +RAISES BORROWED PyObject * +psyco_set_error(PyObject *exc, cursorObject *curs, const char *msg) +{ + PyObject *pymsg; + PyObject *err = NULL; + connectionObject *conn = NULL; + + if (curs) { + conn = ((cursorObject *)curs)->conn; + } + + if ((pymsg = conn_text_from_chars(conn, msg))) { + err = PyObject_CallFunctionObjArgs(exc, pymsg, NULL); + Py_DECREF(pymsg); + } + else { + /* what's better than an error in an error handler in the morning? + * Anyway, some error was set, refcount is ok... get outta here. 
*/ + return NULL; + } + + if (err && PyObject_TypeCheck(err, &errorType)) { + errorObject *perr = (errorObject *)err; + if (curs) { + Py_CLEAR(perr->cursor); + Py_INCREF(curs); + perr->cursor = curs; + } + } + + if (err) { + PyErr_SetObject(exc, err); + Py_DECREF(err); + } + + return err; +} + + +/* Return nonzero if the current one is the main interpreter */ +static int +psyco_is_main_interp(void) +{ +#if PY_VERSION_HEX >= 0x03080000 + /* tested with Python 3.8.0a2 */ + return _PyInterpreterState_Get() == PyInterpreterState_Main(); +#else + static PyInterpreterState *main_interp = NULL; /* Cached reference */ + PyInterpreterState *interp; + + if (main_interp) { + return (main_interp == PyThreadState_Get()->interp); + } + + /* No cached value: cache the proper value and try again. */ + interp = PyInterpreterState_Head(); + while (interp->next) + interp = interp->next; + + main_interp = interp; + assert (main_interp); + return psyco_is_main_interp(); +#endif +} + +/* psyco_get_decimal_type + + Return a new reference to the decimal type. + + The function returns a cached version of the object, but only in the main + interpreter because subinterpreters are confusing. +*/ + +PyObject * +psyco_get_decimal_type(void) +{ + static PyObject *cachedType = NULL; + PyObject *decimalType = NULL; + PyObject *decimal; + + /* Use the cached object if running from the main interpreter. */ + int can_cache = psyco_is_main_interp(); + if (can_cache && cachedType) { + Py_INCREF(cachedType); + return cachedType; + } + + /* Get a new reference to the Decimal type. */ + decimal = PyImport_ImportModule("decimal"); + if (decimal) { + decimalType = PyObject_GetAttrString(decimal, "Decimal"); + Py_DECREF(decimal); + } + else { + decimalType = NULL; + } + + /* Store the object from future uses. 
*/ + if (can_cache && !cachedType && decimalType) { + Py_INCREF(decimalType); + cachedType = decimalType; + } + + return decimalType; +} diff --git a/psycopg/utils.h b/psycopg/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..5223d3a5a0086915531808a698725693877101fe --- /dev/null +++ b/psycopg/utils.h @@ -0,0 +1,65 @@ +/* utils.h - function definitions for utility file + * + * Copyright (C) 2018-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. 
+ */ + +#ifndef UTILS_H +#define UTILS_H 1 + +/* forward declarations */ +typedef struct cursorObject cursorObject; +typedef struct connectionObject connectionObject; +typedef struct replicationMessageObject replicationMessageObject; + +HIDDEN char *psyco_escape_string( + connectionObject *conn, + const char *from, Py_ssize_t len, char *to, Py_ssize_t *tolen); + +HIDDEN char *psyco_escape_identifier( + connectionObject *conn, const char *str, Py_ssize_t len); + +HIDDEN int psyco_strdup(char **to, const char *from, Py_ssize_t len); + +STEALS(1) HIDDEN PyObject * psyco_ensure_bytes(PyObject *obj); +STEALS(1) HIDDEN PyObject * psyco_ensure_text(PyObject *obj); + +HIDDEN int psyco_is_text_file(PyObject *f); + +HIDDEN PyObject *psyco_dict_from_conninfo_options( + PQconninfoOption *options, int include_password); + +HIDDEN PyObject *psyco_make_dsn(PyObject *dsn, PyObject *kwargs); + +HIDDEN PyObject *psyco_text_from_chars_safe( + const char *str, Py_ssize_t len, PyObject *decoder); + +HIDDEN RAISES BORROWED PyObject *psyco_set_error( + PyObject *exc, cursorObject *curs, const char *msg); + +HIDDEN PyObject *psyco_get_decimal_type(void); + +HIDDEN PyObject *Bytes_Format(PyObject *format, PyObject *args); + + +#endif /* !defined(UTILS_H) */ diff --git a/psycopg/win32_support.c b/psycopg/win32_support.c new file mode 100644 index 0000000000000000000000000000000000000000..e82575a1c2aac8fea60eb0e0d0c15adcbc63b012 --- /dev/null +++ b/psycopg/win32_support.c @@ -0,0 +1,90 @@ +/* win32_support.c - emulate some functions missing on Win32 + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/win32_support.h" + +#ifdef _WIN32 + +#ifndef __MINGW32__ +/* millisecond-precision port of gettimeofday for Win32, taken from + src/port/gettimeofday.c in PostgreSQL core */ + +/* FILETIME of Jan 1 1970 00:00:00. */ +static const unsigned __int64 epoch = ((unsigned __int64) 116444736000000000ULL); + +/* + * timezone information is stored outside the kernel so tzp isn't used anymore. + * + * Note: this function is not for Win32 high precision timing purpose. See + * elapsed_time(). 
+ */ +int +gettimeofday(struct timeval * tp, void * tzp) +{ + FILETIME file_time; + SYSTEMTIME system_time; + ULARGE_INTEGER ularge; + + GetSystemTime(&system_time); + SystemTimeToFileTime(&system_time, &file_time); + ularge.LowPart = file_time.dwLowDateTime; + ularge.HighPart = file_time.dwHighDateTime; + + tp->tv_sec = (long) ((ularge.QuadPart - epoch) / 10000000L); + tp->tv_usec = (long) (system_time.wMilliseconds * 1000); + + return 0; +} + +/* timeradd missing on MS VC */ +void +timeradd(struct timeval *a, struct timeval *b, struct timeval *c) +{ + c->tv_sec = a->tv_sec + b->tv_sec; + c->tv_usec = a->tv_usec + b->tv_usec; + if(c->tv_usec >= 1000000L) { + c->tv_usec -= 1000000L; + c->tv_sec += 1; + } +} +#endif /* !defined(__MINGW32__) */ + +/* timersub is missing on mingw & MS VC */ +void +timersub(struct timeval *a, struct timeval *b, struct timeval *c) +{ + c->tv_sec = a->tv_sec - b->tv_sec; + c->tv_usec = a->tv_usec - b->tv_usec; + if (c->tv_usec < 0) { + c->tv_usec += 1000000; + c->tv_sec -= 1; + } +} + +#endif /* defined(_WIN32) */ diff --git a/psycopg/win32_support.h b/psycopg/win32_support.h new file mode 100644 index 0000000000000000000000000000000000000000..9fca0d6af147fba5f96b862afe7fa6aa1093f9b7 --- /dev/null +++ b/psycopg/win32_support.h @@ -0,0 +1,56 @@ +/* win32_support.h - definitions for win32_support.c + * + * Copyright (C) 2003-2019 Federico Di Gregorio + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. + * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+#ifndef PSYCOPG_WIN32_SUPPORT_H
+#define PSYCOPG_WIN32_SUPPORT_H
+
+#include "psycopg/config.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+#ifdef __MINGW32__
+#include <sys/time.h>
+#endif
+
+
+#ifdef _WIN32
+#ifndef __MINGW32__
+extern HIDDEN int gettimeofday(struct timeval * tp, void * tzp);
+extern HIDDEN void timeradd(struct timeval *a, struct timeval *b, struct timeval *c);
+#endif
+
+extern HIDDEN void timersub(struct timeval *a, struct timeval *b, struct timeval *c);
+
+#ifndef timercmp
+#define timercmp(a, b, cmp) \
+    (((a)->tv_sec == (b)->tv_sec) ? \
+     ((a)->tv_usec cmp (b)->tv_usec) : \
+     ((a)->tv_sec cmp (b)->tv_sec))
+#endif
+#endif
+
+#endif /* !defined(PSYCOPG_WIN32_SUPPORT_H) */
diff --git a/psycopg/xid.h b/psycopg/xid.h
new file mode 100644
index 0000000000000000000000000000000000000000..d8d90bd2d0cd685c3f6d0591368d2cab211b0859
--- /dev/null
+++ b/psycopg/xid.h
+/* xid.h - definition for the psycopg Xid type
+ *
+ * Copyright (C) 2008-2019 James Henstridge
+ * Copyright (C) 2010-2019 Daniele Varrazzo
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#ifndef PSYCOPG_XID_H +#define PSYCOPG_XID_H 1 + +extern HIDDEN PyTypeObject xidType; + +typedef struct { + PyObject_HEAD + + /* the Python-style three-part transaction ID */ + PyObject *format_id; + PyObject *gtrid; + PyObject *bqual; + + /* Additional information PostgreSQL exposes about prepared transactions */ + PyObject *prepared; + PyObject *owner; + PyObject *database; +} xidObject; + +HIDDEN xidObject *xid_ensure(PyObject *oxid); +HIDDEN xidObject *xid_from_string(PyObject *s); +HIDDEN PyObject *xid_get_tid(xidObject *self); +HIDDEN PyObject *xid_recover(PyObject *conn); + +#endif /* PSYCOPG_XID_H */ diff --git a/psycopg/xid_type.c b/psycopg/xid_type.c new file mode 100644 index 0000000000000000000000000000000000000000..094c58c6b68a64cb7b268098f96c67a41a33d08c --- /dev/null +++ b/psycopg/xid_type.c @@ -0,0 +1,665 @@ +/* xid_type.c - python interface to Xid objects + * + * Copyright (C) 2008 Canonical Ltd. + * Copyright (C) 2010-2019 Daniele Varrazzo + * Copyright (C) 2020-2021 The Psycopg Team + * + * This file is part of psycopg. 
+ * + * psycopg2 is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * In addition, as a special exception, the copyright holders give + * permission to link this program with the OpenSSL library (or with + * modified versions of OpenSSL that use the same license as OpenSSL), + * and distribute linked combinations including the two. + * + * You must obey the GNU Lesser General Public License in all respects for + * all of the code used other than OpenSSL. + * + * psycopg2 is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + */ + +#define PSYCOPG_MODULE +#include "psycopg/psycopg.h" + +#include "psycopg/xid.h" +#include "psycopg/cursor.h" + + +static const char xid_doc[] = + "A transaction identifier used for two-phase commit.\n\n" + "Usually returned by the connection methods `~connection.xid()` and\n" + "`~connection.tpc_recover()`.\n" + "`!Xid` instances can be unpacked as a 3-item tuples containing the items\n" + ":samp:`({format_id},{gtrid},{bqual})`.\n" + "The `!str()` of the object returns the *transaction ID* used\n" + "in the commands sent to the server.\n\n" + "See :ref:`tpc` for an introduction."; + +static const char format_id_doc[] = + "Format ID in a XA transaction.\n\n" + "A non-negative 32 bit integer.\n" + "`!None` if the transaction doesn't follow the XA standard."; + +static const char gtrid_doc[] = + "Global transaction ID in a XA transaction.\n\n" + "If the transaction doesn't follow the XA standard, it is the plain\n" + "*transaction ID* used in the server commands."; + +static const char bqual_doc[] = + "Branch qualifier of the transaction.\n\n" + "In a XA 
transaction every resource participating to a transaction\n"
+    "receives a distinct branch qualifier.\n"
+    "`!None` if the transaction doesn't follow the XA standard.";
+
+static const char prepared_doc[] =
+    "Timestamp (with timezone) in which a recovered transaction was prepared.";
+
+static const char owner_doc[] =
+    "Name of the user who prepared a recovered transaction.";
+
+static const char database_doc[] =
+    "Database the recovered transaction belongs to.";
+
+static PyMemberDef xid_members[] = {
+    { "format_id", T_OBJECT, offsetof(xidObject, format_id), READONLY, (char *)format_id_doc },
+    { "gtrid", T_OBJECT, offsetof(xidObject, gtrid), READONLY, (char *)gtrid_doc },
+    { "bqual", T_OBJECT, offsetof(xidObject, bqual), READONLY, (char *)bqual_doc },
+    { "prepared", T_OBJECT, offsetof(xidObject, prepared), READONLY, (char *)prepared_doc },
+    { "owner", T_OBJECT, offsetof(xidObject, owner), READONLY, (char *)owner_doc },
+    { "database", T_OBJECT, offsetof(xidObject, database), READONLY, (char *)database_doc },
+    { NULL }
+};
+
+static PyObject *
+xid_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
+{
+    return type->tp_alloc(type, 0);
+}
+
+static int
+xid_init(xidObject *self, PyObject *args, PyObject *kwargs)
+{
+    static char *kwlist[] = {"format_id", "gtrid", "bqual", NULL};
+    int format_id;
+    size_t i, gtrid_len, bqual_len;
+    const char *gtrid, *bqual;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iss", kwlist,
+                                     &format_id, &gtrid, &bqual))
+        return -1;
+
+    if (format_id < 0 || format_id > 0x7fffffff) {
+        PyErr_SetString(PyExc_ValueError,
+            "format_id must be a non-negative 32-bit integer");
+        return -1;
+    }
+
+    /* make sure that gtrid is no more than 64 characters long and
+       made of printable characters (which we're defining as those
+       between 0x20 and 0x7f).
/* sq_item slot: expose the Xid as the 3-sequence
 * (format_id, gtrid, bqual), matching the documented unpacking
 * behavior of the class. Returns a new reference or NULL + IndexError. */
static PyObject *
xid_getitem(xidObject *self, Py_ssize_t item)
{
    /* Normalize negative indexes against the fixed length 3
     * (defensive: the caller may or may not have adjusted them). */
    if (item < 0)
        item += 3;

    switch (item) {
    case 0:
        Py_INCREF(self->format_id);
        return self->format_id;
    case 1:
        Py_INCREF(self->gtrid);
        return self->gtrid;
    case 2:
        Py_INCREF(self->bqual);
        return self->bqual;
    default:
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return NULL;
    }
}
/* Python-level binding for the static method Xid.from_string(s).
 *
 * Thin wrapper: unpacks the single positional argument and delegates
 * all parsing and validation to xid_from_string(). Returns a new Xid
 * reference or NULL with an exception set. */
static PyObject *
xid_from_string_method(PyObject *cls, PyObject *args)
{
    PyObject *s = NULL;

    if (!PyArg_ParseTuple(args, "O", &s)) { return NULL; }

    return (PyObject *)xid_from_string(s);
}
/* Convert a Python object into a proper xid.
 *
 * Return a new reference to the object or set an exception.
 *
 * The idea is that people can either create a xid from connection.xid
 * or use a regular string they have found in PostgreSQL's pg_prepared_xacts
 * in order to recover a transaction not generated by psycopg.
 *
 * If the argument is already a Xid instance (or subclass) it is
 * returned with its refcount bumped; otherwise it is handed to
 * xid_from_string(), which raises TypeError for non-string input. */
xidObject *xid_ensure(PyObject *oxid)
{
    xidObject *rv = NULL;

    if (PyObject_TypeCheck(oxid, &xidType)) {
        Py_INCREF(oxid);
        rv = (xidObject *)oxid;
    }
    else {
        rv = xid_from_string(oxid);
    }

    return rv;
}
/* Return the PostgreSQL transaction_id for this XA xid.
 *
 * PostgreSQL wants just a string, while the DBAPI supports the XA standard
 * and thus a triple. We use the same conversion algorithm implemented by JDBC
 * in order to allow some form of interoperation.
 *
 * The function must be called while holding the GIL.
 *
 * Returns a new reference, or NULL with an exception set.
 *
 * see also: the pgjdbc implementation
 * http://cvs.pgfoundry.org/cgi-bin/cvsweb.cgi/jdbc/pgjdbc/org/postgresql/xa/RecoveredXid.java?rev=1.2
 */
PyObject *
xid_get_tid(xidObject *self)
{
    PyObject *rv = NULL;
    PyObject *egtrid = NULL;
    PyObject *ebqual = NULL;
    PyObject *format = NULL;
    PyObject *args = NULL;

    if (Py_None == self->format_id) {
        /* Unparsed xid: return the gtrid verbatim. */
        Py_INCREF(self->gtrid);
        rv = self->gtrid;
    }
    else {
        /* XA xid: mash together the components.
         * gtrid/bqual are base64-encoded so the '_' separator can never
         * appear inside them, keeping the format unambiguous. */
        if (!(egtrid = _xid_encode64(self->gtrid))) { goto exit; }
        if (!(ebqual = _xid_encode64(self->bqual))) { goto exit; }

        /* rv = "%d_%s_%s" % (format_id, egtrid, ebqual) */
        if (!(format = Text_FromUTF8("%d_%s_%s"))) { goto exit; }

        if (!(args = PyTuple_New(3))) { goto exit; }
        Py_INCREF(self->format_id);
        PyTuple_SET_ITEM(args, 0, self->format_id);
        /* PyTuple_SET_ITEM steals the references: NULL the locals so the
         * cleanup below doesn't decref them a second time. */
        PyTuple_SET_ITEM(args, 1, egtrid); egtrid = NULL;
        PyTuple_SET_ITEM(args, 2, ebqual); ebqual = NULL;

        if (!(rv = Text_Format(format, args))) { goto exit; }
    }

exit:
    Py_XDECREF(args);
    Py_XDECREF(format);
    Py_XDECREF(egtrid);
    Py_XDECREF(ebqual);

    return rv;
}
/* Return the regex object to parse a Xid string.
 *
 * Return a borrowed reference.
 *
 * The compiled pattern is cached in a function-static variable, so the
 * "re" import and compilation happen only on the first call; the cached
 * object is deliberately never released. On failure NULL is returned
 * with the Python exception set. */
BORROWED static PyObject *
_xid_get_parse_regex(void) {
    static PyObject *rv;

    if (!rv) {
        PyObject *re_mod = NULL;
        PyObject *comp = NULL;
        PyObject *regex = NULL;

        Dprintf("compiling regexp to parse transaction id");

        /* Pattern matches the "format_id_egtrid_ebqual" layout produced
         * by xid_get_tid: digits, then two base64 chunks split by '_'. */
        if (!(re_mod = PyImport_ImportModule("re"))) { goto exit; }
        if (!(comp = PyObject_GetAttrString(re_mod, "compile"))) { goto exit; }
        if (!(regex = PyObject_CallFunction(comp, "s",
                "^(\\d+)_([^_]*)_([^_]*)$"))) {
            goto exit;
        }

        /* Good, compiled: transfer ownership to the static cache. */
        rv = regex;
        regex = NULL;

exit:
        Py_XDECREF(regex);
        Py_XDECREF(comp);
        Py_XDECREF(re_mod);
    }

    return rv;
}
/* Return a new Xid object representing a transaction ID not conform to
 * the XA specifications.
 *
 * The object is first built through the regular constructor with dummy
 * (but valid) arguments, then the members are patched in place: gtrid
 * holds the unparsed string, format_id and bqual are set to None, which
 * is the marker the rest of the module uses for "non-XA" xids. */
static xidObject *
_xid_unparsed_from_string(PyObject *str) {
    xidObject *xid = NULL;
    xidObject *rv = NULL;

    /* fake args to work around the checks performed by the xid init */
    if (!(xid = (xidObject *)PyObject_CallFunction((PyObject *)&xidType,
            "iss", 0, "", ""))) {
        goto exit;
    }

    /* set xid.gtrid = str */
    Py_CLEAR(xid->gtrid);
    Py_INCREF(str);
    xid->gtrid = str;

    /* set xid.format_id = None */
    Py_CLEAR(xid->format_id);
    Py_INCREF(Py_None);
    xid->format_id = Py_None;

    /* set xid.bqual = None */
    Py_CLEAR(xid->bqual);
    Py_INCREF(Py_None);
    xid->bqual = Py_None;

    /* return the finished object; NULL the local so the error-path
     * cleanup below doesn't release it. */
    rv = xid;
    xid = NULL;

exit:
    Py_XDECREF(xid);

    return rv;
}
*/ + rv = _xid_parse_string(str); + if (!rv) { + /* If parsing failed, treat the string as an unparsed id */ + PyErr_Clear(); + rv = _xid_unparsed_from_string(str); + } + + return rv; +} + + +/* conn_tpc_recover -- return a list of pending TPC Xid */ + +PyObject * +xid_recover(PyObject *conn) +{ + PyObject *rv = NULL; + PyObject *curs = NULL; + PyObject *xids = NULL; + xidObject *xid = NULL; + PyObject *recs = NULL; + PyObject *rec = NULL; + PyObject *item = NULL; + PyObject *tmp; + Py_ssize_t len, i; + + /* curs = conn.cursor() + * (sort of. Use the real cursor in case the connection returns + * something non-dbapi -- see ticket #114) */ + if (!(curs = PyObject_CallFunctionObjArgs( + (PyObject *)&cursorType, conn, NULL))) { goto exit; } + + /* curs.execute(...) */ + if (!(tmp = PyObject_CallMethod(curs, "execute", "s", + "SELECT gid, prepared, owner, database FROM pg_prepared_xacts"))) + { + goto exit; + } + Py_DECREF(tmp); + + /* recs = curs.fetchall() */ + if (!(recs = PyObject_CallMethod(curs, "fetchall", NULL))) { goto exit; } + + /* curs.close() */ + if (!(tmp = PyObject_CallMethod(curs, "close", NULL))) { goto exit; } + Py_DECREF(tmp); + + /* Build the list with return values. 
*/ + if (0 > (len = PySequence_Size(recs))) { goto exit; } + if (!(xids = PyList_New(len))) { goto exit; } + + /* populate the xids list */ + for (i = 0; i < len; ++i) { + if (!(rec = PySequence_GetItem(recs, i))) { goto exit; } + + /* Get the xid with the XA triple set */ + if (!(item = PySequence_GetItem(rec, 0))) { goto exit; } + if (!(xid = xid_from_string(item))) { goto exit; } + Py_CLEAR(item); + + /* set xid.prepared */ + Py_CLEAR(xid->prepared); + if (!(xid->prepared = PySequence_GetItem(rec, 1))) { goto exit; } + + /* set xid.owner */ + Py_CLEAR(xid->owner); + if (!(xid->owner = PySequence_GetItem(rec, 2))) { goto exit; } + + /* set xid.database */ + Py_CLEAR(xid->database); + if (!(xid->database = PySequence_GetItem(rec, 3))) { goto exit; } + + /* xid finished: add it to the returned list */ + PyList_SET_ITEM(xids, i, (PyObject *)xid); + xid = NULL; /* ref stolen */ + + Py_CLEAR(rec); + } + + /* set the return value. */ + rv = xids; + xids = NULL; + +exit: + Py_XDECREF(xids); + Py_XDECREF(xid); + Py_XDECREF(curs); + Py_XDECREF(recs); + Py_XDECREF(rec); + Py_XDECREF(item); + + return rv; +} diff --git a/psycopg2.cproj b/psycopg2.cproj new file mode 100644 index 0000000000000000000000000000000000000000..4c2327850200cd9e3ea4ac7b07a9d0e230a5fac0 --- /dev/null +++ b/psycopg2.cproj @@ -0,0 +1,212 @@ + + + + Debug + AnyCPU + 9.0.21022 + 2.0 + {CFD80D18-3EE5-49ED-992A-E6D433BC7641} + + + + C + Bin + + + true + . 
+ DEBUG MONODEVELOP + + + + Bin + + + + + + + psycopg2 + + + bin\Release + MONODEVELOP + + + + 3 + psycopg2 + Bin + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/scripts/build/appveyor.cache_rebuild b/scripts/build/appveyor.cache_rebuild new file mode 100644 index 0000000000000000000000000000000000000000..d4d2ed2a04644c8b13e442837786b95be84bdf5a --- /dev/null +++ b/scripts/build/appveyor.cache_rebuild @@ -0,0 +1,22 @@ +This file is a simple placeholder for forcing the appveyor build cache +to invalidate itself since appveyor.yml changes more frequently then +the cache needs updating. Note, the versions list here can be +different than what is indicated in appveyor.yml. + +To invalidate the cache, update this file and check it into git. + + +Currently used modules built in the cache: + +OpenSSL + Version: 1.1.1k + +PostgreSQL + Version: 13.3 + + +NOTE: to zap the cache manually you can also use: + + curl -X DELETE -H "Authorization: Bearer $APPVEYOR_TOKEN" -H "Content-Type: application/json" https://ci.appveyor.com/api/projects/psycopg/psycopg2/buildcache + +with the token from https://ci.appveyor.com/api-token diff --git a/scripts/build/appveyor.py b/scripts/build/appveyor.py new file mode 100755 index 0000000000000000000000000000000000000000..39b3ebe2ee3bf2191c9774f9104a32a94b883289 --- /dev/null +++ b/scripts/build/appveyor.py @@ -0,0 +1,848 @@ +#!/usr/bin/env python3 +""" +Build steps for the windows binary packages. + +The script is designed to be called by appveyor. Subcommands map the steps in +'appveyor.yml'. 
+ +""" + +import re +import os +import sys +import json +import shutil +import logging +import subprocess as sp +from glob import glob +from pathlib import Path +from zipfile import ZipFile +from argparse import ArgumentParser +from tempfile import NamedTemporaryFile +from urllib.request import urlopen + +opt = None +STEP_PREFIX = 'step_' + +logger = logging.getLogger() +logging.basicConfig( + level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s' +) + + +def main(): + global opt + opt = parse_cmdline() + logger.setLevel(opt.loglevel) + + cmd = globals()[STEP_PREFIX + opt.step] + cmd() + + +def setup_build_env(): + """ + Set the environment variables according to the build environment + """ + setenv('VS_VER', opt.vs_ver) + + path = [ + str(opt.py_dir), + str(opt.py_dir / 'Scripts'), + r'C:\Strawberry\Perl\bin', + r'C:\Program Files\Git\mingw64\bin', + str(opt.ssl_build_dir / 'bin'), + os.environ['PATH'], + ] + setenv('PATH', os.pathsep.join(path)) + + logger.info("Configuring compiler") + bat_call([opt.vc_dir / "vcvarsall.bat", 'x86' if opt.arch_32 else 'amd64']) + + +def python_info(): + logger.info("Python Information") + run_python(['--version'], stderr=sp.STDOUT) + run_python( + ['-c', "import sys; print('64bit: %s' % (sys.maxsize > 2**32))"] + ) + + +def step_install(): + python_info() + configure_sdk() + configure_postgres() + + if opt.is_wheel: + install_wheel_support() + + +def install_wheel_support(): + """ + Install an up-to-date pip wheel package to build wheels. + """ + run_python("-m pip install --upgrade pip".split()) + run_python("-m pip install wheel".split()) + + +def configure_sdk(): + # The program rc.exe on 64bit with some versions look in the wrong path + # location when building postgresql. This cheats by copying the x64 bit + # files to that location. 
def configure_postgres():
    """
    Set up PostgreSQL config before the service starts.

    Appends test-friendly settings to postgresql.conf (extra prepared
    transactions for the TPC tests, ssl enabled) and generates the
    self-signed certificates PostgreSQL needs to accept ssl connections.
    """
    logger.info("Configuring Postgres")
    with (opt.pg_data_dir / 'postgresql.conf').open('a') as f:
        # allow > 1 prepared transactions for test cases
        print("max_prepared_transactions = 10", file=f)
        print("ssl = on", file=f)

    # Create openssl certificate to allow ssl connection
    cwd = os.getcwd()
    os.chdir(opt.pg_data_dir)
    # BUG FIX: the original didn't restore the working directory if an
    # openssl call failed, poisoning every later step's relative paths.
    try:
        run_openssl(
            'req -new -x509 -days 365 -nodes -text '
            '-out server.crt -keyout server.key -subj /CN=initd.org'.split()
        )
        run_openssl(
            'req -new -nodes -text -out root.csr -keyout root.key '
            '-subj /CN=initd.org'.split()
        )

        run_openssl(
            'x509 -req -in root.csr -text -days 3650 -extensions v3_ca '
            '-signkey root.key -out root.crt'.split()
        )

        run_openssl(
            'req -new -nodes -text -out server.csr -keyout server.key '
            '-subj /CN=initd.org'.split()
        )

        run_openssl(
            'x509 -req -in server.csr -text -days 365 -CA root.crt '
            '-CAkey root.key -CAcreateserial -out server.crt'.split()
        )
    finally:
        os.chdir(cwd)
opt.arch_32: + target = 'VC-WIN32' + setenv('VCVARS_PLATFORM', 'x86') + else: + target = 'VC-WIN64A' + setenv('VCVARS_PLATFORM', 'amd64') + setenv('CPU', 'AMD64') + + ver = os.environ['OPENSSL_VERSION'] + + # Download OpenSSL source + zipname = f'OpenSSL_{ver}.zip' + zipfile = opt.cache_dir / zipname + if not zipfile.exists(): + download( + f"https://github.com/openssl/openssl/archive/{zipname}", zipfile + ) + + with ZipFile(zipfile) as z: + z.extractall(path=opt.build_dir) + + sslbuild = opt.build_dir / f"openssl-OpenSSL_{ver}" + os.chdir(sslbuild) + run_command( + ['perl', 'Configure', target, 'no-asm'] + + ['no-shared', 'no-zlib', f'--prefix={top}', f'--openssldir={top}'] + ) + + run_command("nmake build_libs install_sw".split()) + + assert (top / 'lib' / 'libssl.lib').exists() + + os.chdir(opt.clone_dir) + shutil.rmtree(sslbuild) + + +def build_libpq(): + top = opt.pg_build_dir + if (top / 'lib' / 'libpq.lib').exists(): + return + + logger.info("Building libpq") + + # Setup directories for building PostgreSQL librarires + ensure_dir(top / 'include') + ensure_dir(top / 'lib') + ensure_dir(top / 'bin') + + ver = os.environ['POSTGRES_VERSION'] + + # Download PostgreSQL source + zipname = f'postgres-REL_{ver}.zip' + zipfile = opt.cache_dir / zipname + if not zipfile.exists(): + download( + f"https://github.com/postgres/postgres/archive/REL_{ver}.zip", + zipfile, + ) + + with ZipFile(zipfile) as z: + z.extractall(path=opt.build_dir) + + pgbuild = opt.build_dir / f"postgres-REL_{ver}" + os.chdir(pgbuild) + + # Setup build config file (config.pl) + os.chdir("src/tools/msvc") + with open("config.pl", 'w') as f: + print( + """\ +$config->{ldap} = 0; +$config->{openssl} = "%s"; + +1; +""" + % str(opt.ssl_build_dir).replace('\\', '\\\\'), + file=f, + ) + + # Hack the Mkvcbuild.pm file so we build the lib version of libpq + file_replace('Mkvcbuild.pm', "'libpq', 'dll'", "'libpq', 'lib'") + + # Build libpgport, libpgcommon, libpq + run_command([which("build"), "libpgport"]) 
def build_psycopg():
    """Compile the psycopg2 C extension in place.

    Links against the OpenSSL and libpq static libraries produced by
    the earlier build steps (opt.ssl_build_dir); pg_config is located
    through the PATH entry added by add_pg_config_path().
    """
    os.chdir(opt.package_dir)
    # Rename the package first (e.g. psycopg2-binary) so setup.py
    # builds under the name we intend to ship.
    patch_package_name()
    add_pg_config_path()
    run_python(
        ["setup.py", "build_ext", "--have-ssl"]
        + ["-l", "libpgcommon libpgport"]
        + ["-L", opt.ssl_build_dir / 'lib']
        + ['-I', opt.ssl_build_dir / 'include']
    )
    run_python(["setup.py", "build_py"])
def add_pg_config_path():
    """Make the freshly built pg_config reachable through the PATH."""
    bin_dir = str(opt.pg_build_dir / 'bin')
    current_path = os.environ['PATH']
    # Prepend only once: this runs in several steps and must not keep
    # growing the PATH on every call.
    if bin_dir in current_path.split(os.pathsep):
        return
    setenv('PATH', os.pathsep.join([bin_dir, current_path]))
def run_test_suite():
    """Run the psycopg2 unit tests with the target interpreter."""
    # Remove this var, which would make a badly configured OpenSSL 1.1 work
    os.environ.pop('OPENSSL_CONF', None)

    # Run the unit test
    args = [
        '-c',
        "import tests; tests.unittest.main(defaultTest='tests.test_suite')",
    ]

    if opt.is_wheel:
        # Wheel builds get the abbreviated (fast) test run; full builds
        # run everything verbosely.
        os.environ['PSYCOPG2_TEST_FAST'] = '1'
    else:
        args.append('--verbose')

    os.chdir(opt.package_dir)
    run_python(args)
def file_replace(fn, s1, s2):
    """
    Replace all the occurrences of the string s1 into s2 in the file fn.

    The file is rewritten in place. Raises FileNotFoundError if *fn*
    does not exist.
    """
    # BUG FIX: the original validated with `assert`, which is silently
    # stripped under `python -O`; raise an explicit error instead.
    if not os.path.exists(fn):
        raise FileNotFoundError(fn)
    with open(fn, 'r+') as f:
        data = f.read()
        f.seek(0)
        f.write(data.replace(s1, s2))
        # Truncate in case the replacement made the content shorter.
        f.truncate()
def ensure_dir(dir):
    """Create the directory *dir* (with parents) if missing.

    Accepts a str or Path; always returns the directory as a Path.
    """
    path = dir if isinstance(dir, Path) else Path(dir)

    if not path.is_dir():
        logger.info("creating directory %s", path)
        path.mkdir(parents=True)

    return path
+ """ + return out_command([opt.py_exe] + args, **kwargs) + + +def copy_file(src, dst): + logger.info("copying file %s -> %s", src, dst) + shutil.copy(src, dst) + + +def setenv(k, v): + logger.debug("setting %s=%s", k, v) + os.environ[k] = v + + +def which(name): + """ + Return the full path of a command found on the path + """ + base, ext = os.path.splitext(name) + if not ext: + exts = ('.com', '.exe', '.bat', '.cmd') + else: + exts = (ext,) + + for dir in ['.'] + os.environ['PATH'].split(os.pathsep): + for ext in exts: + fn = os.path.join(dir, base + ext) + if os.path.isfile(fn): + return fn + + raise Exception(f"couldn't find program on path: {name}") + + +class Options: + """ + An object exposing the script configuration from env vars and command line. + """ + + @property + def py_ver(self): + """The Python version to build as 2 digits string.""" + rv = os.environ['PY_VER'] + assert rv in ('36', '37', '38', '39'), rv + return rv + + @property + def py_arch(self): + """The Python architecture to build, 32 or 64.""" + rv = os.environ['PY_ARCH'] + assert rv in ('32', '64'), rv + return int(rv) + + @property + def arch_32(self): + """True if the Python architecture to build is 32 bits.""" + return self.py_arch == 32 + + @property + def arch_64(self): + """True if the Python architecture to build is 64 bits.""" + return self.py_arch == 64 + + @property + def package_name(self): + return os.environ.get('CONFIGURATION', 'psycopg2') + + @property + def package_version(self): + """The psycopg2 version number to build.""" + with (self.package_dir / 'setup.py').open() as f: + data = f.read() + + m = re.search( + r"""^PSYCOPG_VERSION\s*=\s*['"](.*)['"]""", data, re.MULTILINE + ) + return m.group(1) + + @property + def is_wheel(self): + """Are we building the wheel packages or just the extension?""" + workflow = os.environ["WORKFLOW"] + return workflow == "packages" + + @property + def py_dir(self): + """ + The path to the target python binary to execute. 
+ """ + dirname = ''.join( + [r"C:\Python", self.py_ver, '-x64' if self.arch_64 else ''] + ) + return Path(dirname) + + @property + def py_exe(self): + """ + The full path of the target python executable. + """ + return self.py_dir / 'python.exe' + + @property + def vc_dir(self): + """ + The path of the Visual C compiler. + """ + if self.vs_ver == '16.0': + path = Path( + r"C:\Program Files (x86)\Microsoft Visual Studio\2019" + r"\Community\VC\Auxiliary\Build" + ) + else: + path = Path( + r"C:\Program Files (x86)\Microsoft Visual Studio %s\VC" + % self.vs_ver + ) + return path + + @property + def vs_ver(self): + # https://wiki.python.org/moin/WindowsCompilers + # https://www.appveyor.com/docs/windows-images-software/#python + # Py 3.6--3.8 = VS Ver. 14.0 (VS 2015) + # Py 3.9 = VS Ver. 16.0 (VS 2019) + vsvers = { + '36': '14.0', + '37': '14.0', + '38': '14.0', + '39': '16.0', + } + return vsvers[self.py_ver] + + @property + def clone_dir(self): + """The directory where the repository is cloned.""" + return Path(r"C:\Project") + + @property + def appveyor_pg_dir(self): + """The directory of the postgres service made available by Appveyor.""" + return Path(os.environ['POSTGRES_DIR']) + + @property + def pg_data_dir(self): + """The data dir of the appveyor postgres service.""" + return self.appveyor_pg_dir / 'data' + + @property + def pg_bin_dir(self): + """The bin dir of the appveyor postgres service.""" + return self.appveyor_pg_dir / 'bin' + + @property + def pg_build_dir(self): + """The directory where to build the postgres libraries for psycopg.""" + return self.cache_arch_dir / 'postgresql' + + @property + def ssl_build_dir(self): + """The directory where to build the openssl libraries for psycopg.""" + return self.cache_arch_dir / 'openssl' + + @property + def cache_arch_dir(self): + rv = self.cache_dir / str(self.py_arch) / self.vs_ver + return ensure_dir(rv) + + @property + def cache_dir(self): + return Path(r"C:\Others") + + @property + def build_dir(self): + 
rv = self.cache_arch_dir / 'Builds' + return ensure_dir(rv) + + @property + def package_dir(self): + return self.clone_dir + + @property + def dist_dir(self): + """The directory where to build packages to distribute.""" + return ( + self.package_dir / 'dist' / (f'psycopg2-{self.package_version}') + ) + + +def parse_cmdline(): + parser = ArgumentParser(description=__doc__) + + g = parser.add_mutually_exclusive_group() + g.add_argument( + '-q', + '--quiet', + help="Talk less", + dest='loglevel', + action='store_const', + const=logging.WARN, + default=logging.INFO, + ) + g.add_argument( + '-v', + '--verbose', + help="Talk more", + dest='loglevel', + action='store_const', + const=logging.DEBUG, + default=logging.INFO, + ) + + steps = [ + n[len(STEP_PREFIX) :] + for n in globals() + if n.startswith(STEP_PREFIX) and callable(globals()[n]) + ] + + parser.add_argument( + 'step', choices=steps, help="the appveyor step to execute" + ) + + opt = parser.parse_args(namespace=Options()) + + return opt + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/scripts/build/build_libpq.sh b/scripts/build/build_libpq.sh new file mode 100755 index 0000000000000000000000000000000000000000..32d2222d146a8058d11ff58d2fcf02aa6ba0ae39 --- /dev/null +++ b/scripts/build/build_libpq.sh @@ -0,0 +1,130 @@ +#!/bin/bash + +# Build a modern version of libpq and depending libs from source on Centos 5 + +set -euo pipefail +set -x + +openssl_version="1.1.1k" +ldap_version="2.4.59" +sasl_version="2.1.27" +postgres_version="13.3" + +yum install -y zlib-devel krb5-devel pam-devel + + +# Build openssl if needed +openssl_tag="OpenSSL_${openssl_version//./_}" +openssl_dir="openssl-${openssl_tag}" +if [ ! 
-d "${openssl_dir}" ]; then curl -sL \ + https://github.com/openssl/openssl/archive/${openssl_tag}.tar.gz \ + | tar xzf - + + cd "${openssl_dir}" + + ./config --prefix=/usr/local/ --openssldir=/usr/local/ \ + zlib -fPIC shared + make depend + make +else + cd "${openssl_dir}" +fi + +# Install openssl +make install_sw +cd .. + + +# Build libsasl2 if needed +# The system package (cyrus-sasl-devel) causes an amazing error on i686: +# "unsupported version 0 of Verneed record" +# https://github.com/pypa/manylinux/issues/376 +sasl_tag="cyrus-sasl-${sasl_version}" +sasl_dir="cyrus-sasl-${sasl_tag}" +if [ ! -d "${sasl_dir}" ]; then + curl -sL \ + https://github.com/cyrusimap/cyrus-sasl/archive/${sasl_tag}.tar.gz \ + | tar xzf - + + cd "${sasl_dir}" + + autoreconf -i + ./configure + make +else + cd "${sasl_dir}" +fi + +# Install libsasl2 +# requires missing nroff to build +touch saslauthd/saslauthd.8 +make install +cd .. + + +# Build openldap if needed +ldap_tag="${ldap_version}" +ldap_dir="openldap-${ldap_tag}" +if [ ! -d "${ldap_dir}" ]; then + curl -sL \ + https://www.openldap.org/software/download/OpenLDAP/openldap-release/openldap-${ldap_tag}.tgz \ + | tar xzf - + + cd "${ldap_dir}" + + ./configure --enable-backends=no --enable-null + make depend + make -C libraries/liblutil/ + make -C libraries/liblber/ + make -C libraries/libldap/ + make -C libraries/libldap_r/ +else + cd "${ldap_dir}" +fi + +# Install openldap +make -C libraries/liblber/ install +make -C libraries/libldap/ install +make -C libraries/libldap_r/ install +make -C include/ install +chmod +x /usr/local/lib/{libldap,liblber}*.so* +cd .. + + +# Build libpq if needed +postgres_tag="REL_${postgres_version//./_}" +postgres_dir="postgres-${postgres_tag}" +if [ ! 
-d "${postgres_dir}" ]; then + curl -sL \ + https://github.com/postgres/postgres/archive/${postgres_tag}.tar.gz \ + | tar xzf - + + cd "${postgres_dir}" + + # Match the default unix socket dir default with what defined on Ubuntu and + # Red Hat, which seems the most common location + sed -i 's|#define DEFAULT_PGSOCKET_DIR .*'\ +'|#define DEFAULT_PGSOCKET_DIR "/var/run/postgresql"|' \ + src/include/pg_config_manual.h + + # Without this, libpq ./configure fails on i686 + if [[ "$(uname -m)" == "i686" ]]; then + export LD_LIBRARY_PATH=/usr/local/lib + fi + + ./configure --prefix=/usr/local --without-readline \ + --with-gssapi --with-openssl --with-pam --with-ldap + make -C src/interfaces/libpq + make -C src/bin/pg_config + make -C src/include +else + cd "${postgres_dir}" +fi + +# Install libpq +make -C src/interfaces/libpq install +make -C src/bin/pg_config install +make -C src/include install +cd .. + +find /usr/local/ -name \*.so.\* -type f -exec strip --strip-unneeded {} \; diff --git a/scripts/build/build_macos.sh b/scripts/build/build_macos.sh new file mode 100755 index 0000000000000000000000000000000000000000..0fe4b56ce3a4f35bdc88b74e4e8946cd87eb3af4 --- /dev/null +++ b/scripts/build/build_macos.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +# Create macOS wheels for psycopg2 +# +# Following instructions from https://github.com/MacPython/wiki/wiki/Spinning-wheels +# Cargoculting pieces of implementation from https://github.com/matthew-brett/multibuild + +set -euo pipefail +set -x + +dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +prjdir="$( cd "${dir}/../.." 
&& pwd )" + +brew install gnu-sed postgresql@13 + +# Start the database for testing +brew services start postgresql + +for i in $(seq 10 -1 0); do + eval pg_isready && break + if [ $i == 0 ]; then + echo "PostgreSQL service not ready, giving up" + exit 1 + fi + echo "PostgreSQL service not ready, waiting a bit, attempts left: $i" + sleep 5 +done + +# Find psycopg version +version=$(grep -e ^PSYCOPG_VERSION "${prjdir}/setup.py" | gsed "s/.*'\(.*\)'/\1/") +# A gratuitous comment to fix broken vim syntax file: '") +distdir="${prjdir}/dist/psycopg2-$version" +mkdir -p "$distdir" + +# Install required python packages +pip install -U pip wheel delocate + +# Replace the package name +if [[ "${PACKAGE_NAME:-}" ]]; then + gsed -i "s/^setup(name=\"psycopg2\"/setup(name=\"${PACKAGE_NAME}\"/" \ + "${prjdir}/setup.py" +fi + +# Build the wheels +wheeldir="${prjdir}/wheels" +pip wheel -w ${wheeldir} . +delocate-listdeps ${wheeldir}/*.whl + +# Check where is the libpq. I'm gonna kill it for testing +if [[ -z "${LIBPQ:-}" ]]; then + export LIBPQ=$(delocate-listdeps ${wheeldir}/*.whl | grep libpq) +fi + +delocate-wheel ${wheeldir}/*.whl +# https://github.com/MacPython/wiki/wiki/Spinning-wheels#question-will-pip-give-me-a-broken-wheel +delocate-addplat --rm-orig -x 10_9 -x 10_10 ${wheeldir}/*.whl +cp ${wheeldir}/*.whl ${distdir} + +# kill the libpq to make sure tests don't depend on it +mv "$LIBPQ" "${LIBPQ}-bye" + +# Install and test the built wheel +pip install ${PACKAGE_NAME:-psycopg2} --no-index -f "$distdir" + +# Print psycopg and libpq versions +python -c "import psycopg2; print(psycopg2.__version__)" +python -c "import psycopg2; print(psycopg2.__libpq_version__)" +python -c "import psycopg2; print(psycopg2.extensions.libpq_version())" + +# fail if we are not using the expected libpq library +# Disabled as we just use what's available on the system on macOS +# if [[ "${WANT_LIBPQ:-}" ]]; then +# python -c "import psycopg2, sys; sys.exit(${WANT_LIBPQ} != 
psycopg2.extensions.libpq_version())" +# fi + +python -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')" + +# just because I'm a boy scout +mv "${LIBPQ}-bye" "$LIBPQ" diff --git a/scripts/build/build_manylinux2014.sh b/scripts/build/build_manylinux2014.sh new file mode 100755 index 0000000000000000000000000000000000000000..0e87bd543a71741a97438aa4776bc374fdca81e3 --- /dev/null +++ b/scripts/build/build_manylinux2014.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +# Create manylinux2014 wheels for psycopg2 +# +# manylinux2014 is built on CentOS 7, which packages an old version of the +# libssl, (1.0, which has concurrency problems with the Python libssl). So we +# need to build these libraries from source. +# +# Look at the .github/workflows/packages.yml file for hints about how to use it. + +set -euo pipefail +set -x + +dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +prjdir="$( cd "${dir}/../.." && pwd )" + +# Build all the available versions, or just the ones specified in PYVERS +if [ ! 
"${PYVERS:-}" ]; then + PYVERS="$(ls /opt/python/)" +fi + +# Find psycopg version +version=$(grep -e ^PSYCOPG_VERSION "${prjdir}/setup.py" | sed "s/.*'\(.*\)'/\1/") +# A gratuitous comment to fix broken vim syntax file: '") +distdir="${prjdir}/dist/psycopg2-$version" + +# Replace the package name +if [[ "${PACKAGE_NAME:-}" ]]; then + sed -i "s/^setup(name=\"psycopg2\"/setup(name=\"${PACKAGE_NAME}\"/" \ + "${prjdir}/setup.py" +fi + +# Build depending libraries +"${dir}/build_libpq.sh" > /dev/null + +# Create the wheel packages +for pyver in $PYVERS; do + pybin="/opt/python/${pyver}/bin" + "${pybin}/pip" wheel "${prjdir}" -w "${prjdir}/dist/" +done + +# Bundle external shared libraries into the wheels +for whl in "${prjdir}"/dist/*.whl; do + auditwheel repair "$whl" -w "$distdir" +done + +# Make sure the libpq is not in the system +for f in $(find /usr/local/lib -name libpq\*) ; do + mkdir -pv "/libpqbak/$(dirname $f)" + mv -v "$f" "/libpqbak/$(dirname $f)" +done + +# Install packages and test +cd "${prjdir}" +for pyver in $PYVERS; do + pybin="/opt/python/${pyver}/bin" + "${pybin}/pip" install ${PACKAGE_NAME:-psycopg2} --no-index -f "$distdir" + + # Print psycopg and libpq versions + "${pybin}/python" -c "import psycopg2; print(psycopg2.__version__)" + "${pybin}/python" -c "import psycopg2; print(psycopg2.__libpq_version__)" + "${pybin}/python" -c "import psycopg2; print(psycopg2.extensions.libpq_version())" + + # Fail if we are not using the expected libpq library + if [[ "${WANT_LIBPQ:-}" ]]; then + "${pybin}/python" -c "import psycopg2, sys; sys.exit(${WANT_LIBPQ} != psycopg2.extensions.libpq_version())" + fi + + "${pybin}/python" -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')" +done + +# Restore the libpq packages +for f in $(cd /libpqbak/ && find . 
-not -type d); do + mv -v "/libpqbak/$f" "/$f" +done diff --git a/scripts/build/build_manylinux_2_24.sh b/scripts/build/build_manylinux_2_24.sh new file mode 100755 index 0000000000000000000000000000000000000000..d83c84143c77e837bbb60332815c7710b6f3ab3d --- /dev/null +++ b/scripts/build/build_manylinux_2_24.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +# Create manylinux_2_24 wheels for psycopg2 +# +# Look at the .github/workflows/packages.yml file for hints about how to use it. + +set -euo pipefail +set -x + +dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +prjdir="$( cd "${dir}/../.." && pwd )" + +# Build all the available versions, or just the ones specified in PYVERS +if [ ! "${PYVERS:-}" ]; then + PYVERS="$(ls /opt/python/)" +fi + +# Find psycopg version +version=$(grep -e ^PSYCOPG_VERSION "${prjdir}/setup.py" | sed "s/.*'\(.*\)'/\1/") +# A gratuitous comment to fix broken vim syntax file: '") +distdir="${prjdir}/dist/psycopg2-$version" + +# Replace the package name +if [[ "${PACKAGE_NAME:-}" ]]; then + sed -i "s/^setup(name=\"psycopg2\"/setup(name=\"${PACKAGE_NAME}\"/" \ + "${prjdir}/setup.py" +fi + +# Install prerequisite libraries +curl -s https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - +echo "deb http://apt.postgresql.org/pub/repos/apt stretch-pgdg main" \ + > /etc/apt/sources.list.d/pgdg.list +apt-get -y update +apt-get install -y libpq-dev + +# Create the wheel packages +for pyver in $PYVERS; do + pybin="/opt/python/${pyver}/bin" + "${pybin}/pip" wheel "${prjdir}" -w "${prjdir}/dist/" +done + +# Bundle external shared libraries into the wheels +for whl in "${prjdir}"/dist/*.whl; do + auditwheel repair "$whl" -w "$distdir" +done + +# Make sure the libpq is not in the system +for f in $(find /usr/lib /usr/lib64 -name libpq\*) ; do + mkdir -pv "/libpqbak/$(dirname $f)" + mv -v "$f" "/libpqbak/$(dirname $f)" +done + +# Install packages and test +cd "${prjdir}" +for pyver in $PYVERS; do + pybin="/opt/python/${pyver}/bin" + "${pybin}/pip" 
install ${PACKAGE_NAME:-psycopg2} --no-index -f "$distdir" + + # Print psycopg and libpq versions + "${pybin}/python" -c "import psycopg2; print(psycopg2.__version__)" + "${pybin}/python" -c "import psycopg2; print(psycopg2.__libpq_version__)" + "${pybin}/python" -c "import psycopg2; print(psycopg2.extensions.libpq_version())" + + # Fail if we are not using the expected libpq library + if [[ "${WANT_LIBPQ:-}" ]]; then + "${pybin}/python" -c "import psycopg2, sys; sys.exit(${WANT_LIBPQ} != psycopg2.extensions.libpq_version())" + fi + + "${pybin}/python" -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')" +done + +# Restore the libpq packages +for f in $(cd /libpqbak/ && find . -not -type d); do + mv -v "/libpqbak/$f" "/$f" +done diff --git a/scripts/build/build_sdist.sh b/scripts/build/build_sdist.sh new file mode 100755 index 0000000000000000000000000000000000000000..6408cac9ccf3c7fa5969e80b00d926a1ec910600 --- /dev/null +++ b/scripts/build/build_sdist.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +set -euo pipefail +set -x + +dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +prjdir="$( cd "${dir}/../.." 
&& pwd )" + +# Find psycopg version +version=$(grep -e ^PSYCOPG_VERSION setup.py | sed "s/.*'\(.*\)'/\1/") +# A gratuitous comment to fix broken vim syntax file: '") +distdir="${prjdir}/dist/psycopg2-$version" + +# Replace the package name +if [[ "${PACKAGE_NAME:-}" ]]; then + sed -i "s/^setup(name=\"psycopg2\"/setup(name=\"${PACKAGE_NAME}\"/" \ + "${prjdir}/setup.py" +fi + +# Build the source package +python setup.py sdist -d "$distdir" + +# install and test +pip install "${distdir}"/*.tar.gz + +python -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')" diff --git a/scripts/build/download_packages_appveyor.py b/scripts/build/download_packages_appveyor.py new file mode 100755 index 0000000000000000000000000000000000000000..afdae3794d291e82642226db990d047ba9ddaa81 --- /dev/null +++ b/scripts/build/download_packages_appveyor.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +"""Download packages from github actions artifacts +""" + +import os +import re +import sys +import logging +import datetime as dt +from pathlib import Path + +import requests + +logger = logging.getLogger() +logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s") + +API_URL = "https://ci.appveyor.com/api" +REPOS = "psycopg/psycopg2" +WORKFLOW_NAME = "Build packages" + + +class ScriptError(Exception): + """Controlled exception raised by the script.""" + + +def main(): + try: + token = os.environ["APPVEYOR_TOKEN"] + except KeyError: + raise ScriptError("please set a APPVEYOR_TOKEN to download artifacts") + + s = requests.Session() + s.headers["Content-Type"] = "application/json" + s.headers["Authorization"] = f"Bearer {token}" + + logger.info("fetching last run") + resp = s.get(f"{API_URL}/projects/{REPOS}/") + resp.raise_for_status() + data = resp.json() + + updated_at = dt.datetime.fromisoformat( + re.sub(r"\.\d+", "", data["build"]["finished"]) + ) + now = dt.datetime.now(dt.timezone.utc) + age = now - updated_at + logger.info( + f"found build 
{data['build']['version']} updated {pretty_interval(age)} ago" + ) + if age > dt.timedelta(hours=6): + logger.warning("maybe it's a bit old?") + + jobs = data["build"]["jobs"] + for job in jobs: + if job["status"] != "success": + raise ScriptError("status for job {job['jobId']} is {job['status']}") + + logger.info(f"fetching artifacts info for {job['name']}") + resp = s.get(f"{API_URL}/buildjobs/{job['jobId']}/artifacts/") + resp.raise_for_status() + afs = resp.json() + for af in afs: + fn = af["fileName"] + if fn.startswith("dist/"): + fn = fn.split("/", 1)[1] + dest = Path("packages") / fn + logger.info(f"downloading {dest}") + resp = s.get( + f"{API_URL}/buildjobs/{job['jobId']}/artifacts/{af['fileName']}" + ) + resp.raise_for_status() + if not dest.parent.exists(): + dest.parent.mkdir() + + with dest.open("wb") as f: + f.write(resp.content) + + logger.info("now you can run: 'twine upload -s packages/*'") + + +def pretty_interval(td): + secs = td.total_seconds() + mins, secs = divmod(secs, 60) + hours, mins = divmod(mins, 60) + days, hours = divmod(hours, 24) + if days: + return f"{int(days)} days, {int(hours)} hours, {int(mins)} minutes" + elif hours: + return f"{int(hours)} hours, {int(mins)} minutes" + else: + return f"{int(mins)} minutes" + + +if __name__ == "__main__": + try: + sys.exit(main()) + + except ScriptError as e: + logger.error("%s", e) + sys.exit(1) + + except KeyboardInterrupt: + logger.info("user interrupt") + sys.exit(1) diff --git a/scripts/build/download_packages_github.py b/scripts/build/download_packages_github.py new file mode 100755 index 0000000000000000000000000000000000000000..cd8003e5a95938940f1ae3b5bd7fdb33a746baad --- /dev/null +++ b/scripts/build/download_packages_github.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +"""Download packages from github actions artifacts +""" + +import io +import os +import sys +import logging +import datetime as dt +from pathlib import Path +from zipfile import ZipFile + +import requests + +logger = 
logging.getLogger() +logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s") + +REPOS = "psycopg/psycopg2" +WORKFLOW_NAME = "Build packages" + + +class ScriptError(Exception): + """Controlled exception raised by the script.""" + + +def main(): + try: + token = os.environ["GITHUB_TOKEN"] + except KeyError: + raise ScriptError("please set a GITHUB_TOKEN to download artifacts") + + s = requests.Session() + s.headers["Accept"] = "application/vnd.github.v3+json" + s.headers["Authorization"] = f"token {token}" + + logger.info("looking for recent runs") + resp = s.get(f"https://api.github.com/repos/{REPOS}/actions/runs?per_page=10") + resp.raise_for_status() + for run in resp.json()["workflow_runs"]: + if run["name"] == WORKFLOW_NAME: + break + else: + raise ScriptError(f"couldn't find {WORKFLOW_NAME!r} in recent runs") + + if run["status"] != "completed": + raise ScriptError(f"run #{run['run_number']} is in status {run['status']}") + + updated_at = dt.datetime.fromisoformat(run["updated_at"].replace("Z", "+00:00")) + now = dt.datetime.now(dt.timezone.utc) + age = now - updated_at + logger.info(f"found run #{run['run_number']} updated {pretty_interval(age)} ago") + if age > dt.timedelta(hours=6): + logger.warning("maybe it's a bit old?") + + logger.info(f"looking for run #{run['run_number']} artifacts") + resp = s.get(f"{run['url']}/artifacts") + resp.raise_for_status() + artifacts = resp.json()["artifacts"] + + dest = Path("packages") + if not dest.exists(): + logger.info(f"creating dir {dest}") + dest.mkdir() + + for artifact in artifacts: + logger.info(f"downloading {artifact['name']} archive") + zip_url = artifact["archive_download_url"] + resp = s.get(zip_url) + with ZipFile(io.BytesIO(resp.content)) as zf: + logger.info("extracting archive content") + zf.extractall(dest) + + logger.info(f"now you can run: 'twine upload -s {dest}/*'") + + +def pretty_interval(td): + secs = td.total_seconds() + mins, secs = divmod(secs, 60) + hours, 
mins = divmod(mins, 60) + days, hours = divmod(hours, 24) + if days: + return f"{int(days)} days, {int(hours)} hours, {int(mins)} minutes" + elif hours: + return f"{int(hours)} hours, {int(mins)} minutes" + else: + return f"{int(mins)} minutes" + + +if __name__ == "__main__": + try: + sys.exit(main()) + + except ScriptError as e: + logger.error("%s", e) + sys.exit(1) + + except KeyboardInterrupt: + logger.info("user interrupt") + sys.exit(1) diff --git a/scripts/make_errorcodes.py b/scripts/make_errorcodes.py new file mode 100755 index 0000000000000000000000000000000000000000..66ca87b6c083c6d2d03ffd4dae21d1fc94f2b3ec --- /dev/null +++ b/scripts/make_errorcodes.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 +"""Generate the errorcodes module starting from PostgreSQL documentation. + +The script can be run at a new PostgreSQL release to refresh the module. +""" + +# Copyright (C) 2010-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import re +import sys +from urllib.request import urlopen +from collections import defaultdict + + +def main(): + if len(sys.argv) != 2: + print(f"usage: {sys.argv[0]} /path/to/errorcodes.py", file=sys.stderr) + return 2 + + filename = sys.argv[1] + + file_start = read_base_file(filename) + # If you add a version to the list fix the docs (in errorcodes.rst) + classes, errors = fetch_errors( + ['9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '10', '11', '12', '13']) + + disambiguate(errors) + + f = open(filename, "w") + for line in file_start: + print(line, file=f) + for line in generate_module_data(classes, errors): + print(line, file=f) + + +def read_base_file(filename): + rv = [] + for line in open(filename): + rv.append(line.rstrip("\n")) + if line.startswith("# autogenerated"): + return rv + + raise ValueError("can't find the separator. Is this the right file?") + + +def parse_errors_txt(url): + classes = {} + errors = defaultdict(dict) + + page = urlopen(url) + for line in page: + # Strip comments and skip blanks + line = line.decode("ascii").split('#')[0].strip() + if not line: + continue + + # Parse a section + m = re.match(r"Section: (Class (..) 
- .+)", line) + if m: + label, class_ = m.groups() + classes[class_] = label + continue + + # Parse an error + m = re.match(r"(.....)\s+(?:E|W|S)\s+ERRCODE_(\S+)(?:\s+(\S+))?$", line) + if m: + errcode, macro, spec = m.groups() + # skip errcodes without specs as they are not publicly visible + if not spec: + continue + errlabel = spec.upper() + errors[class_][errcode] = errlabel + continue + + # We don't expect anything else + raise ValueError(f"unexpected line:\n{line}") + + return classes, errors + + +errors_txt_url = \ + "http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob_plain;" \ + "f=src/backend/utils/errcodes.txt;hb=%s" + + +def fetch_errors(versions): + classes = {} + errors = defaultdict(dict) + + for version in versions: + print(version, file=sys.stderr) + tver = tuple(map(int, version.split()[0].split('.'))) + tag = f"{tver[0] >= 10 and 'REL_' or 'REL'}{version.replace('.', '_')}_STABLE" + c1, e1 = parse_errors_txt(errors_txt_url % tag) + classes.update(c1) + + # This error was in old server versions but probably never used + # https://github.com/postgres/postgres/commit/12f87b2c82 + errors['22']['22020'] = 'INVALID_LIMIT_VALUE' + + for c, cerrs in e1.items(): + errors[c].update(cerrs) + + return classes, errors + + +def disambiguate(errors): + """ + Change name for exception defined more than once. 
+ + Change the first occurrence, because before introdcing the function + they were pretty much lost (see ticket #1133) + """ + # Note: if some code is missing it will be caught downstream + for code in "01004 22004 2F002 2F003 2F004".split(): + errors[code[:2]][code] += "_" + + +def generate_module_data(classes, errors): + yield "" + yield "# Error classes" + for clscode, clslabel in sorted(classes.items()): + err = clslabel.split(" - ")[1].split("(")[0] \ + .strip().replace(" ", "_").replace('/', "_").upper() + yield f"CLASS_{err} = {clscode!r}" + + seen = set() + + for clscode, clslabel in sorted(classes.items()): + yield "" + yield f"# {clslabel}" + + for errcode, errlabel in sorted(errors[clscode].items()): + if errlabel in seen: + raise Exception(f"error label already seen: {errlabel}") + seen.add(errlabel) + yield f"{errlabel} = {errcode!r}" + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/scripts/make_errors.py b/scripts/make_errors.py new file mode 100755 index 0000000000000000000000000000000000000000..fb22708f9f49476c65a2bfad224f26fc92f2f39d --- /dev/null +++ b/scripts/make_errors.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python3 +"""Generate the errors module from PostgreSQL source code. + +The script can be run at a new PostgreSQL release to refresh the module. +""" + +# Copyright (C) 2018-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import os +import re +import sys +from urllib.request import urlopen +from collections import defaultdict + + +def main(): + filename = os.path.join( + os.path.dirname(__file__), "../psycopg/sqlstate_errors.h") + + # If you add a version to the list fix the docs (in errors.rst) + classes, errors = fetch_errors( + ['9.1', '9.2', '9.3', '9.4', '9.5', '9.6', '10', '11', '12', '13']) + + f = open(filename, "w") + print("/*\n * Autogenerated by 'scripts/make_errors.py'.\n */\n", file=f) + for line in generate_module_data(classes, errors): + print(line, file=f) + + +def parse_errors_txt(url): + classes = {} + errors = defaultdict(dict) + + page = urlopen(url) + for line in page: + # Strip comments and skip blanks + line = line.decode('ascii').split('#')[0].strip() + if not line: + continue + + # Parse a section + m = re.match(r"Section: (Class (..) - .+)", line) + if m: + label, class_ = m.groups() + classes[class_] = label + continue + + # Parse an error + m = re.match(r"(.....)\s+(?:E|W|S)\s+ERRCODE_(\S+)(?:\s+(\S+))?$", line) + if m: + errcode, macro, spec = m.groups() + # skip errcodes without specs as they are not publicly visible + if not spec: + continue + errlabel = spec.upper() + errors[class_][errcode] = errlabel + continue + + # We don't expect anything else + raise ValueError(f"unexpected line:\n{line}") + + return classes, errors + + +errors_txt_url = \ + "http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob_plain;" \ + "f=src/backend/utils/errcodes.txt;hb=%s" + + +def fetch_errors(versions): + classes = {} + errors = defaultdict(dict) + + for version in versions: + print(version, file=sys.stderr) + tver = tuple(map(int, version.split()[0].split('.'))) + tag = f"{tver[0] >= 10 and 'REL_' or 'REL'}{version.replace('.', '_')}_STABLE" + c1, e1 = parse_errors_txt(errors_txt_url % tag) + classes.update(c1) + + for c, cerrs in e1.items(): + errors[c].update(cerrs) + + return classes, errors + + +def generate_module_data(classes, errors): + tmpl = 
'{"%(errcode)s", "%(cls)s"},' + specific = { + '38002': 'ModifyingSqlDataNotPermittedExt', + '38003': 'ProhibitedSqlStatementAttemptedExt', + '38004': 'ReadingSqlDataNotPermittedExt', + '39004': 'NullValueNotAllowedExt', + 'XX000': 'InternalError_', + } + + seen = set(""" + Error Warning InterfaceError DataError DatabaseError ProgrammingError + IntegrityError InternalError NotSupportedError OperationalError + QueryCanceledError TransactionRollbackError + """.split()) + + for clscode, clslabel in sorted(classes.items()): + if clscode in ('00', '01'): + # success and warning - never raised + continue + + yield f"\n/* {clslabel} */" + + for errcode, errlabel in sorted(errors[clscode].items()): + if errcode in specific: + clsname = specific[errcode] + else: + clsname = errlabel.title().replace('_', '') + if clsname in seen: + raise Exception(f"class already existing: {clsname}") + seen.add(clsname) + + yield tmpl % { + 'cls': clsname, + 'errcode': errcode + } + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/scripts/refcounter.py b/scripts/refcounter.py new file mode 100755 index 0000000000000000000000000000000000000000..e916dfadb388ce7605673213acf4bef16542b016 --- /dev/null +++ b/scripts/refcounter.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +"""Detect reference leaks after several unit test runs. + +The script runs the unit test and counts the objects alive after the run. If +the object count differs between the last two runs, a report is printed and the +script exits with error 1. +""" + +# Copyright (C) 2011-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import argparse +import gc +import sys +import difflib +import unittest +from pprint import pprint +from collections import defaultdict + + +def main(): + opt = parse_args() + + import tests + test = tests + if opt.suite: + test = getattr(test, opt.suite) + + sys.stdout.write(f"test suite {test.__name__}\n") + + for i in range(1, opt.nruns + 1): + sys.stdout.write(f"test suite run {i} of {opt.nruns}\n") + runner = unittest.TextTestRunner() + runner.run(test.test_suite()) + dump(i, opt) + + f1 = open(f'debug-{(opt.nruns - 1):02}.txt').readlines() + f2 = open(f'debug-{opt.nruns:02}.txt').readlines() + for line in difflib.unified_diff(f1, f2, + f"run {opt.nruns - 1}", f"run {opt.nruns}"): + sys.stdout.write(line) + + rv = f1 != f2 and 1 or 0 + + if opt.objs: + f1 = open(f'objs-{(opt.nruns - 1):02}.txt').readlines() + f2 = open(f'objs-{opt.nruns:02}.txt').readlines() + for line in difflib.unified_diff(f1, f2, + f"run {opt.nruns - 1}", f"run {opt.nruns}"): + sys.stdout.write(line) + + return rv + + +def parse_args(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument('--nruns', type=int, metavar="N", default=3, + help="number of test suite runs [default: %(default)d]") + parser.add_argument('--suite', metavar="NAME", + help="the test suite to run (e.g. 'test_cursor'). 
[default: all]") + parser.add_argument('--objs', metavar="TYPE", + help="in case of leaks, print a report of object TYPE " + "(support still incomplete)") + + return parser.parse_args() + + +def dump(i, opt): + gc.collect() + objs = gc.get_objects() + + c = defaultdict(int) + for o in objs: + c[type(o)] += 1 + + pprint( + sorted(((v, str(k)) for k, v in c.items()), reverse=True), + stream=open(f"debug-{i:02}.txt", "w")) + + if opt.objs: + co = [] + t = getattr(__builtins__, opt.objs) + for o in objs: + if type(o) is t: + co.append(o) + + # TODO: very incomplete + if t is dict: + co.sort(key=lambda d: d.items()) + else: + co.sort() + + pprint(co, stream=open(f"objs-{i:02}.txt", "w")) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/scripts/travis_update_docs.sh b/scripts/travis_update_docs.sh new file mode 100755 index 0000000000000000000000000000000000000000..c45ccc81bc0d4276b0e9a5fdf62eb1339e3b9ee4 --- /dev/null +++ b/scripts/travis_update_docs.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Trigger a rebuild of the psycopg.org website to update the documentation. +# The script is meant to run by Travis CI. + +set -euo pipefail + +# The travis token can be set at https://github.com/psycopg/psycopg2/settings/secrets/actions +# and can be set on a selected branch only (which should match the DOC_BRANCH +# in the psycopg-website Makefile, or it won't refresh a thing). 
+if [ -z "${TRAVIS_TOKEN:-}" ]; then + echo "skipping docs update: travis token not set" >&2 + exit 0 +fi + +echo "triggering psycopg-website rebuild" >&2 +curl -s -X POST \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -H "Travis-API-Version: 3" \ + -H "Authorization: token ${TRAVIS_TOKEN}" \ + -d "{\"request\": {\"branch\": \"${TRAVIS_BRANCH}\"}}" \ + https://api.travis-ci.com/repo/psycopg%2Fpsycopg-website/requests diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..043285547fc9ee8b0cac19c657b601f1c54a523b --- /dev/null +++ b/setup.cfg @@ -0,0 +1,22 @@ +[build_ext] +# PSYCOPG_DEBUG can be added to enable verbose debug information +define=PSYCOPG_DEBUG + +# "pg_config" is required to locate PostgreSQL headers and libraries needed to +# build psycopg2. If pg_config is not in the path or is installed under a +# different name set the following option to the pg_config full path. +pg_config= + +# For Windows only: +# Set to 1 if the PostgreSQL library was built with OpenSSL. +# Required to link in OpenSSL libraries and dependencies. +have_ssl=0 + +# Set to 1 to statically link against the postgresql client library. +static_libpq=0 + +# Add here eventual extra libraries required to link the module. +libraries= + +[metadata] +license_file = LICENSE diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..927945a29c7559c0e2c8f34a767f4bb7b0b73d87 --- /dev/null +++ b/setup.py @@ -0,0 +1,572 @@ +# setup.py - distutils packaging +# +# Copyright (C) 2003-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +"""Python-PostgreSQL Database Adapter + +psycopg2 is a PostgreSQL database adapter for the Python programming +language. psycopg2 was written with the aim of being very small and fast, +and stable as a rock. + +psycopg2 is different from the other database adapter because it was +designed for heavily multi-threaded applications that create and destroy +lots of cursors and make a conspicuous number of concurrent INSERTs or +UPDATEs. psycopg2 also provide full asynchronous operations and support +for coroutine libraries. +""" + + +import os +import sys +import re +import subprocess +from setuptools import setup, Extension +from distutils.command.build_ext import build_ext +from distutils.ccompiler import get_default_compiler +from distutils.errors import CompileError + +try: + import configparser +except ImportError: + import ConfigParser as configparser + +# Take a look at https://www.python.org/dev/peps/pep-0440/ +# for a consistent versioning pattern. + +PSYCOPG_VERSION = '2.9' + + +# note: if you are changing the list of supported Python version please fix +# the docs in install.rst and the /features/ page on the website. 
+classifiers = """\ +Development Status :: 5 - Production/Stable +Intended Audience :: Developers +License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) +Programming Language :: Python +Programming Language :: Python :: 3 +Programming Language :: Python :: 3.6 +Programming Language :: Python :: 3.7 +Programming Language :: Python :: 3.8 +Programming Language :: Python :: 3.9 +Programming Language :: Python :: 3 :: Only +Programming Language :: Python :: Implementation :: CPython +Programming Language :: C +Programming Language :: SQL +Topic :: Database +Topic :: Database :: Front-Ends +Topic :: Software Development +Topic :: Software Development :: Libraries :: Python Modules +Operating System :: Microsoft :: Windows +Operating System :: Unix +""" + +version_flags = ['dt', 'dec'] + +PLATFORM_IS_WINDOWS = sys.platform.lower().startswith('win') + + +class PostgresConfig: + def __init__(self, build_ext): + self.build_ext = build_ext + self.pg_config_exe = self.build_ext.pg_config + if not self.pg_config_exe: + self.pg_config_exe = self.autodetect_pg_config_path() + if self.pg_config_exe is None: + sys.stderr.write(""" +Error: pg_config executable not found. + +pg_config is required to build psycopg2 from source. Please add the directory +containing pg_config to the $PATH or specify the full executable path with the +option: + + python setup.py build_ext --pg-config /path/to/pg_config build ... + +or with the pg_config option in 'setup.cfg'. + +If you prefer to avoid building psycopg2 from source, please install the PyPI +'psycopg2-binary' package instead. + +For further information please check the 'doc/src/install.rst' file (also at +). + +""") + sys.exit(1) + + def query(self, attr_name): + """Spawn the pg_config executable, querying for the given config + name, and return the printed value, sanitized. 
""" + try: + pg_config_process = subprocess.Popen( + [self.pg_config_exe, "--" + attr_name], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + except OSError: + raise Warning( + f"Unable to find 'pg_config' file in '{self.pg_config_exe}'") + pg_config_process.stdin.close() + result = pg_config_process.stdout.readline().strip() + if not result: + raise Warning(pg_config_process.stderr.readline()) + if not isinstance(result, str): + result = result.decode('ascii') + return result + + def find_on_path(self, exename, path_directories=None): + if not path_directories: + path_directories = os.environ['PATH'].split(os.pathsep) + for dir_name in path_directories: + fullpath = os.path.join(dir_name, exename) + if os.path.isfile(fullpath): + return fullpath + return None + + def autodetect_pg_config_path(self): + """Find and return the path to the pg_config executable.""" + if PLATFORM_IS_WINDOWS: + return self.autodetect_pg_config_path_windows() + else: + return self.find_on_path('pg_config') + + def autodetect_pg_config_path_windows(self): + """Attempt several different ways of finding the pg_config + executable on Windows, and return its full path, if found.""" + + # This code only runs if they have not specified a pg_config option + # in the config file or via the commandline. + + # First, check for pg_config.exe on the PATH, and use that if found. + pg_config_exe = self.find_on_path('pg_config.exe') + if pg_config_exe: + return pg_config_exe + + # Now, try looking in the Windows Registry to find a PostgreSQL + # installation, and infer the path from that. 
+ pg_config_exe = self._get_pg_config_from_registry() + if pg_config_exe: + return pg_config_exe + + return None + + def _get_pg_config_from_registry(self): + try: + import winreg + except ImportError: + import _winreg as winreg + + reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + try: + pg_inst_list_key = winreg.OpenKey(reg, + 'SOFTWARE\\PostgreSQL\\Installations') + except OSError: + # No PostgreSQL installation, as best as we can tell. + return None + + try: + # Determine the name of the first subkey, if any: + try: + first_sub_key_name = winreg.EnumKey(pg_inst_list_key, 0) + except OSError: + return None + + pg_first_inst_key = winreg.OpenKey(reg, + 'SOFTWARE\\PostgreSQL\\Installations\\' + first_sub_key_name) + try: + pg_inst_base_dir = winreg.QueryValueEx( + pg_first_inst_key, 'Base Directory')[0] + finally: + winreg.CloseKey(pg_first_inst_key) + + finally: + winreg.CloseKey(pg_inst_list_key) + + pg_config_path = os.path.join( + pg_inst_base_dir, 'bin', 'pg_config.exe') + if not os.path.exists(pg_config_path): + return None + + return pg_config_path + + +class psycopg_build_ext(build_ext): + """Conditionally complement the setup.cfg options file. + + This class configures the include_dirs, library_dirs, libraries + options as required by the system. Most of the configuration happens + in finalize_options() method. + + If you want to set up the build step for a peculiar platform, add a + method finalize_PLAT(), where PLAT matches your sys.platform. 
+ """ + user_options = build_ext.user_options[:] + user_options.extend([ + ('use-pydatetime', None, + "Use Python datatime objects for date and time representation."), + ('pg-config=', None, + "The name of the pg_config binary and/or full path to find it"), + ('have-ssl', None, + "Compile with OpenSSL built PostgreSQL libraries (Windows only)."), + ('static-libpq', None, + "Statically link the PostgreSQL client library"), + ]) + + boolean_options = build_ext.boolean_options[:] + boolean_options.extend(('use-pydatetime', 'have-ssl', 'static-libpq')) + + def __init__(self, *args, **kwargs): + build_ext.__init__(self, *args, **kwargs) + + def initialize_options(self): + build_ext.initialize_options(self) + self.pgdir = None + self.have_ssl = have_ssl + self.static_libpq = static_libpq + self.pg_config = None + + def compiler_is_msvc(self): + return self.get_compiler_name().lower().startswith('msvc') + + def compiler_is_mingw(self): + return self.get_compiler_name().lower().startswith('mingw') + + def get_compiler_name(self): + """Return the name of the C compiler used to compile extensions. + + If a compiler was not explicitly set (on the command line, for + example), fall back on the default compiler. + """ + if self.compiler: + # distutils doesn't keep the type of self.compiler uniform; we + # compensate: + if isinstance(self.compiler, str): + name = self.compiler + else: + name = self.compiler.compiler_type + else: + name = get_default_compiler() + return name + + def get_export_symbols(self, extension): + # Fix MSVC seeing two of the same export symbols. 
+ if self.compiler_is_msvc(): + return [] + else: + return build_ext.get_export_symbols(self, extension) + + built_files = 0 + + def build_extension(self, extension): + # Count files compiled to print the binary blurb only if the first fails + compile_orig = getattr(self.compiler, '_compile', None) + if compile_orig is not None: + def _compile(*args, **kwargs): + rv = compile_orig(*args, **kwargs) + psycopg_build_ext.built_files += 1 + return rv + + self.compiler._compile = _compile + + try: + build_ext.build_extension(self, extension) + psycopg_build_ext.built_files += 1 + except CompileError: + if self.built_files == 0: + sys.stderr.write(""" +It appears you are missing some prerequisite to build the package from source. + +You may install a binary package by installing 'psycopg2-binary' from PyPI. +If you want to install psycopg2 from source, please install the packages +required for the build and try again. + +For further information please check the 'doc/src/install.rst' file (also at +). + +""") + raise + + def finalize_win32(self): + """Finalize build system configuration on win32 platform.""" + + # Add compiler-specific arguments: + extra_compiler_args = [] + + if self.compiler_is_mingw(): + # Default MinGW compilation of Python extensions on Windows uses + # only -O: + extra_compiler_args.append('-O3') + + # GCC-compiled Python on non-Windows platforms is built with strict + # aliasing disabled, but that must be done explicitly on Windows to + # avoid large numbers of warnings for perfectly idiomatic Python C + # API code. + extra_compiler_args.append('-fno-strict-aliasing') + + for extension in ext: # ext is a global list of Extension objects + extension.extra_compile_args.extend(extra_compiler_args) + # End of add-compiler-specific arguments section. 
+ + self.libraries.append("ws2_32") + self.libraries.append("advapi32") + if self.compiler_is_msvc(): + # MSVC requires an explicit "libpq" + if "pq" in self.libraries: + self.libraries.remove("pq") + self.libraries.append("secur32") + self.libraries.append("libpq") + self.libraries.append("shfolder") + for path in self.library_dirs: + if os.path.isfile(os.path.join(path, "ms", "libpq.lib")): + self.library_dirs.append(os.path.join(path, "ms")) + break + if self.have_ssl: + self.libraries.append("libcrypto") + self.libraries.append("libssl") + self.libraries.append("crypt32") + self.libraries.append("user32") + self.libraries.append("gdi32") + + def finalize_darwin(self): + """Finalize build system configuration on darwin platform.""" + self.libraries.append('ssl') + self.libraries.append('crypto') + + def finalize_linux(self): + """Finalize build system configuration on GNU/Linux platform.""" + # tell piro that GCC is fine and dandy, but not so MS compilers + for extension in self.extensions: + extension.extra_compile_args.append( + '-Wdeclaration-after-statement') + + finalize_linux2 = finalize_linux + finalize_linux3 = finalize_linux + + def finalize_options(self): + """Complete the build system configuration.""" + # An empty option in the setup.cfg causes self.libraries to include + # an empty string in the list of libraries + if self.libraries is not None and not self.libraries.strip(): + self.libraries = None + + build_ext.finalize_options(self) + + pg_config_helper = PostgresConfig(self) + + self.include_dirs.append(".") + if self.static_libpq: + if not getattr(self, 'link_objects', None): + self.link_objects = [] + self.link_objects.append( + os.path.join(pg_config_helper.query("libdir"), "libpq.a")) + else: + self.libraries.append("pq") + + try: + self.library_dirs.append(pg_config_helper.query("libdir")) + self.include_dirs.append(pg_config_helper.query("includedir")) + self.include_dirs.append(pg_config_helper.query("includedir-server")) + + # add 
includedirs from cppflags, libdirs from ldflags + for token in pg_config_helper.query("ldflags").split(): + if token.startswith("-L"): + self.library_dirs.append(token[2:]) + + for token in pg_config_helper.query("cppflags").split(): + if token.startswith("-I"): + self.include_dirs.append(token[2:]) + + pgversion = pg_config_helper.query("version").split()[1] + + verre = re.compile( + r"(\d+)(?:\.(\d+))?(?:(?:\.(\d+))|(devel|(?:alpha|beta|rc)\d+))?") + m = verre.match(pgversion) + if m: + pgmajor, pgminor, pgpatch = m.group(1, 2, 3) + # Postgres >= 10 doesn't have pgminor anymore. + pgmajor = int(pgmajor) + if pgmajor >= 10: + pgminor, pgpatch = None, pgminor + if pgminor is None or not pgminor.isdigit(): + pgminor = 0 + if pgpatch is None or not pgpatch.isdigit(): + pgpatch = 0 + pgminor = int(pgminor) + pgpatch = int(pgpatch) + else: + sys.stderr.write( + f"Error: could not determine PostgreSQL version from " + f"'{pgversion}'") + sys.exit(1) + + define_macros.append(("PG_VERSION_NUM", "%d%02d%02d" % + (9, 2, 4))) + + # enable lo64 if libpq >= 9.3 and Python 64 bits + if (pgmajor, pgminor) >= (9, 3) and is_py_64(): + define_macros.append(("HAVE_LO64", "1")) + + # Inject the flag in the version string already packed up + # because we didn't know the version before. + # With distutils everything is complicated. + for i, t in enumerate(define_macros): + if t[0] == 'PSYCOPG_VERSION': + n = t[1].find(')') + if n > 0: + define_macros[i] = ( + t[0], t[1][:n] + ' lo64' + t[1][n:]) + + except Warning: + w = sys.exc_info()[1] # work around py 2/3 different syntax + sys.stderr.write(f"Error: {w}\n") + sys.exit(1) + + if hasattr(self, "finalize_" + sys.platform): + getattr(self, "finalize_" + sys.platform)() + + +def is_py_64(): + # sys.maxint not available since Py 3.1; + # sys.maxsize not available before Py 2.6; + # this is portable at least between Py 2.4 and 3.4. 
+ import struct + return struct.calcsize("P") > 4 + + +# let's start with macro definitions (the ones not already in setup.cfg) +define_macros = [] +include_dirs = [] + +# gather information to build the extension module +ext = [] +data_files = [] + +# sources + +sources = [ + 'psycopgmodule.c', + 'green.c', 'pqpath.c', 'utils.c', 'bytes_format.c', + 'libpq_support.c', 'win32_support.c', 'solaris_support.c', 'aix_support.c', + + 'connection_int.c', 'connection_type.c', + 'cursor_int.c', 'cursor_type.c', 'column_type.c', + 'replication_connection_type.c', + 'replication_cursor_type.c', + 'replication_message_type.c', + 'diagnostics_type.c', 'error_type.c', 'conninfo_type.c', + 'lobject_int.c', 'lobject_type.c', + 'notify_type.c', 'xid_type.c', + + 'adapter_asis.c', 'adapter_binary.c', 'adapter_datetime.c', + 'adapter_list.c', 'adapter_pboolean.c', 'adapter_pdecimal.c', + 'adapter_pint.c', 'adapter_pfloat.c', 'adapter_qstring.c', + 'microprotocols.c', 'microprotocols_proto.c', + 'typecast.c', +] + +depends = [ + # headers + 'config.h', 'pgtypes.h', 'psycopg.h', 'python.h', 'connection.h', + 'cursor.h', 'diagnostics.h', 'error.h', 'green.h', 'lobject.h', + 'replication_connection.h', + 'replication_cursor.h', + 'replication_message.h', + 'notify.h', 'pqpath.h', 'xid.h', 'column.h', 'conninfo.h', + 'libpq_support.h', 'win32_support.h', 'utils.h', + + 'adapter_asis.h', 'adapter_binary.h', 'adapter_datetime.h', + 'adapter_list.h', 'adapter_pboolean.h', 'adapter_pdecimal.h', + 'adapter_pint.h', 'adapter_pfloat.h', 'adapter_qstring.h', + 'microprotocols.h', 'microprotocols_proto.h', + 'typecast.h', 'typecast_binary.h', 'sqlstate_errors.h', + + # included sources + 'typecast_array.c', 'typecast_basic.c', 'typecast_binary.c', + 'typecast_builtins.c', 'typecast_datetime.c', +] + +parser = configparser.ConfigParser() +parser.read('setup.cfg') + +# generate a nice version string to avoid confusion when users report bugs +version_flags.append('pq3') # no more a choice 
+version_flags.append('ext') # no more a choice + +if version_flags: + PSYCOPG_VERSION_EX = PSYCOPG_VERSION + f" ({' '.join(version_flags)})" +else: + PSYCOPG_VERSION_EX = PSYCOPG_VERSION + +define_macros.append(('PSYCOPG_VERSION', PSYCOPG_VERSION_EX)) + +if parser.has_option('build_ext', 'have_ssl'): + have_ssl = parser.getboolean('build_ext', 'have_ssl') +else: + have_ssl = False + +if parser.has_option('build_ext', 'static_libpq'): + static_libpq = parser.getboolean('build_ext', 'static_libpq') +else: + static_libpq = False + +# And now... explicitly add the defines from the .cfg files. +# Looks like setuptools or some other cog doesn't add them to the command line +# when called e.g. with "pip -e git+url'. This results in declarations +# duplicate on the commandline, which I hope is not a problem. +for define in parser.get('build_ext', 'define').split(','): + if define: + define_macros.append((define, '1')) + +# build the extension + +sources = [os.path.join('psycopg', x) for x in sources] +depends = [os.path.join('psycopg', x) for x in depends] + +ext.append(Extension("psycopg2._psycopg", sources, + define_macros=define_macros, + include_dirs=include_dirs, + depends=depends, + undef_macros=[])) + +try: + f = open("README.rst") + readme = f.read() + f.close() +except Exception: + print("failed to read readme: ignoring...") + readme = __doc__ + +setup(name="psycopg2", + version=PSYCOPG_VERSION, + author="Federico Di Gregorio", + author_email="fog@initd.org", + maintainer="Daniele Varrazzo", + maintainer_email="daniele.varrazzo@gmail.org", + url="https://psycopg.org/", + license="LGPL with exceptions", + platforms=["any"], + python_requires='>=3.6', + description=readme.split("\n")[0], + long_description="\n".join(readme.split("\n")[2:]).lstrip(), + classifiers=[x for x in classifiers.split("\n") if x], + data_files=data_files, + package_dir={'psycopg2': 'lib'}, + packages=['psycopg2'], + cmdclass={'build_ext': psycopg_build_ext}, + ext_modules=ext, + 
project_urls={ + 'Homepage': 'https://psycopg.org/', + 'Documentation': 'https://www.psycopg.org/docs/', + 'Code': 'https://github.com/psycopg/psycopg2', + 'Issue Tracker': 'https://github.com/psycopg/psycopg2/issues', + 'Download': 'https://pypi.org/project/psycopg2/', + }) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..65023752c1550af27631b33bf3bbfdff175d7111 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python + +# psycopg2 test suite +# +# Copyright (C) 2007-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +# Convert warnings into errors here. We can't do it with -W because on +# Travis importing site raises a warning. +import warnings +warnings.simplefilter('error') # noqa + +import sys +from .testconfig import dsn +import unittest + +from . import test_async +from . import test_bugX000 +from . import test_bug_gc +from . import test_cancel +from . import test_connection +from . import test_copy +from . import test_cursor +from . import test_dates +from . 
import test_errcodes +from . import test_errors +from . import test_extras_dictcursor +from . import test_fast_executemany +from . import test_green +from . import test_ipaddress +from . import test_lobject +from . import test_module +from . import test_notify +from . import test_psycopg2_dbapi20 +from . import test_quote +from . import test_replication +from . import test_sql +from . import test_transaction +from . import test_types_basic +from . import test_types_extras +from . import test_with + + +def test_suite(): + # If connection to test db fails, bail out early. + import psycopg2 + try: + cnn = psycopg2.connect(dsn) + except Exception as e: + print("Failed connection to test db:", e.__class__.__name__, e) + print("Please set env vars 'PSYCOPG2_TESTDB*' to valid values.") + sys.exit(1) + else: + cnn.close() + + suite = unittest.TestSuite() + suite.addTest(test_async.test_suite()) + suite.addTest(test_bugX000.test_suite()) + suite.addTest(test_bug_gc.test_suite()) + suite.addTest(test_cancel.test_suite()) + suite.addTest(test_connection.test_suite()) + suite.addTest(test_copy.test_suite()) + suite.addTest(test_cursor.test_suite()) + suite.addTest(test_dates.test_suite()) + suite.addTest(test_errcodes.test_suite()) + suite.addTest(test_errors.test_suite()) + suite.addTest(test_extras_dictcursor.test_suite()) + suite.addTest(test_fast_executemany.test_suite()) + suite.addTest(test_green.test_suite()) + suite.addTest(test_ipaddress.test_suite()) + suite.addTest(test_lobject.test_suite()) + suite.addTest(test_module.test_suite()) + suite.addTest(test_notify.test_suite()) + suite.addTest(test_psycopg2_dbapi20.test_suite()) + suite.addTest(test_quote.test_suite()) + suite.addTest(test_replication.test_suite()) + suite.addTest(test_sql.test_suite()) + suite.addTest(test_transaction.test_suite()) + suite.addTest(test_types_basic.test_suite()) + suite.addTest(test_types_extras.test_suite()) + suite.addTest(test_with.test_suite()) + return suite + + +if __name__ == 
'__main__': + unittest.main(defaultTest='test_suite') diff --git a/tests/dbapi20.py b/tests/dbapi20.py new file mode 100644 index 0000000000000000000000000000000000000000..b3c640515bd161e6d8acced7a893d1490b7048e7 --- /dev/null +++ b/tests/dbapi20.py @@ -0,0 +1,862 @@ +#!/usr/bin/env python +''' Python DB API 2.0 driver compliance unit test suite. + + This software is Public Domain and may be used without restrictions. + + "Now we have booze and barflies entering the discussion, plus rumours of + DBAs on drugs... and I won't tell you what flashes through my mind each + time I read the subject line with 'Anal Compliance' in it. All around + this is turning out to be a thoroughly unwholesome unit test." + + -- Ian Bicking +''' + +__rcs_id__ = '$Id: dbapi20.py,v 1.11 2005/01/02 02:41:01 zenzen Exp $' +__version__ = '$Revision: 1.12 $'[11:-2] +__author__ = 'Stuart Bishop ' + +import unittest +import time +import sys + + +# Revision 1.12 2009/02/06 03:35:11 kf7xm +# Tested okay with Python 3.0, includes last minute patches from Mark H. +# +# Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole +# Include latest changes from main branch +# Updates for py3k +# +# Revision 1.11 2005/01/02 02:41:01 zenzen +# Update author email address +# +# Revision 1.10 2003/10/09 03:14:14 zenzen +# Add test for DB API 2.0 optional extension, where database exceptions +# are exposed as attributes on the Connection object. +# +# Revision 1.9 2003/08/13 01:16:36 zenzen +# Minor tweak from Stefan Fleiter +# +# Revision 1.8 2003/04/10 00:13:25 zenzen +# Changes, as per suggestions by M.-A. 
Lemburg +# - Add a table prefix, to ensure namespace collisions can always be avoided +# +# Revision 1.7 2003/02/26 23:33:37 zenzen +# Break out DDL into helper functions, as per request by David Rushby +# +# Revision 1.6 2003/02/21 03:04:33 zenzen +# Stuff from Henrik Ekelund: +# added test_None +# added test_nextset & hooks +# +# Revision 1.5 2003/02/17 22:08:43 zenzen +# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize +# defaults to 1 & generic cursor.callproc test added +# +# Revision 1.4 2003/02/15 00:16:33 zenzen +# Changes, as per suggestions and bug reports by M.-A. Lemburg, +# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar +# - Class renamed +# - Now a subclass of TestCase, to avoid requiring the driver stub +# to use multiple inheritance +# - Reversed the polarity of buggy test in test_description +# - Test exception hierarchy correctly +# - self.populate is now self._populate(), so if a driver stub +# overrides self.ddl1 this change propagates +# - VARCHAR columns now have a width, which will hopefully make the +# DDL even more portable (this will be reversed if it causes more problems) +# - cursor.rowcount being checked after various execute and fetchXXX methods +# - Check for fetchall and fetchmany returning empty lists after results +# are exhausted (already checking for empty lists if select retrieved +# nothing +# - Fix bugs in test_setoutputsize_basic and test_setinputsizes +# + +class DatabaseAPI20Test(unittest.TestCase): + ''' Test a database self.driver for DB API 2.0 compatibility. + This implementation tests Gadfly, but the TestCase + is structured so that other self.drivers can subclass this + test case to ensure compliance with the DB-API. It is + expected that this TestCase may be expanded in the future + if ambiguities or edge conditions are discovered. + + The 'Optional Extensions' are not yet being tested. 
+ + self.drivers should subclass this test, overriding setUp, tearDown, + self.driver, connect_args and connect_kw_args. Class specification + should be as follows: + + from . import dbapi20 + class mytest(dbapi20.DatabaseAPI20Test): + [...] + + Don't 'from .dbapi20 import DatabaseAPI20Test', or you will + confuse the unit tester - just 'from . import dbapi20'. + ''' + + # The self.driver module. This should be the module where the 'connect' + # method is to be found + driver = None + connect_args = () # List of arguments to pass to connect + connect_kw_args = {} # Keyword arguments for connect + table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables + + ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix + ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix + xddl1 = 'drop table %sbooze' % table_prefix + xddl2 = 'drop table %sbarflys' % table_prefix + + lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase + + # Some drivers may need to override these helpers, for example adding + # a 'commit' after the execute. + def executeDDL1(self,cursor): + cursor.execute(self.ddl1) + + def executeDDL2(self,cursor): + cursor.execute(self.ddl2) + + def setUp(self): + ''' self.drivers should override this method to perform required setup + if any is necessary, such as creating the database. + ''' + pass + + def tearDown(self): + ''' self.drivers should override this method to perform required cleanup + if any is necessary, such as deleting the test database. + The default drops the tables that may be created. + ''' + con = self._connect() + try: + cur = con.cursor() + for ddl in (self.xddl1,self.xddl2): + try: + cur.execute(ddl) + con.commit() + except self.driver.Error: + # Assume table didn't exist. Other tests will check if + # execute is busted. 
+ pass + finally: + con.close() + + def _connect(self): + try: + return self.driver.connect( + *self.connect_args,**self.connect_kw_args + ) + except AttributeError: + self.fail("No connect method found in self.driver module") + + def test_connect(self): + con = self._connect() + con.close() + + def test_apilevel(self): + try: + # Must exist + apilevel = self.driver.apilevel + # Must equal 2.0 + self.assertEqual(apilevel,'2.0') + except AttributeError: + self.fail("Driver doesn't define apilevel") + + def test_threadsafety(self): + try: + # Must exist + threadsafety = self.driver.threadsafety + # Must be a valid value + self.failUnless(threadsafety in (0,1,2,3)) + except AttributeError: + self.fail("Driver doesn't define threadsafety") + + def test_paramstyle(self): + try: + # Must exist + paramstyle = self.driver.paramstyle + # Must be a valid value + self.failUnless(paramstyle in ( + 'qmark','numeric','named','format','pyformat' + )) + except AttributeError: + self.fail("Driver doesn't define paramstyle") + + def test_Exceptions(self): + # Make sure required exceptions exist, and are in the + # defined hierarchy. 
+ self.failUnless(issubclass(self.driver.Warning,Exception)) + self.failUnless(issubclass(self.driver.Error,Exception)) + self.failUnless( + issubclass(self.driver.InterfaceError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.DatabaseError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.OperationalError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.IntegrityError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.InternalError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.ProgrammingError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.NotSupportedError,self.driver.Error) + ) + + def test_ExceptionsAsConnectionAttributes(self): + # OPTIONAL EXTENSION + # Test for the optional DB API 2.0 extension, where the exceptions + # are exposed as attributes on the Connection object + # I figure this optional extension will be implemented by any + # driver author who is using this test suite, so it is enabled + # by default. 
+ con = self._connect() + drv = self.driver + self.failUnless(con.Warning is drv.Warning) + self.failUnless(con.Error is drv.Error) + self.failUnless(con.InterfaceError is drv.InterfaceError) + self.failUnless(con.DatabaseError is drv.DatabaseError) + self.failUnless(con.OperationalError is drv.OperationalError) + self.failUnless(con.IntegrityError is drv.IntegrityError) + self.failUnless(con.InternalError is drv.InternalError) + self.failUnless(con.ProgrammingError is drv.ProgrammingError) + self.failUnless(con.NotSupportedError is drv.NotSupportedError) + + + def test_commit(self): + con = self._connect() + try: + # Commit must work, even if it doesn't do anything + con.commit() + finally: + con.close() + + def test_rollback(self): + con = self._connect() + # If rollback is defined, it should either work or throw + # the documented exception + if hasattr(con,'rollback'): + try: + con.rollback() + except self.driver.NotSupportedError: + pass + + def test_cursor(self): + con = self._connect() + try: + cur = con.cursor() + finally: + con.close() + + def test_cursor_isolation(self): + con = self._connect() + try: + # Make sure cursors created from the same connection have + # the documented transaction isolation level + cur1 = con.cursor() + cur2 = con.cursor() + self.executeDDL1(cur1) + cur1.execute("insert into %sbooze values ('Victoria Bitter')" % ( + self.table_prefix + )) + cur2.execute("select name from %sbooze" % self.table_prefix) + booze = cur2.fetchall() + self.assertEqual(len(booze),1) + self.assertEqual(len(booze[0]),1) + self.assertEqual(booze[0][0],'Victoria Bitter') + finally: + con.close() + + def test_description(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + self.assertEqual(cur.description,None, + 'cursor.description should be none after executing a ' + 'statement that can return no rows (such as DDL)' + ) + cur.execute('select name from %sbooze' % self.table_prefix) + self.assertEqual(len(cur.description),1, + 
'cursor.description describes too many columns' + ) + self.assertEqual(len(cur.description[0]),7, + 'cursor.description[x] tuples must have 7 elements' + ) + self.assertEqual(cur.description[0][0].lower(),'name', + 'cursor.description[x][0] must return column name' + ) + self.assertEqual(cur.description[0][1],self.driver.STRING, + 'cursor.description[x][1] must return column type. Got %r' + % cur.description[0][1] + ) + + # Make sure self.description gets reset + self.executeDDL2(cur) + self.assertEqual(cur.description,None, + 'cursor.description not being set to None when executing ' + 'no-result statements (eg. DDL)' + ) + finally: + con.close() + + def test_rowcount(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + self.assertEqual(cur.rowcount,-1, + 'cursor.rowcount should be -1 after executing no-result ' + 'statements' + ) + cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( + self.table_prefix + )) + self.failUnless(cur.rowcount in (-1,1), + 'cursor.rowcount should == number or rows inserted, or ' + 'set to -1 after executing an insert statement' + ) + cur.execute("select name from %sbooze" % self.table_prefix) + self.failUnless(cur.rowcount in (-1,1), + 'cursor.rowcount should == number of rows returned, or ' + 'set to -1 after executing a select statement' + ) + self.executeDDL2(cur) + self.assertEqual(cur.rowcount,-1, + 'cursor.rowcount not being reset to -1 after executing ' + 'no-result statements' + ) + finally: + con.close() + + lower_func = 'lower' + def test_callproc(self): + con = self._connect() + try: + cur = con.cursor() + if self.lower_func and hasattr(cur,'callproc'): + r = cur.callproc(self.lower_func,('FOO',)) + self.assertEqual(len(r),1) + self.assertEqual(r[0],'FOO') + r = cur.fetchall() + self.assertEqual(len(r),1,'callproc produced no result set') + self.assertEqual(len(r[0]),1, + 'callproc produced invalid result set' + ) + self.assertEqual(r[0][0],'foo', + 'callproc produced invalid 
results'
+                )
+        finally:
+            con.close()
+
+    def test_close(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+        finally:
+            con.close()
+
+        # cursor.execute should raise an Error if called after connection
+        # closed
+        self.assertRaises(self.driver.Error,self.executeDDL1,cur)
+
+        # connection.commit should raise an Error if called after connection
+        # closed.
+        self.assertRaises(self.driver.Error,con.commit)
+
+        # connection.close should raise an Error if called more than once
+        # Issue discussed on DB-SIG: consensus seem that close() should not
+        # raised if called on closed objects. Issue reported back to Stuart.
+        # self.assertRaises(self.driver.Error,con.close)
+
+    def test_execute(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            self._paraminsert(cur)
+        finally:
+            con.close()
+
+    def _paraminsert(self,cur):
+        self.executeDDL1(cur)
+        cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
+            self.table_prefix
+            ))
+        self.failUnless(cur.rowcount in (-1,1))
+
+        if self.driver.paramstyle == 'qmark':
+            cur.execute(
+                'insert into %sbooze values (?)' % self.table_prefix,
+                ("Cooper's",)
+                )
+        elif self.driver.paramstyle == 'numeric':
+            cur.execute(
+                'insert into %sbooze values (:1)' % self.table_prefix,
+                ("Cooper's",)
+                )
+        elif self.driver.paramstyle == 'named':
+            cur.execute(
+                'insert into %sbooze values (:beer)' % self.table_prefix,
+                {'beer':"Cooper's"}
+                )
+        elif self.driver.paramstyle == 'format':
+            cur.execute(
+                'insert into %sbooze values (%%s)' % self.table_prefix,
+                ("Cooper's",)
+                )
+        elif self.driver.paramstyle == 'pyformat':
+            cur.execute(
+                'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
+                {'beer':"Cooper's"}
+                )
+        else:
+            self.fail('Invalid paramstyle')
+        self.failUnless(cur.rowcount in (-1,1))
+
+        cur.execute('select name from %sbooze' % self.table_prefix)
+        res = cur.fetchall()
+        self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
+        beers = [res[0][0],res[1][0]]
+        beers.sort()
+
self.assertEqual(beers[0],"Cooper's", + 'cursor.fetchall retrieved incorrect data, or data inserted ' + 'incorrectly' + ) + self.assertEqual(beers[1],"Victoria Bitter", + 'cursor.fetchall retrieved incorrect data, or data inserted ' + 'incorrectly' + ) + + def test_executemany(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + largs = [ ("Cooper's",) , ("Boag's",) ] + margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ] + if self.driver.paramstyle == 'qmark': + cur.executemany( + 'insert into %sbooze values (?)' % self.table_prefix, + largs + ) + elif self.driver.paramstyle == 'numeric': + cur.executemany( + 'insert into %sbooze values (:1)' % self.table_prefix, + largs + ) + elif self.driver.paramstyle == 'named': + cur.executemany( + 'insert into %sbooze values (:beer)' % self.table_prefix, + margs + ) + elif self.driver.paramstyle == 'format': + cur.executemany( + 'insert into %sbooze values (%%s)' % self.table_prefix, + largs + ) + elif self.driver.paramstyle == 'pyformat': + cur.executemany( + 'insert into %sbooze values (%%(beer)s)' % ( + self.table_prefix + ), + margs + ) + else: + self.fail('Unknown paramstyle') + self.failUnless(cur.rowcount in (-1,2), + 'insert using cursor.executemany set cursor.rowcount to ' + 'incorrect value %r' % cur.rowcount + ) + cur.execute('select name from %sbooze' % self.table_prefix) + res = cur.fetchall() + self.assertEqual(len(res),2, + 'cursor.fetchall retrieved incorrect number of rows' + ) + beers = [res[0][0],res[1][0]] + beers.sort() + self.assertEqual(beers[0],"Boag's",'incorrect data retrieved') + self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved') + finally: + con.close() + + def test_fetchone(self): + con = self._connect() + try: + cur = con.cursor() + + # cursor.fetchone should raise an Error if called before + # executing a select-type query + self.assertRaises(self.driver.Error,cur.fetchone) + + # cursor.fetchone should raise an Error if called after + # executing a 
query that cannot return rows + self.executeDDL1(cur) + self.assertRaises(self.driver.Error,cur.fetchone) + + cur.execute('select name from %sbooze' % self.table_prefix) + self.assertEqual(cur.fetchone(),None, + 'cursor.fetchone should return None if a query retrieves ' + 'no rows' + ) + self.failUnless(cur.rowcount in (-1,0)) + + # cursor.fetchone should raise an Error if called after + # executing a query that cannot return rows + cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( + self.table_prefix + )) + self.assertRaises(self.driver.Error,cur.fetchone) + + cur.execute('select name from %sbooze' % self.table_prefix) + r = cur.fetchone() + self.assertEqual(len(r),1, + 'cursor.fetchone should have retrieved a single row' + ) + self.assertEqual(r[0],'Victoria Bitter', + 'cursor.fetchone retrieved incorrect data' + ) + self.assertEqual(cur.fetchone(),None, + 'cursor.fetchone should return None if no more rows available' + ) + self.failUnless(cur.rowcount in (-1,1)) + finally: + con.close() + + samples = [ + 'Carlton Cold', + 'Carlton Draft', + 'Mountain Goat', + 'Redback', + 'Victoria Bitter', + 'XXXX' + ] + + def _populate(self): + ''' Return a list of sql commands to setup the DB for the fetch + tests. + ''' + populate = [ + f"insert into {self.table_prefix}booze values ('{s}')" + for s in self.samples + ] + return populate + + def test_fetchmany(self): + con = self._connect() + try: + cur = con.cursor() + + # cursor.fetchmany should raise an Error if called without + #issuing a query + self.assertRaises(self.driver.Error,cur.fetchmany,4) + + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + cur.execute('select name from %sbooze' % self.table_prefix) + r = cur.fetchmany() + self.assertEqual(len(r),1, + 'cursor.fetchmany retrieved incorrect number of rows, ' + 'default of arraysize is one.' 
+ ) + cur.arraysize=10 + r = cur.fetchmany(3) # Should get 3 rows + self.assertEqual(len(r),3, + 'cursor.fetchmany retrieved incorrect number of rows' + ) + r = cur.fetchmany(4) # Should get 2 more + self.assertEqual(len(r),2, + 'cursor.fetchmany retrieved incorrect number of rows' + ) + r = cur.fetchmany(4) # Should be an empty sequence + self.assertEqual(len(r),0, + 'cursor.fetchmany should return an empty sequence after ' + 'results are exhausted' + ) + self.failUnless(cur.rowcount in (-1,6)) + + # Same as above, using cursor.arraysize + cur.arraysize=4 + cur.execute('select name from %sbooze' % self.table_prefix) + r = cur.fetchmany() # Should get 4 rows + self.assertEqual(len(r),4, + 'cursor.arraysize not being honoured by fetchmany' + ) + r = cur.fetchmany() # Should get 2 more + self.assertEqual(len(r),2) + r = cur.fetchmany() # Should be an empty sequence + self.assertEqual(len(r),0) + self.failUnless(cur.rowcount in (-1,6)) + + cur.arraysize=6 + cur.execute('select name from %sbooze' % self.table_prefix) + rows = cur.fetchmany() # Should get all rows + self.failUnless(cur.rowcount in (-1,6)) + self.assertEqual(len(rows),6) + self.assertEqual(len(rows),6) + rows = [r[0] for r in rows] + rows.sort() + + # Make sure we get the right data back out + for i in range(0,6): + self.assertEqual(rows[i],self.samples[i], + 'incorrect data retrieved by cursor.fetchmany' + ) + + rows = cur.fetchmany() # Should return an empty list + self.assertEqual(len(rows),0, + 'cursor.fetchmany should return an empty sequence if ' + 'called after the whole result set has been fetched' + ) + self.failUnless(cur.rowcount in (-1,6)) + + self.executeDDL2(cur) + cur.execute('select name from %sbarflys' % self.table_prefix) + r = cur.fetchmany() # Should get empty sequence + self.assertEqual(len(r),0, + 'cursor.fetchmany should return an empty sequence if ' + 'query retrieved no rows' + ) + self.failUnless(cur.rowcount in (-1,0)) + + finally: + con.close() + + def test_fetchall(self): + 
con = self._connect()
+        try:
+            cur = con.cursor()
+            # cursor.fetchall should raise an Error if called
+            # without executing a query that may return rows (such
+            # as a select)
+            self.assertRaises(self.driver.Error, cur.fetchall)
+
+            self.executeDDL1(cur)
+            for sql in self._populate():
+                cur.execute(sql)
+
+            # cursor.fetchall should raise an Error if called
+            # after executing a statement that cannot return rows
+            self.assertRaises(self.driver.Error,cur.fetchall)
+
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            rows = cur.fetchall()
+            self.failUnless(cur.rowcount in (-1,len(self.samples)))
+            self.assertEqual(len(rows),len(self.samples),
+                'cursor.fetchall did not retrieve all rows'
+                )
+            rows = [r[0] for r in rows]
+            rows.sort()
+            for i in range(0,len(self.samples)):
+                self.assertEqual(rows[i],self.samples[i],
+                    'cursor.fetchall retrieved incorrect rows'
+                    )
+            rows = cur.fetchall()
+            self.assertEqual(
+                len(rows),0,
+                'cursor.fetchall should return an empty list if called '
+                'after the whole result set has been fetched'
+                )
+            self.failUnless(cur.rowcount in (-1,len(self.samples)))
+
+            self.executeDDL2(cur)
+            cur.execute('select name from %sbarflys' % self.table_prefix)
+            rows = cur.fetchall()
+            self.failUnless(cur.rowcount in (-1,0))
+            self.assertEqual(len(rows),0,
+                'cursor.fetchall should return an empty list if '
+                'a select query returns no rows'
+                )
+
+        finally:
+            con.close()
+
+    def test_mixedfetch(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            self.executeDDL1(cur)
+            for sql in self._populate():
+                cur.execute(sql)
+
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            rows1 = cur.fetchone()
+            rows23 = cur.fetchmany(2)
+            rows4 = cur.fetchone()
+            rows56 = cur.fetchall()
+            self.failUnless(cur.rowcount in (-1,6))
+            self.assertEqual(len(rows23),2,
+                'fetchmany returned incorrect number of rows'
+                )
+            self.assertEqual(len(rows56),2,
+                'fetchall returned incorrect number of rows'
+                )
+
+            rows = [rows1[0]]
+
rows.extend([rows23[0][0],rows23[1][0]]) + rows.append(rows4[0]) + rows.extend([rows56[0][0],rows56[1][0]]) + rows.sort() + for i in range(0,len(self.samples)): + self.assertEqual(rows[i],self.samples[i], + 'incorrect data retrieved or inserted' + ) + finally: + con.close() + + def help_nextset_setUp(self,cur): + ''' Should create a procedure called deleteme + that returns two result sets, first the + number of rows in booze then "name from booze" + ''' + raise NotImplementedError('Helper not implemented') + #sql=""" + # create procedure deleteme as + # begin + # select count(*) from booze + # select name from booze + # end + #""" + #cur.execute(sql) + + def help_nextset_tearDown(self,cur): + 'If cleaning up is needed after nextSetTest' + raise NotImplementedError('Helper not implemented') + #cur.execute("drop procedure deleteme") + + def test_nextset(self): + con = self._connect() + try: + cur = con.cursor() + if not hasattr(cur,'nextset'): + return + + try: + self.executeDDL1(cur) + sql=self._populate() + for sql in self._populate(): + cur.execute(sql) + + self.help_nextset_setUp(cur) + + cur.callproc('deleteme') + numberofrows=cur.fetchone() + assert numberofrows[0]== len(self.samples) + assert cur.nextset() + names=cur.fetchall() + assert len(names) == len(self.samples) + s=cur.nextset() + assert s is None, 'No more return sets, should return None' + finally: + self.help_nextset_tearDown(cur) + + finally: + con.close() + + def test_nextset(self): + raise NotImplementedError('Drivers need to override this test') + + def test_arraysize(self): + # Not much here - rest of the tests for this are in test_fetchmany + con = self._connect() + try: + cur = con.cursor() + self.failUnless(hasattr(cur,'arraysize'), + 'cursor.arraysize must be defined' + ) + finally: + con.close() + + def test_setinputsizes(self): + con = self._connect() + try: + cur = con.cursor() + cur.setinputsizes( (25,) ) + self._paraminsert(cur) # Make sure cursor still works + finally: + con.close() + 
+ def test_setoutputsize_basic(self): + # Basic test is to make sure setoutputsize doesn't blow up + con = self._connect() + try: + cur = con.cursor() + cur.setoutputsize(1000) + cur.setoutputsize(2000,0) + self._paraminsert(cur) # Make sure the cursor still works + finally: + con.close() + + def test_setoutputsize(self): + # Real test for setoutputsize is driver dependent + raise NotImplementedError('Driver needed to override this test') + + def test_None(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + cur.execute('insert into %sbooze values (NULL)' % self.table_prefix) + cur.execute('select name from %sbooze' % self.table_prefix) + r = cur.fetchall() + self.assertEqual(len(r),1) + self.assertEqual(len(r[0]),1) + self.assertEqual(r[0][0],None,'NULL value not returned as None') + finally: + con.close() + + def test_Date(self): + d1 = self.driver.Date(2002,12,25) + d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(d1),str(d2)) + + def test_Time(self): + t1 = self.driver.Time(13,45,30) + t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(t1),str(t2)) + + def test_Timestamp(self): + t1 = self.driver.Timestamp(2002,12,25,13,45,30) + t2 = self.driver.TimestampFromTicks( + time.mktime((2002,12,25,13,45,30,0,0,0)) + ) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(t1),str(t2)) + + def test_Binary(self): + b = self.driver.Binary(b'Something') + b = self.driver.Binary(b'') + + def test_STRING(self): + self.failUnless(hasattr(self.driver,'STRING'), + 'module.STRING must be defined' + ) + + def test_BINARY(self): + self.failUnless(hasattr(self.driver,'BINARY'), + 'module.BINARY must be defined.' 
+ ) + + def test_NUMBER(self): + self.failUnless(hasattr(self.driver,'NUMBER'), + 'module.NUMBER must be defined.' + ) + + def test_DATETIME(self): + self.failUnless(hasattr(self.driver,'DATETIME'), + 'module.DATETIME must be defined.' + ) + + def test_ROWID(self): + self.failUnless(hasattr(self.driver,'ROWID'), + 'module.ROWID must be defined.' + ) diff --git a/tests/dbapi20_tpc.py b/tests/dbapi20_tpc.py new file mode 100644 index 0000000000000000000000000000000000000000..fccc7756c0059e67f7917a2b5c495d3a994ef385 --- /dev/null +++ b/tests/dbapi20_tpc.py @@ -0,0 +1,144 @@ +""" Python DB API 2.0 driver Two Phase Commit compliance test suite. + +""" + +import unittest + + +class TwoPhaseCommitTests(unittest.TestCase): + + driver = None + + def connect(self): + """Make a database connection.""" + raise NotImplementedError + + _last_id = 0 + _global_id_prefix = "dbapi20_tpc:" + + def make_xid(self, con): + id = TwoPhaseCommitTests._last_id + TwoPhaseCommitTests._last_id += 1 + return con.xid(42, f"{self._global_id_prefix}{id}", "qualifier") + + def test_xid(self): + con = self.connect() + try: + xid = con.xid(42, "global", "bqual") + except self.driver.NotSupportedError: + self.fail("Driver does not support transaction IDs.") + + self.assertEquals(xid[0], 42) + self.assertEquals(xid[1], "global") + self.assertEquals(xid[2], "bqual") + + # Try some extremes for the transaction ID: + xid = con.xid(0, "", "") + self.assertEquals(tuple(xid), (0, "", "")) + xid = con.xid(0x7fffffff, "a" * 64, "b" * 64) + self.assertEquals(tuple(xid), (0x7fffffff, "a" * 64, "b" * 64)) + + def test_tpc_begin(self): + con = self.connect() + try: + xid = self.make_xid(con) + try: + con.tpc_begin(xid) + except self.driver.NotSupportedError: + self.fail("Driver does not support tpc_begin()") + finally: + con.close() + + def test_tpc_commit_without_prepare(self): + con = self.connect() + try: + xid = self.make_xid(con) + con.tpc_begin(xid) + cursor = con.cursor() + cursor.execute("SELECT 1") + 
con.tpc_commit() + finally: + con.close() + + def test_tpc_rollback_without_prepare(self): + con = self.connect() + try: + xid = self.make_xid(con) + con.tpc_begin(xid) + cursor = con.cursor() + cursor.execute("SELECT 1") + con.tpc_rollback() + finally: + con.close() + + def test_tpc_commit_with_prepare(self): + con = self.connect() + try: + xid = self.make_xid(con) + con.tpc_begin(xid) + cursor = con.cursor() + cursor.execute("SELECT 1") + con.tpc_prepare() + con.tpc_commit() + finally: + con.close() + + def test_tpc_rollback_with_prepare(self): + con = self.connect() + try: + xid = self.make_xid(con) + con.tpc_begin(xid) + cursor = con.cursor() + cursor.execute("SELECT 1") + con.tpc_prepare() + con.tpc_rollback() + finally: + con.close() + + def test_tpc_begin_in_transaction_fails(self): + con = self.connect() + try: + xid = self.make_xid(con) + + cursor = con.cursor() + cursor.execute("SELECT 1") + self.assertRaises(self.driver.ProgrammingError, + con.tpc_begin, xid) + finally: + con.close() + + def test_tpc_begin_in_tpc_transaction_fails(self): + con = self.connect() + try: + xid = self.make_xid(con) + + cursor = con.cursor() + cursor.execute("SELECT 1") + self.assertRaises(self.driver.ProgrammingError, + con.tpc_begin, xid) + finally: + con.close() + + def test_commit_in_tpc_fails(self): + # calling commit() within a TPC transaction fails with + # ProgrammingError. + con = self.connect() + try: + xid = self.make_xid(con) + con.tpc_begin(xid) + + self.assertRaises(self.driver.ProgrammingError, con.commit) + finally: + con.close() + + def test_rollback_in_tpc_fails(self): + # calling rollback() within a TPC transaction fails with + # ProgrammingError. 
+ con = self.connect() + try: + xid = self.make_xid(con) + con.tpc_begin(xid) + + self.assertRaises(self.driver.ProgrammingError, con.rollback) + finally: + con.close() diff --git a/tests/test_async.py b/tests/test_async.py new file mode 100755 index 0000000000000000000000000000000000000000..ee6651d7aa8964719f0af7fe57436785dab46e47 --- /dev/null +++ b/tests/test_async.py @@ -0,0 +1,546 @@ +#!/usr/bin/env python + +# test_async.py - unit test for asynchronous API +# +# Copyright (C) 2010-2019 Jan UrbaƄski +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import gc +import time +import unittest +import warnings + +import psycopg2 +import psycopg2.errors +from psycopg2 import extensions as ext + +from .testutils import (ConnectingTestCase, StringIO, skip_before_postgres, + skip_if_crdb, crdb_version, slow) + + +class PollableStub: + """A 'pollable' wrapper allowing analysis of the `poll()` calls.""" + def __init__(self, pollable): + self.pollable = pollable + self.polls = [] + + def fileno(self): + return self.pollable.fileno() + + def poll(self): + rv = self.pollable.poll() + self.polls.append(rv) + return rv + + +class AsyncTests(ConnectingTestCase): + + def setUp(self): + ConnectingTestCase.setUp(self) + + self.sync_conn = self.conn + self.conn = self.connect(async_=True) + + self.wait(self.conn) + + curs = self.conn.cursor() + if crdb_version(self.sync_conn) is not None: + curs.execute("set experimental_enable_temp_tables = 'on'") + self.wait(curs) + + curs.execute(''' + CREATE TEMPORARY TABLE table1 ( + id int PRIMARY KEY + )''') + self.wait(curs) + + def test_connection_setup(self): + cur = self.conn.cursor() + sync_cur = self.sync_conn.cursor() + del cur, sync_cur + + self.assert_(self.conn.async_) + self.assert_(not self.sync_conn.async_) + + # the async connection should be autocommit + self.assert_(self.conn.autocommit) + self.assertEquals(self.conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT) + + # check other properties to be found on the connection + self.assert_(self.conn.server_version) + self.assert_(self.conn.protocol_version in (2, 3)) + self.assert_(self.conn.encoding in ext.encodings) + + def test_async_named_cursor(self): + self.assertRaises(psycopg2.ProgrammingError, + self.conn.cursor, "name") + + def test_async_select(self): + cur = self.conn.cursor() + self.assertFalse(self.conn.isexecuting()) + cur.execute("select 'a'") + self.assertTrue(self.conn.isexecuting()) + + self.wait(cur) + + self.assertFalse(self.conn.isexecuting()) + self.assertEquals(cur.fetchone()[0], "a") + + @slow + 
@skip_before_postgres(8, 2) + def test_async_callproc(self): + cur = self.conn.cursor() + cur.callproc("pg_sleep", (0.1, )) + self.assertTrue(self.conn.isexecuting()) + + self.wait(cur) + self.assertFalse(self.conn.isexecuting()) + + @slow + def test_async_after_async(self): + cur = self.conn.cursor() + cur2 = self.conn.cursor() + del cur2 + + cur.execute("insert into table1 values (1)") + + # an async execute after an async one raises an exception + self.assertRaises(psycopg2.ProgrammingError, + cur.execute, "select * from table1") + # same for callproc + self.assertRaises(psycopg2.ProgrammingError, + cur.callproc, "version") + # but after you've waited it should be good + self.wait(cur) + cur.execute("select * from table1") + self.wait(cur) + + self.assertEquals(cur.fetchall()[0][0], 1) + + cur.execute("delete from table1") + self.wait(cur) + + cur.execute("select * from table1") + self.wait(cur) + + self.assertEquals(cur.fetchone(), None) + + def test_fetch_after_async(self): + cur = self.conn.cursor() + cur.execute("select 'a'") + + # a fetch after an asynchronous query should raise an error + self.assertRaises(psycopg2.ProgrammingError, + cur.fetchall) + # but after waiting it should work + self.wait(cur) + self.assertEquals(cur.fetchall()[0][0], "a") + + def test_rollback_while_async(self): + cur = self.conn.cursor() + + cur.execute("select 'a'") + + # a rollback should not work in asynchronous mode + self.assertRaises(psycopg2.ProgrammingError, self.conn.rollback) + + def test_commit_while_async(self): + cur = self.conn.cursor() + + cur.execute("begin") + self.wait(cur) + + cur.execute("insert into table1 values (1)") + + # a commit should not work in asynchronous mode + self.assertRaises(psycopg2.ProgrammingError, self.conn.commit) + self.assertTrue(self.conn.isexecuting()) + + # but a manual commit should + self.wait(cur) + cur.execute("commit") + self.wait(cur) + + cur.execute("select * from table1") + self.wait(cur) + 
self.assertEquals(cur.fetchall()[0][0], 1) + + cur.execute("delete from table1") + self.wait(cur) + + cur.execute("select * from table1") + self.wait(cur) + self.assertEquals(cur.fetchone(), None) + + def test_set_parameters_while_async(self): + cur = self.conn.cursor() + + cur.execute("select 'c'") + self.assertTrue(self.conn.isexecuting()) + + # getting transaction status works + self.assertEquals(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_ACTIVE) + self.assertTrue(self.conn.isexecuting()) + + # setting connection encoding should fail + self.assertRaises(psycopg2.ProgrammingError, + self.conn.set_client_encoding, "LATIN1") + + # same for transaction isolation + self.assertRaises(psycopg2.ProgrammingError, + self.conn.set_isolation_level, 1) + + def test_reset_while_async(self): + cur = self.conn.cursor() + cur.execute("select 'c'") + self.assertTrue(self.conn.isexecuting()) + + # a reset should fail + self.assertRaises(psycopg2.ProgrammingError, self.conn.reset) + + def test_async_iter(self): + cur = self.conn.cursor() + + cur.execute("begin") + self.wait(cur) + cur.execute(""" + insert into table1 values (1); + insert into table1 values (2); + insert into table1 values (3); + """) + self.wait(cur) + cur.execute("select id from table1 order by id") + + # iteration fails if a query is underway + self.assertRaises(psycopg2.ProgrammingError, list, cur) + + # but after it's done it should work + self.wait(cur) + self.assertEquals(list(cur), [(1, ), (2, ), (3, )]) + self.assertFalse(self.conn.isexecuting()) + + def test_copy_while_async(self): + cur = self.conn.cursor() + cur.execute("select 'a'") + + # copy should fail + self.assertRaises(psycopg2.ProgrammingError, + cur.copy_from, + StringIO("1\n3\n5\n\\.\n"), "table1") + + def test_lobject_while_async(self): + # large objects should be prohibited + self.assertRaises(psycopg2.ProgrammingError, + self.conn.lobject) + + def test_async_executemany(self): + cur = self.conn.cursor() + self.assertRaises( 
+ psycopg2.ProgrammingError, + cur.executemany, "insert into table1 values (%s)", [1, 2, 3]) + + def test_async_scroll(self): + cur = self.conn.cursor() + cur.execute(""" + insert into table1 values (1); + insert into table1 values (2); + insert into table1 values (3); + """) + self.wait(cur) + cur.execute("select id from table1 order by id") + + # scroll should fail if a query is underway + self.assertRaises(psycopg2.ProgrammingError, cur.scroll, 1) + self.assertTrue(self.conn.isexecuting()) + + # but after it's done it should work + self.wait(cur) + cur.scroll(1) + self.assertEquals(cur.fetchall(), [(2, ), (3, )]) + + cur = self.conn.cursor() + cur.execute("select id from table1 order by id") + self.wait(cur) + + cur2 = self.conn.cursor() + self.assertRaises(psycopg2.ProgrammingError, cur2.scroll, 1) + + self.assertRaises(psycopg2.ProgrammingError, cur.scroll, 4) + + cur = self.conn.cursor() + cur.execute("select id from table1 order by id") + self.wait(cur) + cur.scroll(2) + cur.scroll(-1) + self.assertEquals(cur.fetchall(), [(2, ), (3, )]) + + def test_scroll(self): + cur = self.sync_conn.cursor() + cur.execute("create table table1 (id int)") + cur.execute(""" + insert into table1 values (1); + insert into table1 values (2); + insert into table1 values (3); + """) + cur.execute("select id from table1 order by id") + cur.scroll(2) + cur.scroll(-1) + self.assertEquals(cur.fetchall(), [(2, ), (3, )]) + + def test_async_dont_read_all(self): + cur = self.conn.cursor() + cur.execute("select repeat('a', 10000); select repeat('b', 10000)") + + # fetch the result + self.wait(cur) + + # it should be the result of the second query + self.assertEquals(cur.fetchone()[0], "b" * 10000) + + def test_async_subclass(self): + class MyConn(ext.connection): + def __init__(self, dsn, async_=0): + ext.connection.__init__(self, dsn, async_=async_) + + conn = self.connect(connection_factory=MyConn, async_=True) + self.assert_(isinstance(conn, MyConn)) + self.assert_(conn.async_) + 
conn.close() + + @slow + @skip_if_crdb("flush on write flakey") + def test_flush_on_write(self): + # a very large query requires a flush loop to be sent to the backend + curs = self.conn.cursor() + for mb in 1, 5, 10, 20, 50: + size = mb * 1024 * 1024 + stub = PollableStub(self.conn) + curs.execute("select %s;", ('x' * size,)) + self.wait(stub) + self.assertEqual(size, len(curs.fetchone()[0])) + if stub.polls.count(ext.POLL_WRITE) > 1: + return + + # This is more a testing glitch than an error: it happens + # on high load on linux: probably because the kernel has more + # buffers ready. A warning may be useful during development, + # but an error is bad during regression testing. + warnings.warn("sending a large query didn't trigger block on write.") + + def test_sync_poll(self): + cur = self.sync_conn.cursor() + cur.execute("select 1") + # polling with a sync query works + cur.connection.poll() + self.assertEquals(cur.fetchone()[0], 1) + + @slow + @skip_if_crdb("notify") + def test_notify(self): + cur = self.conn.cursor() + sync_cur = self.sync_conn.cursor() + + sync_cur.execute("listen test_notify") + self.sync_conn.commit() + cur.execute("notify test_notify") + self.wait(cur) + + self.assertEquals(self.sync_conn.notifies, []) + + pid = self.conn.info.backend_pid + for _ in range(5): + self.wait(self.sync_conn) + if not self.sync_conn.notifies: + time.sleep(0.5) + continue + self.assertEquals(len(self.sync_conn.notifies), 1) + self.assertEquals(self.sync_conn.notifies.pop(), + (pid, "test_notify")) + return + self.fail("No NOTIFY in 2.5 seconds") + + def test_async_fetch_wrong_cursor(self): + cur1 = self.conn.cursor() + cur2 = self.conn.cursor() + cur1.execute("select 1") + + self.wait(cur1) + self.assertFalse(self.conn.isexecuting()) + # fetching from a cursor with no results is an error + self.assertRaises(psycopg2.ProgrammingError, cur2.fetchone) + # fetching from the correct cursor works + self.assertEquals(cur1.fetchone()[0], 1) + + def test_error(self): + 
cur = self.conn.cursor() + cur.execute("insert into table1 values (%s)", (1, )) + self.wait(cur) + cur.execute("insert into table1 values (%s)", (1, )) + # this should fail + self.assertRaises(psycopg2.IntegrityError, self.wait, cur) + cur.execute("insert into table1 values (%s); " + "insert into table1 values (%s)", (2, 2)) + # this should fail as well (Postgres behaviour) + self.assertRaises(psycopg2.IntegrityError, self.wait, cur) + # but this should work + if crdb_version(self.sync_conn) is None: + cur.execute("insert into table1 values (%s)", (2, )) + self.wait(cur) + # and the cursor should be usable afterwards + cur.execute("insert into table1 values (%s)", (3, )) + self.wait(cur) + cur.execute("select * from table1 order by id") + self.wait(cur) + self.assertEquals(cur.fetchall(), [(1, ), (2, ), (3, )]) + cur.execute("delete from table1") + self.wait(cur) + + def test_stop_on_first_error(self): + cur = self.conn.cursor() + cur.execute("select 1; select x; select 1/0; select 2") + self.assertRaises(psycopg2.errors.UndefinedColumn, self.wait, cur) + + cur.execute("select 1") + self.wait(cur) + self.assertEqual(cur.fetchone(), (1,)) + + def test_error_two_cursors(self): + cur = self.conn.cursor() + cur2 = self.conn.cursor() + cur.execute("select * from no_such_table") + self.assertRaises(psycopg2.ProgrammingError, self.wait, cur) + cur2.execute("select 1") + self.wait(cur2) + self.assertEquals(cur2.fetchone()[0], 1) + + @skip_if_crdb("notice") + def test_notices(self): + del self.conn.notices[:] + cur = self.conn.cursor() + if self.conn.info.server_version >= 90300: + cur.execute("set client_min_messages=debug1") + self.wait(cur) + cur.execute("create temp table chatty (id serial primary key);") + self.wait(cur) + self.assertEqual("CREATE TABLE", cur.statusmessage) + self.assert_(self.conn.notices) + + def test_async_cursor_gone(self): + cur = self.conn.cursor() + cur.execute("select 42;") + del cur + gc.collect() + self.assertRaises(psycopg2.InterfaceError, 
self.wait, self.conn) + + # The connection is still usable + cur = self.conn.cursor() + cur.execute("select 42;") + self.wait(self.conn) + self.assertEqual(cur.fetchone(), (42,)) + + @skip_if_crdb("copy") + def test_async_connection_error_message(self): + try: + cnn = psycopg2.connect('dbname=thisdatabasedoesntexist', async_=True) + self.wait(cnn) + except psycopg2.Error as e: + self.assertNotEqual(str(e), "asynchronous connection failed", + "connection error reason lost") + else: + self.fail("no exception raised") + + @skip_before_postgres(8, 2) + def test_copy_no_hang(self): + cur = self.conn.cursor() + cur.execute("copy (select 1) to stdout") + self.assertRaises(psycopg2.ProgrammingError, self.wait, self.conn) + + @slow + @skip_if_crdb("notice") + @skip_before_postgres(9, 0) + def test_non_block_after_notification(self): + from select import select + + cur = self.conn.cursor() + cur.execute(""" + select 1; + do $$ + begin + raise notice 'hello'; + end + $$ language plpgsql; + select pg_sleep(1); + """) + + polls = 0 + while True: + state = self.conn.poll() + if state == psycopg2.extensions.POLL_OK: + break + elif state == psycopg2.extensions.POLL_READ: + select([self.conn], [], [], 0.1) + elif state == psycopg2.extensions.POLL_WRITE: + select([], [self.conn], [], 0.1) + else: + raise Exception("Unexpected result from poll: %r", state) + polls += 1 + + self.assert_(polls >= 8, polls) + + def test_poll_noop(self): + self.conn.poll() + + @skip_if_crdb("notify") + @skip_before_postgres(9, 0) + def test_poll_conn_for_notification(self): + with self.conn.cursor() as cur: + cur.execute("listen test") + self.wait(cur) + + with self.sync_conn.cursor() as cur: + cur.execute("notify test, 'hello'") + self.sync_conn.commit() + + for i in range(10): + self.conn.poll() + + if self.conn.notifies: + n = self.conn.notifies.pop() + self.assertEqual(n.channel, 'test') + self.assertEqual(n.payload, 'hello') + break + time.sleep(0.1) + else: + self.fail("No notification received") + 
+ def test_close(self): + self.conn.close() + self.assertTrue(self.conn.closed) + self.assertTrue(self.conn.async_) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_bugX000.py b/tests/test_bugX000.py new file mode 100755 index 0000000000000000000000000000000000000000..b7c672f29c7c243bb3e52de948c8bcea03705151 --- /dev/null +++ b/tests/test_bugX000.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python + +# bugX000.py - test for DateTime object allocation bug +# +# Copyright (C) 2007-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import psycopg2 +import time +import unittest + + +class DateTimeAllocationBugTestCase(unittest.TestCase): + def test_date_time_allocation_bug(self): + d1 = psycopg2.Date(2002, 12, 25) + d2 = psycopg2.DateFromTicks(time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))) + t1 = psycopg2.Time(13, 45, 30) + t2 = psycopg2.TimeFromTicks(time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))) + t1 = psycopg2.Timestamp(2002, 12, 25, 13, 45, 30) + t2 = psycopg2.TimestampFromTicks( + time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0))) + del d1, d2, t1, t2 + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_bug_gc.py b/tests/test_bug_gc.py new file mode 100755 index 0000000000000000000000000000000000000000..650d1a55c23cd24262ee04183babc00328fd168f --- /dev/null +++ b/tests/test_bug_gc.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +# bug_gc.py - test for refcounting/GC bug +# +# Copyright (C) 2010-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import psycopg2 +import psycopg2.extensions +import unittest +import gc + +from .testutils import ConnectingTestCase, skip_if_no_uuid + + +class StolenReferenceTestCase(ConnectingTestCase): + @skip_if_no_uuid + def test_stolen_reference_bug(self): + def fish(val, cur): + gc.collect() + return 42 + UUID = psycopg2.extensions.new_type((2950,), "UUID", fish) + psycopg2.extensions.register_type(UUID, self.conn) + curs = self.conn.cursor() + curs.execute("select 'b5219e01-19ab-4994-b71e-149225dc51e4'::uuid") + curs.fetchone() + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_cancel.py b/tests/test_cancel.py new file mode 100755 index 0000000000000000000000000000000000000000..9320d3e5f37341cc16b2915d7cb2316a38965c56 --- /dev/null +++ b/tests/test_cancel.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python + +# test_cancel.py - unit test for query cancellation +# +# Copyright (C) 2010-2019 Jan UrbaƄski +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import time +import threading + +import psycopg2 +import psycopg2.extensions +from psycopg2 import extras + +from .testconfig import dsn +import unittest +from .testutils import ConnectingTestCase, skip_before_postgres, slow +from .testutils import skip_if_crdb + + +class CancelTests(ConnectingTestCase): + + def setUp(self): + ConnectingTestCase.setUp(self) + + skip_if_crdb("cancel", self.conn) + + cur = self.conn.cursor() + cur.execute(''' + CREATE TEMPORARY TABLE table1 ( + id int PRIMARY KEY + )''') + self.conn.commit() + + def test_empty_cancel(self): + self.conn.cancel() + + @slow + @skip_before_postgres(8, 2) + def test_cancel(self): + errors = [] + + def neverending(conn): + cur = conn.cursor() + try: + self.assertRaises(psycopg2.extensions.QueryCanceledError, + cur.execute, "select pg_sleep(60)") + # make sure the connection still works + conn.rollback() + cur.execute("select 1") + self.assertEqual(cur.fetchall(), [(1, )]) + except Exception as e: + errors.append(e) + raise + + def canceller(conn): + cur = conn.cursor() + try: + conn.cancel() + except Exception as e: + errors.append(e) + raise + del cur + + thread1 = threading.Thread(target=neverending, args=(self.conn, )) + # wait a bit to make sure that the other thread is already in + # pg_sleep -- ugly and racy, but the chances are ridiculously low + thread2 = threading.Timer(0.3, canceller, args=(self.conn, )) + thread1.start() + thread2.start() + thread1.join() + thread2.join() + + self.assertEqual(errors, []) + + @slow + @skip_before_postgres(8, 2) + def test_async_cancel(self): + async_conn = psycopg2.connect(dsn, async_=True) + self.assertRaises(psycopg2.OperationalError, async_conn.cancel) + extras.wait_select(async_conn) + cur = async_conn.cursor() + cur.execute("select pg_sleep(10)") + time.sleep(1) + self.assertTrue(async_conn.isexecuting()) + async_conn.cancel() + self.assertRaises(psycopg2.extensions.QueryCanceledError, + extras.wait_select, async_conn) + cur.execute("select 1") + 
extras.wait_select(async_conn) + self.assertEqual(cur.fetchall(), [(1, )]) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_connection.py b/tests/test_connection.py new file mode 100755 index 0000000000000000000000000000000000000000..c8009cd09cc43478cd339a173e74fcce52ab2206 --- /dev/null +++ b/tests/test_connection.py @@ -0,0 +1,1944 @@ +#!/usr/bin/env python + +# test_connection.py - unit test for connection attributes +# +# Copyright (C) 2008-2019 James Henstridge +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import gc +import os +import re +import sys +import time +import ctypes +import shutil +import tempfile +import threading +import subprocess as sp +from collections import deque +from operator import attrgetter +from weakref import ref + +import psycopg2 +import psycopg2.extras +from psycopg2 import extensions as ext + +from .testutils import ( + unittest, skip_if_no_superuser, skip_before_postgres, + skip_after_postgres, skip_before_libpq, skip_after_libpq, + ConnectingTestCase, skip_if_tpc_disabled, skip_if_windows, slow, + skip_if_crdb, crdb_version) + +from .testconfig import dbhost, dsn, dbname + + +class ConnectionTests(ConnectingTestCase): + def test_closed_attribute(self): + conn = self.conn + self.assertEqual(conn.closed, False) + conn.close() + self.assertEqual(conn.closed, True) + + def test_close_idempotent(self): + conn = self.conn + conn.close() + conn.close() + self.assert_(conn.closed) + + def test_cursor_closed_attribute(self): + conn = self.conn + curs = conn.cursor() + self.assertEqual(curs.closed, False) + curs.close() + self.assertEqual(curs.closed, True) + + # Closing the connection closes the cursor: + curs = conn.cursor() + conn.close() + self.assertEqual(curs.closed, True) + + @skip_if_crdb("backend pid") + @skip_before_postgres(8, 4) + @skip_if_no_superuser + @skip_if_windows + def test_cleanup_on_badconn_close(self): + # ticket #148 + conn = self.conn + cur = conn.cursor() + self.assertRaises(psycopg2.OperationalError, + cur.execute, "select pg_terminate_backend(pg_backend_pid())") + + self.assertEqual(conn.closed, 2) + conn.close() + self.assertEqual(conn.closed, 1) + + @skip_if_crdb("isolation level") + def test_reset(self): + conn = self.conn + # switch session characteristics + conn.autocommit = True + conn.isolation_level = 'serializable' + conn.readonly = True + if self.conn.info.server_version >= 90100: + conn.deferrable = False + + self.assert_(conn.autocommit) + self.assertEqual(conn.isolation_level, 
ext.ISOLATION_LEVEL_SERIALIZABLE) + self.assert_(conn.readonly is True) + if self.conn.info.server_version >= 90100: + self.assert_(conn.deferrable is False) + + conn.reset() + # now the session characteristics should be reverted + self.assert_(not conn.autocommit) + self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT) + self.assert_(conn.readonly is None) + if self.conn.info.server_version >= 90100: + self.assert_(conn.deferrable is None) + + @skip_if_crdb("notice") + def test_notices(self): + conn = self.conn + cur = conn.cursor() + if self.conn.info.server_version >= 90300: + cur.execute("set client_min_messages=debug1") + cur.execute("create temp table chatty (id serial primary key);") + self.assertEqual("CREATE TABLE", cur.statusmessage) + self.assert_(conn.notices) + + @skip_if_crdb("notice") + def test_notices_consistent_order(self): + conn = self.conn + cur = conn.cursor() + if self.conn.info.server_version >= 90300: + cur.execute("set client_min_messages=debug1") + cur.execute(""" + create temp table table1 (id serial); + create temp table table2 (id serial); + """) + cur.execute(""" + create temp table table3 (id serial); + create temp table table4 (id serial); + """) + self.assertEqual(4, len(conn.notices)) + self.assert_('table1' in conn.notices[0]) + self.assert_('table2' in conn.notices[1]) + self.assert_('table3' in conn.notices[2]) + self.assert_('table4' in conn.notices[3]) + + @slow + @skip_if_crdb("notice") + def test_notices_limited(self): + conn = self.conn + cur = conn.cursor() + if self.conn.info.server_version >= 90300: + cur.execute("set client_min_messages=debug1") + for i in range(0, 100, 10): + sql = " ".join(["create temp table table%d (id serial);" % j + for j in range(i, i + 10)]) + cur.execute(sql) + + self.assertEqual(50, len(conn.notices)) + self.assert_('table99' in conn.notices[-1], conn.notices[-1]) + + @slow + @skip_if_crdb("notice") + def test_notices_deque(self): + conn = self.conn + self.conn.notices = deque() 
+ cur = conn.cursor() + if self.conn.info.server_version >= 90300: + cur.execute("set client_min_messages=debug1") + + cur.execute(""" + create temp table table1 (id serial); + create temp table table2 (id serial); + """) + cur.execute(""" + create temp table table3 (id serial); + create temp table table4 (id serial);""") + self.assertEqual(len(conn.notices), 4) + self.assert_('table1' in conn.notices.popleft()) + self.assert_('table2' in conn.notices.popleft()) + self.assert_('table3' in conn.notices.popleft()) + self.assert_('table4' in conn.notices.popleft()) + self.assertEqual(len(conn.notices), 0) + + # not limited, but no error + for i in range(0, 100, 10): + sql = " ".join(["create temp table table2_%d (id serial);" % j + for j in range(i, i + 10)]) + cur.execute(sql) + + self.assertEqual(len([n for n in conn.notices if 'CREATE TABLE' in n]), + 100) + + @skip_if_crdb("notice") + def test_notices_noappend(self): + conn = self.conn + self.conn.notices = None # will make an error swallowes ok + cur = conn.cursor() + if self.conn.info.server_version >= 90300: + cur.execute("set client_min_messages=debug1") + + cur.execute("create temp table table1 (id serial);") + + self.assertEqual(self.conn.notices, None) + + def test_server_version(self): + self.assert_(self.conn.server_version) + + def test_protocol_version(self): + self.assert_(self.conn.protocol_version in (2, 3), + self.conn.protocol_version) + + def test_tpc_unsupported(self): + cnn = self.conn + if cnn.info.server_version >= 80100: + return self.skipTest("tpc is supported") + + self.assertRaises(psycopg2.NotSupportedError, + cnn.xid, 42, "foo", "bar") + + @slow + @skip_before_postgres(8, 2) + def test_concurrent_execution(self): + def slave(): + cnn = self.connect() + cur = cnn.cursor() + cur.execute("select pg_sleep(4)") + cur.close() + cnn.close() + + t1 = threading.Thread(target=slave) + t2 = threading.Thread(target=slave) + t0 = time.time() + t1.start() + t2.start() + t1.join() + t2.join() + 
self.assert_(time.time() - t0 < 7, + "something broken in concurrency") + + @skip_if_crdb("encoding") + def test_encoding_name(self): + self.conn.set_client_encoding("EUC_JP") + # conn.encoding is 'EUCJP' now. + cur = self.conn.cursor() + ext.register_type(ext.UNICODE, cur) + cur.execute("select 'foo'::text;") + self.assertEqual(cur.fetchone()[0], 'foo') + + def test_connect_nonnormal_envvar(self): + # We must perform encoding normalization at connection time + self.conn.close() + oldenc = os.environ.get('PGCLIENTENCODING') + os.environ['PGCLIENTENCODING'] = 'utf-8' # malformed spelling + try: + self.conn = self.connect() + finally: + if oldenc is not None: + os.environ['PGCLIENTENCODING'] = oldenc + else: + del os.environ['PGCLIENTENCODING'] + + def test_connect_no_string(self): + class MyString(str): + pass + + conn = psycopg2.connect(MyString(dsn)) + conn.close() + + def test_weakref(self): + conn = psycopg2.connect(dsn) + w = ref(conn) + conn.close() + del conn + gc.collect() + self.assert_(w() is None) + + @slow + def test_commit_concurrency(self): + # The problem is the one reported in ticket #103. Because of bad + # status check, we commit even when a commit is already on its way. + # We can detect this condition by the warnings. 
+ conn = self.conn + notices = [] + stop = [] + + def committer(): + while not stop: + conn.commit() + while conn.notices: + notices.append((2, conn.notices.pop())) + + cur = conn.cursor() + t1 = threading.Thread(target=committer) + t1.start() + for i in range(1000): + cur.execute("select %s;", (i,)) + conn.commit() + while conn.notices: + notices.append((1, conn.notices.pop())) + + # Stop the committer thread + stop.append(True) + + self.assert_(not notices, f"{len(notices)} notices raised") + + def test_connect_cursor_factory(self): + conn = self.connect(cursor_factory=psycopg2.extras.DictCursor) + cur = conn.cursor() + cur.execute("select 1 as a") + self.assertEqual(cur.fetchone()['a'], 1) + + def test_cursor_factory(self): + self.assertEqual(self.conn.cursor_factory, None) + cur = self.conn.cursor() + cur.execute("select 1 as a") + self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone()) + + self.conn.cursor_factory = psycopg2.extras.DictCursor + self.assertEqual(self.conn.cursor_factory, psycopg2.extras.DictCursor) + cur = self.conn.cursor() + cur.execute("select 1 as a") + self.assertEqual(cur.fetchone()['a'], 1) + + self.conn.cursor_factory = None + self.assertEqual(self.conn.cursor_factory, None) + cur = self.conn.cursor() + cur.execute("select 1 as a") + self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone()) + + def test_cursor_factory_none(self): + # issue #210 + conn = self.connect() + cur = conn.cursor(cursor_factory=None) + self.assertEqual(type(cur), ext.cursor) + + conn = self.connect(cursor_factory=psycopg2.extras.DictCursor) + cur = conn.cursor(cursor_factory=None) + self.assertEqual(type(cur), psycopg2.extras.DictCursor) + + @skip_if_crdb("connect any db") + def test_failed_init_status(self): + class SubConnection(ext.connection): + def __init__(self, dsn): + try: + super().__init__(dsn) + except Exception: + pass + + c = SubConnection("dbname=thereisnosuchdatabasemate password=foobar") + self.assert_(c.closed, "connection failed 
so it must be closed") + self.assert_('foobar' not in c.dsn, "password was not obscured") + + def test_get_native_connection(self): + conn = self.connect() + capsule = conn.get_native_connection() + # we can't do anything else in Python + self.assertIsNotNone(capsule) + + def test_pgconn_ptr(self): + conn = self.connect() + self.assert_(conn.pgconn_ptr is not None) + + try: + f = self.libpq.PQserverVersion + except AttributeError: + pass + else: + f.argtypes = [ctypes.c_void_p] + f.restype = ctypes.c_int + ver = f(conn.pgconn_ptr) + if ver == 0 and sys.platform == 'darwin': + return self.skipTest( + "I don't know why this func returns 0 on OSX") + + self.assertEqual(ver, conn.server_version) + + conn.close() + self.assert_(conn.pgconn_ptr is None) + + @slow + def test_multiprocess_close(self): + dir = tempfile.mkdtemp() + try: + with open(os.path.join(dir, "mptest.py"), 'w') as f: + f.write(f"""import time +import psycopg2 + +def thread(): + conn = psycopg2.connect({dsn!r}) + curs = conn.cursor() + for i in range(10): + curs.execute("select 1") + time.sleep(0.1) + +def process(): + time.sleep(0.2) +""") + + script = ("""\ +import sys +sys.path.insert(0, {dir!r}) +import time +import threading +import multiprocessing +import mptest + +t = threading.Thread(target=mptest.thread, name='mythread') +t.start() +time.sleep(0.2) +multiprocessing.Process(target=mptest.process, name='myprocess').start() +t.join() +""".format(dir=dir)) + + out = sp.check_output( + [sys.executable, '-c', script], stderr=sp.STDOUT) + self.assertEqual(out, b'', out) + finally: + shutil.rmtree(dir, ignore_errors=True) + + +class ParseDsnTestCase(ConnectingTestCase): + def test_parse_dsn(self): + self.assertEqual( + ext.parse_dsn('dbname=test user=tester password=secret'), + dict(user='tester', password='secret', dbname='test'), + "simple DSN parsed") + + self.assertRaises(psycopg2.ProgrammingError, ext.parse_dsn, + "dbname=test 2 user=tester password=secret") + + self.assertEqual( + 
ext.parse_dsn("dbname='test 2' user=tester password=secret"), + dict(user='tester', password='secret', dbname='test 2'), + "DSN with quoting parsed") + + # Can't really use assertRaisesRegexp() here since we need to + # make sure that secret is *not* exposed in the error message. + raised = False + try: + # unterminated quote after dbname: + ext.parse_dsn("dbname='test 2 user=tester password=secret") + except psycopg2.ProgrammingError as e: + raised = True + self.assertTrue(str(e).find('secret') < 0, + "DSN was not exposed in error message") + self.assertTrue(raised, "ProgrammingError raised due to invalid DSN") + + @skip_before_libpq(9, 2) + def test_parse_dsn_uri(self): + self.assertEqual(ext.parse_dsn('postgresql://tester:secret@/test'), + dict(user='tester', password='secret', dbname='test'), + "valid URI dsn parsed") + + raised = False + try: + # extra '=' after port value + ext.parse_dsn(dsn='postgresql://tester:secret@/test?port=1111=x') + except psycopg2.ProgrammingError as e: + raised = True + self.assertTrue(str(e).find('secret') < 0, + "URI was not exposed in error message") + self.assertTrue(raised, "ProgrammingError raised due to invalid URI") + + def test_unicode_value(self): + snowman = "\u2603" + d = ext.parse_dsn('dbname=' + snowman) + self.assertEqual(d['dbname'], snowman) + + def test_unicode_key(self): + snowman = "\u2603" + self.assertRaises(psycopg2.ProgrammingError, ext.parse_dsn, + snowman + '=' + snowman) + + def test_bad_param(self): + self.assertRaises(TypeError, ext.parse_dsn, None) + self.assertRaises(TypeError, ext.parse_dsn, 42) + + def test_str_subclass(self): + class MyString(str): + pass + + res = ext.parse_dsn(MyString("dbname=test")) + self.assertEqual(res, {'dbname': 'test'}) + + +class MakeDsnTestCase(ConnectingTestCase): + def test_empty_arguments(self): + self.assertEqual(ext.make_dsn(), '') + + def test_empty_string(self): + dsn = ext.make_dsn('') + self.assertEqual(dsn, '') + + def test_params_validation(self): + 
self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, 'dbnamo=a') + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, dbnamo='a') + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, 'dbname=a', nosuchparam='b') + + def test_empty_param(self): + dsn = ext.make_dsn(dbname='sony', password='') + self.assertDsnEqual(dsn, "dbname=sony password=''") + + def test_escape(self): + dsn = ext.make_dsn(dbname='hello world') + self.assertEqual(dsn, "dbname='hello world'") + + dsn = ext.make_dsn(dbname=r'back\slash') + self.assertEqual(dsn, r"dbname=back\\slash") + + dsn = ext.make_dsn(dbname="quo'te") + self.assertEqual(dsn, r"dbname=quo\'te") + + dsn = ext.make_dsn(dbname="with\ttab") + self.assertEqual(dsn, "dbname='with\ttab'") + + dsn = ext.make_dsn(dbname=r"\every thing'") + self.assertEqual(dsn, r"dbname='\\every thing\''") + + def test_database_is_a_keyword(self): + self.assertEqual(ext.make_dsn(database='sigh'), "dbname=sigh") + + def test_params_merging(self): + dsn = ext.make_dsn('dbname=foo host=bar', host='baz') + self.assertDsnEqual(dsn, 'dbname=foo host=baz') + + dsn = ext.make_dsn('dbname=foo', user='postgres') + self.assertDsnEqual(dsn, 'dbname=foo user=postgres') + + def test_no_dsn_munging(self): + dsnin = 'dbname=a host=b user=c password=d' + dsn = ext.make_dsn(dsnin) + self.assertEqual(dsn, dsnin) + + def test_null_args(self): + dsn = ext.make_dsn("dbname=foo", user="bar", password=None) + self.assertDsnEqual(dsn, "dbname=foo user=bar") + + @skip_before_libpq(9, 2) + def test_url_is_cool(self): + url = 'postgresql://tester:secret@/test?application_name=wat' + dsn = ext.make_dsn(url) + self.assertEqual(dsn, url) + + dsn = ext.make_dsn(url, application_name='woot') + self.assertDsnEqual(dsn, + 'dbname=test user=tester password=secret application_name=woot') + + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, 'postgresql://tester:secret@/test?nosuch=param') + self.assertRaises(psycopg2.ProgrammingError, + ext.make_dsn, 
url, nosuch="param") + + @skip_before_libpq(9, 3) + def test_get_dsn_parameters(self): + conn = self.connect() + d = conn.get_dsn_parameters() + self.assertEqual(d['dbname'], dbname) # the only param we can check reliably + self.assert_('password' not in d, d) + + +class IsolationLevelsTestCase(ConnectingTestCase): + + def setUp(self): + ConnectingTestCase.setUp(self) + + conn = self.connect() + cur = conn.cursor() + if crdb_version(conn) is not None: + cur.execute("create table if not exists isolevel (id integer)") + cur.execute("truncate isolevel") + conn.commit() + return + + try: + cur.execute("drop table isolevel;") + except psycopg2.ProgrammingError: + conn.rollback() + try: + cur.execute("create table isolevel (id integer);") + conn.commit() + finally: + conn.close() + + def test_isolation_level(self): + conn = self.connect() + self.assertEqual( + conn.isolation_level, + ext.ISOLATION_LEVEL_DEFAULT) + + def test_encoding(self): + conn = self.connect() + self.assert_(conn.encoding in ext.encodings) + + @skip_if_crdb("isolation level") + def test_set_isolation_level(self): + conn = self.connect() + curs = conn.cursor() + + levels = [ + ('read uncommitted', + ext.ISOLATION_LEVEL_READ_UNCOMMITTED), + ('read committed', ext.ISOLATION_LEVEL_READ_COMMITTED), + ('repeatable read', ext.ISOLATION_LEVEL_REPEATABLE_READ), + ('serializable', ext.ISOLATION_LEVEL_SERIALIZABLE), + ] + for name, level in levels: + conn.set_isolation_level(level) + + # the only values available on prehistoric PG versions + if conn.info.server_version < 80000: + if level in ( + ext.ISOLATION_LEVEL_READ_UNCOMMITTED, + ext.ISOLATION_LEVEL_REPEATABLE_READ): + name, level = levels[levels.index((name, level)) + 1] + + self.assertEqual(conn.isolation_level, level) + + curs.execute('show transaction_isolation;') + got_name = curs.fetchone()[0] + + self.assertEqual(name, got_name) + conn.commit() + + self.assertRaises(ValueError, conn.set_isolation_level, -1) + self.assertRaises(ValueError, 
conn.set_isolation_level, 5) + + def test_set_isolation_level_autocommit(self): + conn = self.connect() + curs = conn.cursor() + + conn.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT) + self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT) + self.assert_(conn.autocommit) + + conn.isolation_level = 'serializable' + self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE) + self.assert_(conn.autocommit) + + curs.execute('show transaction_isolation;') + self.assertEqual(curs.fetchone()[0], 'serializable') + + @skip_if_crdb("isolation level") + def test_set_isolation_level_default(self): + conn = self.connect() + curs = conn.cursor() + + conn.autocommit = True + curs.execute("set default_transaction_isolation to 'read committed'") + + conn.autocommit = False + conn.set_isolation_level(ext.ISOLATION_LEVEL_SERIALIZABLE) + self.assertEqual(conn.isolation_level, + ext.ISOLATION_LEVEL_SERIALIZABLE) + curs.execute("show transaction_isolation") + self.assertEqual(curs.fetchone()[0], "serializable") + + conn.rollback() + conn.set_isolation_level(ext.ISOLATION_LEVEL_DEFAULT) + curs.execute("show transaction_isolation") + self.assertEqual(curs.fetchone()[0], "read committed") + + def test_set_isolation_level_abort(self): + conn = self.connect() + cur = conn.cursor() + + self.assertEqual(ext.TRANSACTION_STATUS_IDLE, + conn.info.transaction_status) + cur.execute("insert into isolevel values (10);") + self.assertEqual(ext.TRANSACTION_STATUS_INTRANS, + conn.info.transaction_status) + + conn.set_isolation_level( + psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE) + self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE, + conn.info.transaction_status) + cur.execute("select count(*) from isolevel;") + self.assertEqual(0, cur.fetchone()[0]) + + cur.execute("insert into isolevel values (10);") + self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_INTRANS, + conn.info.transaction_status) + conn.set_isolation_level( + 
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE, + conn.info.transaction_status) + cur.execute("select count(*) from isolevel;") + self.assertEqual(0, cur.fetchone()[0]) + + cur.execute("insert into isolevel values (10);") + self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE, + conn.info.transaction_status) + conn.set_isolation_level( + psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED) + self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE, + conn.info.transaction_status) + cur.execute("select count(*) from isolevel;") + self.assertEqual(1, cur.fetchone()[0]) + self.assertEqual(conn.isolation_level, + psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED) + + def test_isolation_level_autocommit(self): + cnn1 = self.connect() + cnn2 = self.connect() + cnn2.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT) + + cur1 = cnn1.cursor() + cur1.execute("select count(*) from isolevel;") + self.assertEqual(0, cur1.fetchone()[0]) + cnn1.commit() + + cur2 = cnn2.cursor() + cur2.execute("insert into isolevel values (10);") + + cur1.execute("select count(*) from isolevel;") + self.assertEqual(1, cur1.fetchone()[0]) + + @skip_if_crdb("isolation level") + def test_isolation_level_read_committed(self): + cnn1 = self.connect() + cnn2 = self.connect() + cnn2.set_isolation_level(ext.ISOLATION_LEVEL_READ_COMMITTED) + + cur1 = cnn1.cursor() + cur1.execute("select count(*) from isolevel;") + self.assertEqual(0, cur1.fetchone()[0]) + cnn1.commit() + + cur2 = cnn2.cursor() + cur2.execute("insert into isolevel values (10);") + cur1.execute("insert into isolevel values (20);") + + cur2.execute("select count(*) from isolevel;") + self.assertEqual(1, cur2.fetchone()[0]) + cnn1.commit() + cur2.execute("select count(*) from isolevel;") + self.assertEqual(2, cur2.fetchone()[0]) + + cur1.execute("select count(*) from isolevel;") + self.assertEqual(1, cur1.fetchone()[0]) + cnn2.commit() + cur1.execute("select count(*) 
from isolevel;") + self.assertEqual(2, cur1.fetchone()[0]) + + @skip_if_crdb("isolation level") + def test_isolation_level_serializable(self): + cnn1 = self.connect() + cnn2 = self.connect() + cnn2.set_isolation_level(ext.ISOLATION_LEVEL_SERIALIZABLE) + + cur1 = cnn1.cursor() + cur1.execute("select count(*) from isolevel;") + self.assertEqual(0, cur1.fetchone()[0]) + cnn1.commit() + + cur2 = cnn2.cursor() + cur2.execute("insert into isolevel values (10);") + cur1.execute("insert into isolevel values (20);") + + cur2.execute("select count(*) from isolevel;") + self.assertEqual(1, cur2.fetchone()[0]) + cnn1.commit() + cur2.execute("select count(*) from isolevel;") + self.assertEqual(1, cur2.fetchone()[0]) + + cur1.execute("select count(*) from isolevel;") + self.assertEqual(1, cur1.fetchone()[0]) + cnn2.commit() + cur1.execute("select count(*) from isolevel;") + self.assertEqual(2, cur1.fetchone()[0]) + + cur2.execute("select count(*) from isolevel;") + self.assertEqual(2, cur2.fetchone()[0]) + + def test_isolation_level_closed(self): + cnn = self.connect() + cnn.close() + self.assertRaises(psycopg2.InterfaceError, + cnn.set_isolation_level, 0) + self.assertRaises(psycopg2.InterfaceError, + cnn.set_isolation_level, 1) + + @skip_if_crdb("isolation level") + def test_setattr_isolation_level_int(self): + cur = self.conn.cursor() + self.conn.isolation_level = ext.ISOLATION_LEVEL_SERIALIZABLE + self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE) + + cur.execute("SHOW transaction_isolation;") + self.assertEqual(cur.fetchone()[0], 'serializable') + self.conn.rollback() + + self.conn.isolation_level = ext.ISOLATION_LEVEL_REPEATABLE_READ + cur.execute("SHOW transaction_isolation;") + if self.conn.info.server_version > 80000: + self.assertEqual(self.conn.isolation_level, + ext.ISOLATION_LEVEL_REPEATABLE_READ) + self.assertEqual(cur.fetchone()[0], 'repeatable read') + else: + self.assertEqual(self.conn.isolation_level, + 
ext.ISOLATION_LEVEL_SERIALIZABLE) + self.assertEqual(cur.fetchone()[0], 'serializable') + self.conn.rollback() + + self.conn.isolation_level = ext.ISOLATION_LEVEL_READ_COMMITTED + self.assertEqual(self.conn.isolation_level, + ext.ISOLATION_LEVEL_READ_COMMITTED) + cur.execute("SHOW transaction_isolation;") + self.assertEqual(cur.fetchone()[0], 'read committed') + self.conn.rollback() + + self.conn.isolation_level = ext.ISOLATION_LEVEL_READ_UNCOMMITTED + cur.execute("SHOW transaction_isolation;") + if self.conn.info.server_version > 80000: + self.assertEqual(self.conn.isolation_level, + ext.ISOLATION_LEVEL_READ_UNCOMMITTED) + self.assertEqual(cur.fetchone()[0], 'read uncommitted') + else: + self.assertEqual(self.conn.isolation_level, + ext.ISOLATION_LEVEL_READ_COMMITTED) + self.assertEqual(cur.fetchone()[0], 'read committed') + self.conn.rollback() + + self.assertEqual(ext.ISOLATION_LEVEL_DEFAULT, None) + self.conn.isolation_level = ext.ISOLATION_LEVEL_DEFAULT + self.assertEqual(self.conn.isolation_level, None) + cur.execute("SHOW transaction_isolation;") + isol = cur.fetchone()[0] + cur.execute("SHOW default_transaction_isolation;") + self.assertEqual(cur.fetchone()[0], isol) + + @skip_if_crdb("isolation level") + def test_setattr_isolation_level_str(self): + cur = self.conn.cursor() + self.conn.isolation_level = "serializable" + self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE) + + cur.execute("SHOW transaction_isolation;") + self.assertEqual(cur.fetchone()[0], 'serializable') + self.conn.rollback() + + self.conn.isolation_level = "repeatable read" + cur.execute("SHOW transaction_isolation;") + if self.conn.info.server_version > 80000: + self.assertEqual(self.conn.isolation_level, + ext.ISOLATION_LEVEL_REPEATABLE_READ) + self.assertEqual(cur.fetchone()[0], 'repeatable read') + else: + self.assertEqual(self.conn.isolation_level, + ext.ISOLATION_LEVEL_SERIALIZABLE) + self.assertEqual(cur.fetchone()[0], 'serializable') + 
self.conn.rollback() + + self.conn.isolation_level = "read committed" + self.assertEqual(self.conn.isolation_level, + ext.ISOLATION_LEVEL_READ_COMMITTED) + cur.execute("SHOW transaction_isolation;") + self.assertEqual(cur.fetchone()[0], 'read committed') + self.conn.rollback() + + self.conn.isolation_level = "read uncommitted" + cur.execute("SHOW transaction_isolation;") + if self.conn.info.server_version > 80000: + self.assertEqual(self.conn.isolation_level, + ext.ISOLATION_LEVEL_READ_UNCOMMITTED) + self.assertEqual(cur.fetchone()[0], 'read uncommitted') + else: + self.assertEqual(self.conn.isolation_level, + ext.ISOLATION_LEVEL_READ_COMMITTED) + self.assertEqual(cur.fetchone()[0], 'read committed') + self.conn.rollback() + + self.conn.isolation_level = "default" + self.assertEqual(self.conn.isolation_level, None) + cur.execute("SHOW transaction_isolation;") + isol = cur.fetchone()[0] + cur.execute("SHOW default_transaction_isolation;") + self.assertEqual(cur.fetchone()[0], isol) + + def test_setattr_isolation_level_invalid(self): + self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', 0) + self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', -1) + self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', 5) + self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', 'bah') + + def test_attribs_segfault(self): + # bug #790 + for i in range(10000): + self.conn.autocommit + self.conn.readonly + self.conn.deferrable + self.conn.isolation_level + + +@skip_if_tpc_disabled +class ConnectionTwoPhaseTests(ConnectingTestCase): + def setUp(self): + ConnectingTestCase.setUp(self) + + self.make_test_table() + self.clear_test_xacts() + + def tearDown(self): + self.clear_test_xacts() + ConnectingTestCase.tearDown(self) + + def clear_test_xacts(self): + """Rollback all the prepared transaction in the testing db.""" + cnn = self.connect() + cnn.set_isolation_level(0) + cur = cnn.cursor() + try: + cur.execute( + "select gid from 
pg_prepared_xacts where database = %s", + (dbname,)) + except psycopg2.ProgrammingError: + cnn.rollback() + cnn.close() + return + + gids = [r[0] for r in cur] + for gid in gids: + cur.execute("rollback prepared %s;", (gid,)) + cnn.close() + + def make_test_table(self): + cnn = self.connect() + cur = cnn.cursor() + if crdb_version(cnn) is not None: + cur.execute("CREATE TABLE IF NOT EXISTS test_tpc (data text)") + cur.execute("TRUNCATE test_tpc") + cnn.commit() + cnn.close() + return + + try: + cur.execute("DROP TABLE test_tpc;") + except psycopg2.ProgrammingError: + cnn.rollback() + try: + cur.execute("CREATE TABLE test_tpc (data text);") + cnn.commit() + finally: + cnn.close() + + def count_xacts(self): + """Return the number of prepared xacts currently in the test db.""" + cnn = self.connect() + cur = cnn.cursor() + cur.execute(""" + select count(*) from pg_prepared_xacts + where database = %s;""", + (dbname,)) + rv = cur.fetchone()[0] + cnn.close() + return rv + + def count_test_records(self): + """Return the number of records in the test table.""" + cnn = self.connect() + cur = cnn.cursor() + cur.execute("select count(*) from test_tpc;") + rv = cur.fetchone()[0] + cnn.close() + return rv + + def test_tpc_commit(self): + cnn = self.connect() + xid = cnn.xid(1, "gtrid", "bqual") + self.assertEqual(cnn.status, ext.STATUS_READY) + + cnn.tpc_begin(xid) + self.assertEqual(cnn.status, ext.STATUS_BEGIN) + + cur = cnn.cursor() + cur.execute("insert into test_tpc values ('test_tpc_commit');") + self.assertEqual(0, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn.tpc_prepare() + self.assertEqual(cnn.status, ext.STATUS_PREPARED) + self.assertEqual(1, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn.tpc_commit() + self.assertEqual(cnn.status, ext.STATUS_READY) + self.assertEqual(0, self.count_xacts()) + self.assertEqual(1, self.count_test_records()) + + def test_tpc_commit_one_phase(self): + cnn = self.connect() + xid 
= cnn.xid(1, "gtrid", "bqual") + self.assertEqual(cnn.status, ext.STATUS_READY) + + cnn.tpc_begin(xid) + self.assertEqual(cnn.status, ext.STATUS_BEGIN) + + cur = cnn.cursor() + cur.execute("insert into test_tpc values ('test_tpc_commit_1p');") + self.assertEqual(0, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn.tpc_commit() + self.assertEqual(cnn.status, ext.STATUS_READY) + self.assertEqual(0, self.count_xacts()) + self.assertEqual(1, self.count_test_records()) + + def test_tpc_commit_recovered(self): + cnn = self.connect() + xid = cnn.xid(1, "gtrid", "bqual") + self.assertEqual(cnn.status, ext.STATUS_READY) + + cnn.tpc_begin(xid) + self.assertEqual(cnn.status, ext.STATUS_BEGIN) + + cur = cnn.cursor() + cur.execute("insert into test_tpc values ('test_tpc_commit_rec');") + self.assertEqual(0, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn.tpc_prepare() + cnn.close() + self.assertEqual(1, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn = self.connect() + xid = cnn.xid(1, "gtrid", "bqual") + cnn.tpc_commit(xid) + + self.assertEqual(cnn.status, ext.STATUS_READY) + self.assertEqual(0, self.count_xacts()) + self.assertEqual(1, self.count_test_records()) + + def test_tpc_rollback(self): + cnn = self.connect() + xid = cnn.xid(1, "gtrid", "bqual") + self.assertEqual(cnn.status, ext.STATUS_READY) + + cnn.tpc_begin(xid) + self.assertEqual(cnn.status, ext.STATUS_BEGIN) + + cur = cnn.cursor() + cur.execute("insert into test_tpc values ('test_tpc_rollback');") + self.assertEqual(0, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn.tpc_prepare() + self.assertEqual(cnn.status, ext.STATUS_PREPARED) + self.assertEqual(1, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn.tpc_rollback() + self.assertEqual(cnn.status, ext.STATUS_READY) + self.assertEqual(0, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + def 
test_tpc_rollback_one_phase(self): + cnn = self.connect() + xid = cnn.xid(1, "gtrid", "bqual") + self.assertEqual(cnn.status, ext.STATUS_READY) + + cnn.tpc_begin(xid) + self.assertEqual(cnn.status, ext.STATUS_BEGIN) + + cur = cnn.cursor() + cur.execute("insert into test_tpc values ('test_tpc_rollback_1p');") + self.assertEqual(0, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn.tpc_rollback() + self.assertEqual(cnn.status, ext.STATUS_READY) + self.assertEqual(0, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + def test_tpc_rollback_recovered(self): + cnn = self.connect() + xid = cnn.xid(1, "gtrid", "bqual") + self.assertEqual(cnn.status, ext.STATUS_READY) + + cnn.tpc_begin(xid) + self.assertEqual(cnn.status, ext.STATUS_BEGIN) + + cur = cnn.cursor() + cur.execute("insert into test_tpc values ('test_tpc_commit_rec');") + self.assertEqual(0, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn.tpc_prepare() + cnn.close() + self.assertEqual(1, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + cnn = self.connect() + xid = cnn.xid(1, "gtrid", "bqual") + cnn.tpc_rollback(xid) + + self.assertEqual(cnn.status, ext.STATUS_READY) + self.assertEqual(0, self.count_xacts()) + self.assertEqual(0, self.count_test_records()) + + def test_status_after_recover(self): + cnn = self.connect() + self.assertEqual(ext.STATUS_READY, cnn.status) + cnn.tpc_recover() + self.assertEqual(ext.STATUS_READY, cnn.status) + + cur = cnn.cursor() + cur.execute("select 1") + self.assertEqual(ext.STATUS_BEGIN, cnn.status) + cnn.tpc_recover() + self.assertEqual(ext.STATUS_BEGIN, cnn.status) + + def test_recovered_xids(self): + # insert a few test xns + cnn = self.connect() + cnn.set_isolation_level(0) + cur = cnn.cursor() + cur.execute("begin; prepare transaction '1-foo';") + cur.execute("begin; prepare transaction '2-bar';") + + # read the values to return + cur.execute(""" + select gid, prepared, owner, 
database + from pg_prepared_xacts + where database = %s;""", + (dbname,)) + okvals = cur.fetchall() + okvals.sort() + + cnn = self.connect() + xids = cnn.tpc_recover() + xids = [xid for xid in xids if xid.database == dbname] + xids.sort(key=attrgetter('gtrid')) + + # check the values returned + self.assertEqual(len(okvals), len(xids)) + for (xid, (gid, prepared, owner, database)) in zip(xids, okvals): + self.assertEqual(xid.gtrid, gid) + self.assertEqual(xid.prepared, prepared) + self.assertEqual(xid.owner, owner) + self.assertEqual(xid.database, database) + + def test_xid_encoding(self): + cnn = self.connect() + xid = cnn.xid(42, "gtrid", "bqual") + cnn.tpc_begin(xid) + cnn.tpc_prepare() + + cnn = self.connect() + cur = cnn.cursor() + cur.execute("select gid from pg_prepared_xacts where database = %s;", + (dbname,)) + self.assertEqual('42_Z3RyaWQ=_YnF1YWw=', cur.fetchone()[0]) + + @slow + def test_xid_roundtrip(self): + for fid, gtrid, bqual in [ + (0, "", ""), + (42, "gtrid", "bqual"), + (0x7fffffff, "x" * 64, "y" * 64), + ]: + cnn = self.connect() + xid = cnn.xid(fid, gtrid, bqual) + cnn.tpc_begin(xid) + cnn.tpc_prepare() + cnn.close() + + cnn = self.connect() + xids = [x for x in cnn.tpc_recover() if x.database == dbname] + self.assertEqual(1, len(xids)) + xid = xids[0] + self.assertEqual(xid.format_id, fid) + self.assertEqual(xid.gtrid, gtrid) + self.assertEqual(xid.bqual, bqual) + + cnn.tpc_rollback(xid) + + @slow + def test_unparsed_roundtrip(self): + for tid in [ + '', + 'hello, world!', + 'x' * 199, # PostgreSQL's limit in transaction id length + ]: + cnn = self.connect() + cnn.tpc_begin(tid) + cnn.tpc_prepare() + cnn.close() + + cnn = self.connect() + xids = [x for x in cnn.tpc_recover() if x.database == dbname] + self.assertEqual(1, len(xids)) + xid = xids[0] + self.assertEqual(xid.format_id, None) + self.assertEqual(xid.gtrid, tid) + self.assertEqual(xid.bqual, None) + + cnn.tpc_rollback(xid) + + def test_xid_construction(self): + x1 = ext.Xid(74, 
'foo', 'bar') + self.assertEqual(74, x1.format_id) + self.assertEqual('foo', x1.gtrid) + self.assertEqual('bar', x1.bqual) + + def test_xid_from_string(self): + x2 = ext.Xid.from_string('42_Z3RyaWQ=_YnF1YWw=') + self.assertEqual(42, x2.format_id) + self.assertEqual('gtrid', x2.gtrid) + self.assertEqual('bqual', x2.bqual) + + x3 = ext.Xid.from_string('99_xxx_yyy') + self.assertEqual(None, x3.format_id) + self.assertEqual('99_xxx_yyy', x3.gtrid) + self.assertEqual(None, x3.bqual) + + def test_xid_to_string(self): + x1 = ext.Xid.from_string('42_Z3RyaWQ=_YnF1YWw=') + self.assertEqual(str(x1), '42_Z3RyaWQ=_YnF1YWw=') + + x2 = ext.Xid.from_string('99_xxx_yyy') + self.assertEqual(str(x2), '99_xxx_yyy') + + def test_xid_unicode(self): + cnn = self.connect() + x1 = cnn.xid(10, 'uni', 'code') + cnn.tpc_begin(x1) + cnn.tpc_prepare() + cnn.reset() + xid = [x for x in cnn.tpc_recover() if x.database == dbname][0] + self.assertEqual(10, xid.format_id) + self.assertEqual('uni', xid.gtrid) + self.assertEqual('code', xid.bqual) + + def test_xid_unicode_unparsed(self): + # We don't expect people shooting snowmen as transaction ids, + # so if something explodes in an encode error I don't mind. + # Let's just check unicode is accepted as type. 
+ cnn = self.connect() + cnn.set_client_encoding('utf8') + cnn.tpc_begin("transaction-id") + cnn.tpc_prepare() + cnn.reset() + + xid = [x for x in cnn.tpc_recover() if x.database == dbname][0] + self.assertEqual(None, xid.format_id) + self.assertEqual('transaction-id', xid.gtrid) + self.assertEqual(None, xid.bqual) + + def test_cancel_fails_prepared(self): + cnn = self.connect() + cnn.tpc_begin('cancel') + cnn.tpc_prepare() + self.assertRaises(psycopg2.ProgrammingError, cnn.cancel) + + def test_tpc_recover_non_dbapi_connection(self): + cnn = self.connect(connection_factory=psycopg2.extras.RealDictConnection) + cnn.tpc_begin('dict-connection') + cnn.tpc_prepare() + cnn.reset() + + xids = cnn.tpc_recover() + xid = [x for x in xids if x.database == dbname][0] + self.assertEqual(None, xid.format_id) + self.assertEqual('dict-connection', xid.gtrid) + self.assertEqual(None, xid.bqual) + + +@skip_if_crdb("isolation level") +class TransactionControlTests(ConnectingTestCase): + def test_closed(self): + self.conn.close() + self.assertRaises(psycopg2.InterfaceError, + self.conn.set_session, + ext.ISOLATION_LEVEL_SERIALIZABLE) + + def test_not_in_transaction(self): + cur = self.conn.cursor() + cur.execute("select 1") + self.assertRaises(psycopg2.ProgrammingError, + self.conn.set_session, + ext.ISOLATION_LEVEL_SERIALIZABLE) + + def test_set_isolation_level(self): + cur = self.conn.cursor() + self.conn.set_session( + ext.ISOLATION_LEVEL_SERIALIZABLE) + cur.execute("SHOW transaction_isolation;") + self.assertEqual(cur.fetchone()[0], 'serializable') + self.conn.rollback() + + self.conn.set_session( + ext.ISOLATION_LEVEL_REPEATABLE_READ) + cur.execute("SHOW transaction_isolation;") + if self.conn.info.server_version > 80000: + self.assertEqual(cur.fetchone()[0], 'repeatable read') + else: + self.assertEqual(cur.fetchone()[0], 'serializable') + self.conn.rollback() + + self.conn.set_session( + isolation_level=ext.ISOLATION_LEVEL_READ_COMMITTED) + cur.execute("SHOW 
transaction_isolation;") + self.assertEqual(cur.fetchone()[0], 'read committed') + self.conn.rollback() + + self.conn.set_session( + isolation_level=ext.ISOLATION_LEVEL_READ_UNCOMMITTED) + cur.execute("SHOW transaction_isolation;") + if self.conn.info.server_version > 80000: + self.assertEqual(cur.fetchone()[0], 'read uncommitted') + else: + self.assertEqual(cur.fetchone()[0], 'read committed') + self.conn.rollback() + + def test_set_isolation_level_str(self): + cur = self.conn.cursor() + self.conn.set_session("serializable") + cur.execute("SHOW transaction_isolation;") + self.assertEqual(cur.fetchone()[0], 'serializable') + self.conn.rollback() + + self.conn.set_session("repeatable read") + cur.execute("SHOW transaction_isolation;") + if self.conn.info.server_version > 80000: + self.assertEqual(cur.fetchone()[0], 'repeatable read') + else: + self.assertEqual(cur.fetchone()[0], 'serializable') + self.conn.rollback() + + self.conn.set_session("read committed") + cur.execute("SHOW transaction_isolation;") + self.assertEqual(cur.fetchone()[0], 'read committed') + self.conn.rollback() + + self.conn.set_session("read uncommitted") + cur.execute("SHOW transaction_isolation;") + if self.conn.info.server_version > 80000: + self.assertEqual(cur.fetchone()[0], 'read uncommitted') + else: + self.assertEqual(cur.fetchone()[0], 'read committed') + self.conn.rollback() + + def test_bad_isolation_level(self): + self.assertRaises(ValueError, self.conn.set_session, 0) + self.assertRaises(ValueError, self.conn.set_session, 5) + self.assertRaises(ValueError, self.conn.set_session, 'whatever') + + def test_set_read_only(self): + self.assert_(self.conn.readonly is None) + + cur = self.conn.cursor() + self.conn.set_session(readonly=True) + self.assert_(self.conn.readonly is True) + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'on') + self.conn.rollback() + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'on') + 
self.conn.rollback() + + self.conn.set_session(readonly=False) + self.assert_(self.conn.readonly is False) + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'off') + self.conn.rollback() + + def test_setattr_read_only(self): + cur = self.conn.cursor() + self.conn.readonly = True + self.assert_(self.conn.readonly is True) + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'on') + self.assertRaises(self.conn.ProgrammingError, + setattr, self.conn, 'readonly', False) + self.assert_(self.conn.readonly is True) + self.conn.rollback() + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'on') + self.conn.rollback() + + cur = self.conn.cursor() + self.conn.readonly = None + self.assert_(self.conn.readonly is None) + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'off') # assume defined by server + self.conn.rollback() + + self.conn.readonly = False + self.assert_(self.conn.readonly is False) + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'off') + self.conn.rollback() + + def test_set_default(self): + cur = self.conn.cursor() + cur.execute("SHOW transaction_isolation;") + isolevel = cur.fetchone()[0] + cur.execute("SHOW transaction_read_only;") + readonly = cur.fetchone()[0] + self.conn.rollback() + + self.conn.set_session(isolation_level='serializable', readonly=True) + self.conn.set_session(isolation_level='default', readonly='default') + + cur.execute("SHOW transaction_isolation;") + self.assertEqual(cur.fetchone()[0], isolevel) + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], readonly) + + @skip_before_postgres(9, 1) + def test_set_deferrable(self): + self.assert_(self.conn.deferrable is None) + cur = self.conn.cursor() + self.conn.set_session(readonly=True, deferrable=True) + self.assert_(self.conn.deferrable is True) + cur.execute("SHOW transaction_read_only;") + 
self.assertEqual(cur.fetchone()[0], 'on') + cur.execute("SHOW transaction_deferrable;") + self.assertEqual(cur.fetchone()[0], 'on') + self.conn.rollback() + cur.execute("SHOW transaction_deferrable;") + self.assertEqual(cur.fetchone()[0], 'on') + self.conn.rollback() + + self.conn.set_session(deferrable=False) + self.assert_(self.conn.deferrable is False) + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'on') + cur.execute("SHOW transaction_deferrable;") + self.assertEqual(cur.fetchone()[0], 'off') + self.conn.rollback() + + @skip_after_postgres(9, 1) + def test_set_deferrable_error(self): + self.assertRaises(psycopg2.ProgrammingError, + self.conn.set_session, readonly=True, deferrable=True) + self.assertRaises(psycopg2.ProgrammingError, + setattr, self.conn, 'deferrable', True) + + @skip_before_postgres(9, 1) + def test_setattr_deferrable(self): + cur = self.conn.cursor() + self.conn.deferrable = True + self.assert_(self.conn.deferrable is True) + cur.execute("SHOW transaction_deferrable;") + self.assertEqual(cur.fetchone()[0], 'on') + self.assertRaises(self.conn.ProgrammingError, + setattr, self.conn, 'deferrable', False) + self.assert_(self.conn.deferrable is True) + self.conn.rollback() + cur.execute("SHOW transaction_deferrable;") + self.assertEqual(cur.fetchone()[0], 'on') + self.conn.rollback() + + cur = self.conn.cursor() + self.conn.deferrable = None + self.assert_(self.conn.deferrable is None) + cur.execute("SHOW transaction_deferrable;") + self.assertEqual(cur.fetchone()[0], 'off') # assume defined by server + self.conn.rollback() + + self.conn.deferrable = False + self.assert_(self.conn.deferrable is False) + cur.execute("SHOW transaction_deferrable;") + self.assertEqual(cur.fetchone()[0], 'off') + self.conn.rollback() + + def test_mixing_session_attribs(self): + cur = self.conn.cursor() + self.conn.autocommit = True + self.conn.readonly = True + + cur.execute("SHOW transaction_read_only;") + 
self.assertEqual(cur.fetchone()[0], 'on') + + cur.execute("SHOW default_transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'on') + + self.conn.autocommit = False + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'on') + + cur.execute("SHOW default_transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'off') + + def test_idempotence_check(self): + self.conn.autocommit = False + self.conn.readonly = True + self.conn.autocommit = True + self.conn.readonly = True + + cur = self.conn.cursor() + cur.execute("SHOW transaction_read_only") + self.assertEqual(cur.fetchone()[0], 'on') + + +class TestEncryptPassword(ConnectingTestCase): + @skip_before_postgres(10) + def test_encrypt_password_post_9_6(self): + # MD5 algorithm + self.assertEqual( + ext.encrypt_password('psycopg2', 'ashesh', self.conn, 'md5'), + 'md594839d658c28a357126f105b9cb14cfc') + + # keywords + self.assertEqual( + ext.encrypt_password( + password='psycopg2', user='ashesh', + scope=self.conn, algorithm='md5'), + 'md594839d658c28a357126f105b9cb14cfc') + + @skip_if_crdb("password_encryption") + @skip_before_libpq(10) + @skip_before_postgres(10) + def test_encrypt_server(self): + cur = self.conn.cursor() + cur.execute("SHOW password_encryption;") + server_encryption_algorithm = cur.fetchone()[0] + + enc_password = ext.encrypt_password( + 'psycopg2', 'ashesh', self.conn) + + if server_encryption_algorithm == 'md5': + self.assertEqual( + enc_password, 'md594839d658c28a357126f105b9cb14cfc') + elif server_encryption_algorithm == 'scram-sha-256': + self.assertEqual(enc_password[:14], 'SCRAM-SHA-256$') + + self.assertEqual( + ext.encrypt_password( + 'psycopg2', 'ashesh', self.conn, 'scram-sha-256' + )[:14], 'SCRAM-SHA-256$') + + self.assertRaises(psycopg2.ProgrammingError, + ext.encrypt_password, 'psycopg2', 'ashesh', self.conn, 'abc') + + def test_encrypt_md5(self): + self.assertEqual( + ext.encrypt_password('psycopg2', 'ashesh', algorithm='md5'), + 
'md594839d658c28a357126f105b9cb14cfc') + + @skip_before_libpq(10) + def test_encrypt_bad_libpq_10(self): + self.assertRaises(psycopg2.ProgrammingError, + ext.encrypt_password, 'psycopg2', 'ashesh', self.conn, 'abc') + + @skip_after_libpq(10) + def test_encrypt_bad_before_libpq_10(self): + self.assertRaises(psycopg2.NotSupportedError, + ext.encrypt_password, 'psycopg2', 'ashesh', self.conn, 'abc') + + @skip_before_libpq(10) + def test_encrypt_scram(self): + self.assert_( + ext.encrypt_password( + 'psycopg2', 'ashesh', self.conn, 'scram-sha-256') + .startswith('SCRAM-SHA-256$')) + + @skip_after_libpq(10) + def test_encrypt_scram_pre_10(self): + self.assertRaises(psycopg2.NotSupportedError, + ext.encrypt_password, + password='psycopg2', user='ashesh', + scope=self.conn, algorithm='scram-sha-256') + + def test_bad_types(self): + self.assertRaises(TypeError, ext.encrypt_password) + self.assertRaises(TypeError, ext.encrypt_password, + 'password', 42, self.conn, 'md5') + self.assertRaises(TypeError, ext.encrypt_password, + 42, 'user', self.conn, 'md5') + self.assertRaises(TypeError, ext.encrypt_password, + 42, 'user', 'wat', 'abc') + self.assertRaises(TypeError, ext.encrypt_password, + 'password', 'user', 'wat', 42) + + +class AutocommitTests(ConnectingTestCase): + def test_closed(self): + self.conn.close() + self.assertRaises(psycopg2.InterfaceError, + setattr, self.conn, 'autocommit', True) + + # The getter doesn't have a guard. We may change this in future + # to make it consistent with other methods; meanwhile let's just check + # it doesn't explode. 
+ try: + self.assert_(self.conn.autocommit in (True, False)) + except psycopg2.InterfaceError: + pass + + def test_default_no_autocommit(self): + self.assert_(not self.conn.autocommit) + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_IDLE) + + cur = self.conn.cursor() + cur.execute('select 1;') + self.assertEqual(self.conn.status, ext.STATUS_BEGIN) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_INTRANS) + + self.conn.rollback() + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_IDLE) + + def test_set_autocommit(self): + self.conn.autocommit = True + self.assert_(self.conn.autocommit) + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_IDLE) + + cur = self.conn.cursor() + cur.execute('select 1;') + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_IDLE) + + self.conn.autocommit = False + self.assert_(not self.conn.autocommit) + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_IDLE) + + cur.execute('select 1;') + self.assertEqual(self.conn.status, ext.STATUS_BEGIN) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_INTRANS) + + def test_set_intrans_error(self): + cur = self.conn.cursor() + cur.execute('select 1;') + self.assertRaises(psycopg2.ProgrammingError, + setattr, self.conn, 'autocommit', True) + + def test_set_session_autocommit(self): + self.conn.set_session(autocommit=True) + self.assert_(self.conn.autocommit) + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_IDLE) + + cur = self.conn.cursor() + 
cur.execute('select 1;') + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_IDLE) + + self.conn.set_session(autocommit=False) + self.assert_(not self.conn.autocommit) + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_IDLE) + + cur.execute('select 1;') + self.assertEqual(self.conn.status, ext.STATUS_BEGIN) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_INTRANS) + self.conn.rollback() + + self.conn.set_session('serializable', readonly=True, autocommit=True) + self.assert_(self.conn.autocommit) + cur.execute('select 1;') + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_IDLE) + cur.execute("SHOW transaction_isolation;") + self.assertEqual(cur.fetchone()[0], 'serializable') + cur.execute("SHOW transaction_read_only;") + self.assertEqual(cur.fetchone()[0], 'on') + + +class PasswordLeakTestCase(ConnectingTestCase): + def setUp(self): + super().setUp() + PasswordLeakTestCase.dsn = None + + class GrassingConnection(ext.connection): + """A connection snitching the dsn away. + + This connection passes the dsn to the test case class even if init + fails (e.g. connection error). Test that we mangle the dsn ok anyway. 
+ """ + + def __init__(self, *args, **kwargs): + try: + super(PasswordLeakTestCase.GrassingConnection, self).__init__( + *args, **kwargs) + finally: + # The connection is not initialized entirely, however the C + # code should have set the dsn, and it should have scrubbed + # the password away + PasswordLeakTestCase.dsn = self.dsn + + @skip_if_crdb("connect any db") + def test_leak(self): + self.assertRaises(psycopg2.DatabaseError, + self.GrassingConnection, "dbname=nosuch password=whateva") + self.assertDsnEqual(self.dsn, "dbname=nosuch password=xxx") + + @skip_before_libpq(9, 2) + def test_url_leak(self): + self.assertRaises(psycopg2.DatabaseError, + self.GrassingConnection, + "postgres://someone:whateva@localhost/nosuch") + + self.assertDsnEqual(self.dsn, + "user=someone password=xxx host=localhost dbname=nosuch") + + +class SignalTestCase(ConnectingTestCase): + @slow + @skip_before_postgres(8, 2) + def test_bug_551_returning(self): + # Raise an exception trying to decode 'id' + self._test_bug_551(query=""" + INSERT INTO test551 (num) VALUES (%s) RETURNING id + """) + + @slow + def test_bug_551_no_returning(self): + # Raise an exception trying to decode 'INSERT 0 1' + self._test_bug_551(query=""" + INSERT INTO test551 (num) VALUES (%s) + """) + + def _test_bug_551(self, query): + script = f"""import os +import sys +import time +import signal +import warnings +import threading + +# ignore wheel deprecation warning +with warnings.catch_warnings(): + warnings.simplefilter('ignore') + import psycopg2 + +def handle_sigabort(sig, frame): + sys.exit(1) + +def killer(): + time.sleep(0.5) + os.kill(os.getpid(), signal.SIGABRT) + +signal.signal(signal.SIGABRT, handle_sigabort) + +conn = psycopg2.connect({dsn!r}) + +cur = conn.cursor() + +cur.execute("create table test551 (id serial, num varchar(50))") + +t = threading.Thread(target=killer) +t.daemon = True +t.start() + +while True: + cur.execute({query!r}, ("Hello, world!",)) +""" + + proc = sp.Popen([sys.executable, 
'-c', script], + stdout=sp.PIPE, stderr=sp.PIPE) + (out, err) = proc.communicate() + self.assertNotEqual(proc.returncode, 0) + # Strip [NNN refs] from output + err = re.sub(br'\[[^\]]+\]', b'', err).strip() + self.assert_(not err, err) + + +class TestConnectionInfo(ConnectingTestCase): + def setUp(self): + ConnectingTestCase.setUp(self) + + class BrokenConn(psycopg2.extensions.connection): + def __init__(self, *args, **kwargs): + # don't call superclass + pass + + # A "broken" connection + self.bconn = self.connect(connection_factory=BrokenConn) + + def test_dbname(self): + self.assert_(isinstance(self.conn.info.dbname, str)) + self.assert_(self.bconn.info.dbname is None) + + def test_user(self): + cur = self.conn.cursor() + cur.execute("select user") + self.assertEqual(self.conn.info.user, cur.fetchone()[0]) + self.assert_(self.bconn.info.user is None) + + def test_password(self): + self.assert_(isinstance(self.conn.info.password, str)) + self.assert_(self.bconn.info.password is None) + + def test_host(self): + expected = dbhost if dbhost else "/" + self.assertIn(expected, self.conn.info.host) + self.assert_(self.bconn.info.host is None) + + def test_host_readonly(self): + with self.assertRaises(AttributeError): + self.conn.info.host = 'override' + + def test_port(self): + self.assert_(isinstance(self.conn.info.port, int)) + self.assert_(self.bconn.info.port is None) + + def test_options(self): + self.assert_(isinstance(self.conn.info.options, str)) + self.assert_(self.bconn.info.options is None) + + @skip_before_libpq(9, 3) + def test_dsn_parameters(self): + d = self.conn.info.dsn_parameters + self.assert_(isinstance(d, dict)) + self.assertEqual(d['dbname'], dbname) # the only param we can check reliably + self.assert_('password' not in d, d) + + def test_status(self): + self.assertEqual(self.conn.info.status, 0) + self.assertEqual(self.bconn.info.status, 1) + + def test_transaction_status(self): + self.assertEqual(self.conn.info.transaction_status, 0) + cur = 
self.conn.cursor() + cur.execute("select 1") + self.assertEqual(self.conn.info.transaction_status, 2) + self.assertEqual(self.bconn.info.transaction_status, 4) + + def test_parameter_status(self): + cur = self.conn.cursor() + try: + cur.execute("show server_version") + except psycopg2.DatabaseError: + self.assertIsInstance( + self.conn.info.parameter_status('server_version'), str) + else: + self.assertEqual( + self.conn.info.parameter_status('server_version'), + cur.fetchone()[0]) + + self.assertIsNone(self.conn.info.parameter_status('wat')) + self.assertIsNone(self.bconn.info.parameter_status('server_version')) + + def test_protocol_version(self): + self.assertEqual(self.conn.info.protocol_version, 3) + self.assertEqual(self.bconn.info.protocol_version, 0) + + def test_server_version(self): + cur = self.conn.cursor() + try: + cur.execute("show server_version_num") + except psycopg2.DatabaseError: + self.assert_(isinstance(self.conn.info.server_version, int)) + else: + self.assertEqual( + self.conn.info.server_version, int(cur.fetchone()[0])) + + self.assertEqual(self.bconn.info.server_version, 0) + + def test_error_message(self): + self.assertIsNone(self.conn.info.error_message) + self.assertIsNotNone(self.bconn.info.error_message) + + cur = self.conn.cursor() + try: + cur.execute("select 1 from nosuchtable") + except psycopg2.DatabaseError: + pass + + self.assert_('nosuchtable' in self.conn.info.error_message) + + def test_socket(self): + self.assert_(self.conn.info.socket >= 0) + self.assert_(self.bconn.info.socket < 0) + + @skip_if_crdb("backend pid") + def test_backend_pid(self): + cur = self.conn.cursor() + try: + cur.execute("select pg_backend_pid()") + except psycopg2.DatabaseError: + self.assert_(self.conn.info.backend_pid > 0) + else: + self.assertEqual( + self.conn.info.backend_pid, int(cur.fetchone()[0])) + + self.assert_(self.bconn.info.backend_pid == 0) + + def test_needs_password(self): + self.assertIs(self.conn.info.needs_password, False) + 
self.assertIs(self.bconn.info.needs_password, False) + + def test_used_password(self): + self.assertIsInstance(self.conn.info.used_password, bool) + self.assertIs(self.bconn.info.used_password, False) + + @skip_before_libpq(9, 5) + def test_ssl_in_use(self): + self.assertIsInstance(self.conn.info.ssl_in_use, bool) + self.assertIs(self.bconn.info.ssl_in_use, False) + + @skip_after_libpq(9, 5) + def test_ssl_not_supported(self): + with self.assertRaises(psycopg2.NotSupportedError): + self.conn.info.ssl_in_use + with self.assertRaises(psycopg2.NotSupportedError): + self.conn.info.ssl_attribute_names + with self.assertRaises(psycopg2.NotSupportedError): + self.conn.info.ssl_attribute('wat') + + @skip_before_libpq(9, 5) + def test_ssl_attribute(self): + attribs = self.conn.info.ssl_attribute_names + self.assert_(attribs) + if self.conn.info.ssl_in_use: + for attrib in attribs: + self.assertIsInstance(self.conn.info.ssl_attribute(attrib), str) + else: + for attrib in attribs: + self.assertIsNone(self.conn.info.ssl_attribute(attrib)) + + self.assertIsNone(self.conn.info.ssl_attribute('wat')) + + for attrib in attribs: + self.assertIsNone(self.bconn.info.ssl_attribute(attrib)) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_copy.py b/tests/test_copy.py new file mode 100755 index 0000000000000000000000000000000000000000..a36cf9184dd7123297c130ae4ffca6522f1857f8 --- /dev/null +++ b/tests/test_copy.py @@ -0,0 +1,404 @@ +#!/usr/bin/env python + +# test_copy.py - unit test for COPY support +# +# Copyright (C) 2010-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import io +import sys +import string +import unittest +from .testutils import ConnectingTestCase, skip_before_postgres, slow, StringIO +from .testutils import skip_if_crdb +from itertools import cycle +from subprocess import Popen, PIPE + +import psycopg2 +import psycopg2.extensions +from .testutils import skip_copy_if_green, TextIOBase +from .testconfig import dsn + + +class MinimalRead(TextIOBase): + """A file wrapper exposing the minimal interface to copy from.""" + def __init__(self, f): + self.f = f + + def read(self, size): + return self.f.read(size) + + def readline(self): + return self.f.readline() + + +class MinimalWrite(TextIOBase): + """A file wrapper exposing the minimal interface to copy to.""" + def __init__(self, f): + self.f = f + + def write(self, data): + return self.f.write(data) + + +@skip_copy_if_green +class CopyTests(ConnectingTestCase): + + def setUp(self): + ConnectingTestCase.setUp(self) + self._create_temp_table() + + def _create_temp_table(self): + skip_if_crdb("copy", self.conn) + curs = self.conn.cursor() + curs.execute(''' + CREATE TEMPORARY TABLE tcopy ( + id serial PRIMARY KEY, + data text + )''') + + @slow + def test_copy_from(self): + curs = self.conn.cursor() + try: + self._copy_from(curs, nrecs=1024, srec=10 * 1024, copykw={}) + finally: + curs.close() + + @slow + def 
test_copy_from_insane_size(self): + # Trying to trigger a "would block" error + curs = self.conn.cursor() + try: + self._copy_from(curs, nrecs=10 * 1024, srec=10 * 1024, + copykw={'size': 20 * 1024 * 1024}) + finally: + curs.close() + + def test_copy_from_cols(self): + curs = self.conn.cursor() + f = StringIO() + for i in range(10): + f.write(f"{i}\n") + + f.seek(0) + curs.copy_from(MinimalRead(f), "tcopy", columns=['id']) + + curs.execute("select * from tcopy order by id") + self.assertEqual([(i, None) for i in range(10)], curs.fetchall()) + + def test_copy_from_cols_err(self): + curs = self.conn.cursor() + f = StringIO() + for i in range(10): + f.write(f"{i}\n") + + f.seek(0) + + def cols(): + raise ZeroDivisionError() + yield 'id' + + self.assertRaises(ZeroDivisionError, + curs.copy_from, MinimalRead(f), "tcopy", columns=cols()) + + @slow + def test_copy_to(self): + curs = self.conn.cursor() + try: + self._copy_from(curs, nrecs=1024, srec=10 * 1024, copykw={}) + self._copy_to(curs, srec=10 * 1024) + finally: + curs.close() + + def test_copy_text(self): + self.conn.set_client_encoding('latin1') + self._create_temp_table() # the above call closed the xn + + abin = bytes(list(range(32, 127)) + + list(range(160, 256))).decode('latin1') + about = abin.replace('\\', '\\\\') + + curs = self.conn.cursor() + curs.execute('insert into tcopy values (%s, %s)', + (42, abin)) + + f = io.StringIO() + curs.copy_to(f, 'tcopy', columns=('data',)) + f.seek(0) + self.assertEqual(f.readline().rstrip(), about) + + def test_copy_bytes(self): + self.conn.set_client_encoding('latin1') + self._create_temp_table() # the above call closed the xn + + abin = bytes(list(range(32, 127)) + + list(range(160, 255))).decode('latin1') + about = abin.replace('\\', '\\\\').encode('latin1') + + curs = self.conn.cursor() + curs.execute('insert into tcopy values (%s, %s)', + (42, abin)) + + f = io.BytesIO() + curs.copy_to(f, 'tcopy', columns=('data',)) + f.seek(0) + 
self.assertEqual(f.readline().rstrip(), about) + + def test_copy_expert_textiobase(self): + self.conn.set_client_encoding('latin1') + self._create_temp_table() # the above call closed the xn + + abin = bytes(list(range(32, 127)) + + list(range(160, 256))).decode('latin1') + about = abin.replace('\\', '\\\\') + + f = io.StringIO() + f.write(about) + f.seek(0) + + curs = self.conn.cursor() + psycopg2.extensions.register_type( + psycopg2.extensions.UNICODE, curs) + + curs.copy_expert('COPY tcopy (data) FROM STDIN', f) + curs.execute("select data from tcopy;") + self.assertEqual(curs.fetchone()[0], abin) + + f = io.StringIO() + curs.copy_expert('COPY tcopy (data) TO STDOUT', f) + f.seek(0) + self.assertEqual(f.readline().rstrip(), about) + + # same tests with setting size + f = io.StringIO() + f.write(about) + f.seek(0) + exp_size = 123 + # hack here to leave file as is, only check size when reading + real_read = f.read + + def read(_size, f=f, exp_size=exp_size): + self.assertEqual(_size, exp_size) + return real_read(_size) + + f.read = read + curs.copy_expert('COPY tcopy (data) FROM STDIN', f, size=exp_size) + curs.execute("select data from tcopy;") + self.assertEqual(curs.fetchone()[0], abin) + + def _copy_from(self, curs, nrecs, srec, copykw): + f = StringIO() + for i, c in zip(range(nrecs), cycle(string.ascii_letters)): + l = c * srec + f.write(f"{i}\t{l}\n") + + f.seek(0) + curs.copy_from(MinimalRead(f), "tcopy", **copykw) + + curs.execute("select count(*) from tcopy") + self.assertEqual(nrecs, curs.fetchone()[0]) + + curs.execute("select data from tcopy where id < %s order by id", + (len(string.ascii_letters),)) + for i, (l,) in enumerate(curs): + self.assertEqual(l, string.ascii_letters[i] * srec) + + def _copy_to(self, curs, srec): + f = StringIO() + curs.copy_to(MinimalWrite(f), "tcopy") + + f.seek(0) + ntests = 0 + for line in f: + n, s = line.split() + if int(n) < len(string.ascii_letters): + self.assertEqual(s, string.ascii_letters[int(n)] * srec) + ntests 
+= 1 + + self.assertEqual(ntests, len(string.ascii_letters)) + + def test_copy_expert_file_refcount(self): + class Whatever: + pass + + f = Whatever() + curs = self.conn.cursor() + self.assertRaises(TypeError, + curs.copy_expert, 'COPY tcopy (data) FROM STDIN', f) + + def test_copy_no_column_limit(self): + cols = [f"c{i:050}" for i in range(200)] + + curs = self.conn.cursor() + curs.execute('CREATE TEMPORARY TABLE manycols (%s)' % ',\n'.join( + ["%s int" % c for c in cols])) + curs.execute("INSERT INTO manycols DEFAULT VALUES") + + f = StringIO() + curs.copy_to(f, "manycols", columns=cols) + f.seek(0) + self.assertEqual(f.read().split(), ['\\N'] * len(cols)) + + f.seek(0) + curs.copy_from(f, "manycols", columns=cols) + curs.execute("select count(*) from manycols;") + self.assertEqual(curs.fetchone()[0], 2) + + def test_copy_funny_names(self): + cols = ["select", "insert", "group"] + + curs = self.conn.cursor() + curs.execute('CREATE TEMPORARY TABLE "select" (%s)' % ',\n'.join( + ['"%s" int' % c for c in cols])) + curs.execute('INSERT INTO "select" DEFAULT VALUES') + + f = StringIO() + curs.copy_to(f, "select", columns=cols) + f.seek(0) + self.assertEqual(f.read().split(), ['\\N'] * len(cols)) + + f.seek(0) + curs.copy_from(f, "select", columns=cols) + curs.execute('select count(*) from "select";') + self.assertEqual(curs.fetchone()[0], 2) + + @skip_before_postgres(8, 2) # they don't send the count + def test_copy_rowcount(self): + curs = self.conn.cursor() + + curs.copy_from(StringIO('aaa\nbbb\nccc\n'), 'tcopy', columns=['data']) + self.assertEqual(curs.rowcount, 3) + + curs.copy_expert( + "copy tcopy (data) from stdin", + StringIO('ddd\neee\n')) + self.assertEqual(curs.rowcount, 2) + + curs.copy_to(StringIO(), "tcopy") + self.assertEqual(curs.rowcount, 5) + + curs.execute("insert into tcopy (data) values ('fff')") + curs.copy_expert("copy tcopy to stdout", StringIO()) + self.assertEqual(curs.rowcount, 6) + + def test_copy_rowcount_error(self): + curs = 
self.conn.cursor() + + curs.execute("insert into tcopy (data) values ('fff')") + self.assertEqual(curs.rowcount, 1) + + self.assertRaises(psycopg2.DataError, + curs.copy_from, StringIO('aaa\nbbb\nccc\n'), 'tcopy') + self.assertEqual(curs.rowcount, -1) + + def test_copy_query(self): + curs = self.conn.cursor() + + curs.copy_from(StringIO('aaa\nbbb\nccc\n'), 'tcopy', columns=['data']) + self.assert_(b"copy " in curs.query.lower()) + self.assert_(b" from stdin" in curs.query.lower()) + + curs.copy_expert( + "copy tcopy (data) from stdin", + StringIO('ddd\neee\n')) + self.assert_(b"copy " in curs.query.lower()) + self.assert_(b" from stdin" in curs.query.lower()) + + curs.copy_to(StringIO(), "tcopy") + self.assert_(b"copy " in curs.query.lower()) + self.assert_(b" to stdout" in curs.query.lower()) + + curs.execute("insert into tcopy (data) values ('fff')") + curs.copy_expert("copy tcopy to stdout", StringIO()) + self.assert_(b"copy " in curs.query.lower()) + self.assert_(b" to stdout" in curs.query.lower()) + + @slow + def test_copy_from_segfault(self): + # issue #219 + script = f"""import psycopg2 +conn = psycopg2.connect({dsn!r}) +curs = conn.cursor() +curs.execute("create table copy_segf (id int)") +try: + curs.execute("copy copy_segf from stdin") +except psycopg2.ProgrammingError: + pass +conn.close() +""" + + proc = Popen([sys.executable, '-c', script]) + proc.communicate() + self.assertEqual(0, proc.returncode) + + @slow + def test_copy_to_segfault(self): + # issue #219 + script = f"""import psycopg2 +conn = psycopg2.connect({dsn!r}) +curs = conn.cursor() +curs.execute("create table copy_segf (id int)") +try: + curs.execute("copy copy_segf to stdout") +except psycopg2.ProgrammingError: + pass +conn.close() +""" + + proc = Popen([sys.executable, '-c', script], stdout=PIPE) + proc.communicate() + self.assertEqual(0, proc.returncode) + + def test_copy_from_propagate_error(self): + class BrokenRead(TextIOBase): + def read(self, size): + return 1 / 0 + + def 
readline(self): + return 1 / 0 + + curs = self.conn.cursor() + # It seems we cannot do this, but now at least we propagate the error + # self.assertRaises(ZeroDivisionError, + # curs.copy_from, BrokenRead(), "tcopy") + try: + curs.copy_from(BrokenRead(), "tcopy") + except Exception as e: + self.assert_('ZeroDivisionError' in str(e)) + + def test_copy_to_propagate_error(self): + class BrokenWrite(TextIOBase): + def write(self, data): + return 1 / 0 + + curs = self.conn.cursor() + curs.execute("insert into tcopy values (10, 'hi')") + self.assertRaises(ZeroDivisionError, + curs.copy_to, BrokenWrite(), "tcopy") + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_cursor.py b/tests/test_cursor.py new file mode 100755 index 0000000000000000000000000000000000000000..04f535a5d8f5fe476509943405610066ae8b0a4b --- /dev/null +++ b/tests/test_cursor.py @@ -0,0 +1,701 @@ +#!/usr/bin/env python + +# test_cursor.py - unit test for cursor attributes +# +# Copyright (C) 2010-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public +# License for more details. + +import gc +import sys +import time +import ctypes +import pickle +import psycopg2 +import psycopg2.extensions +import unittest +from datetime import date +from decimal import Decimal +from weakref import ref +from .testutils import (ConnectingTestCase, skip_before_postgres, + skip_if_no_getrefcount, slow, skip_if_no_superuser, + skip_if_windows, skip_if_crdb, crdb_version) + +import psycopg2.extras + + +class CursorTests(ConnectingTestCase): + + def test_close_idempotent(self): + cur = self.conn.cursor() + cur.close() + cur.close() + self.assert_(cur.closed) + + def test_empty_query(self): + cur = self.conn.cursor() + self.assertRaises(psycopg2.ProgrammingError, cur.execute, "") + self.assertRaises(psycopg2.ProgrammingError, cur.execute, " ") + self.assertRaises(psycopg2.ProgrammingError, cur.execute, ";") + + def test_executemany_propagate_exceptions(self): + conn = self.conn + cur = conn.cursor() + cur.execute("create table test_exc (data int);") + + def buggygen(): + yield 1 // 0 + + self.assertRaises(ZeroDivisionError, + cur.executemany, "insert into test_exc values (%s)", buggygen()) + cur.close() + + def test_mogrify_unicode(self): + conn = self.conn + cur = conn.cursor() + + # test consistency between execute and mogrify. 
+ + # unicode query containing only ascii data + cur.execute("SELECT 'foo';") + self.assertEqual('foo', cur.fetchone()[0]) + self.assertEqual(b"SELECT 'foo';", cur.mogrify("SELECT 'foo';")) + + conn.set_client_encoding('UTF8') + snowman = "\u2603" + + def b(s): + if isinstance(s, str): + return s.encode('utf8') + else: + return s + + # unicode query with non-ascii data + cur.execute(f"SELECT '{snowman}';") + self.assertEqual(snowman.encode('utf8'), b(cur.fetchone()[0])) + self.assertQuotedEqual(f"SELECT '{snowman}';".encode('utf8'), + cur.mogrify(f"SELECT '{snowman}';")) + + # unicode args + cur.execute("SELECT %s;", (snowman,)) + self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0])) + self.assertQuotedEqual(f"SELECT '{snowman}';".encode('utf8'), + cur.mogrify("SELECT %s;", (snowman,))) + + # unicode query and args + cur.execute("SELECT %s;", (snowman,)) + self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0])) + self.assertQuotedEqual(f"SELECT '{snowman}';".encode('utf8'), + cur.mogrify("SELECT %s;", (snowman,))) + + def test_mogrify_decimal_explodes(self): + conn = self.conn + cur = conn.cursor() + self.assertEqual(b'SELECT 10.3;', + cur.mogrify("SELECT %s;", (Decimal("10.3"),))) + + @skip_if_no_getrefcount + def test_mogrify_leak_on_multiple_reference(self): + # issue #81: reference leak when a parameter value is referenced + # more than once from a dict. 
+ cur = self.conn.cursor() + foo = (lambda x: x)('foo') * 10 + nref1 = sys.getrefcount(foo) + cur.mogrify("select %(foo)s, %(foo)s, %(foo)s", {'foo': foo}) + nref2 = sys.getrefcount(foo) + self.assertEqual(nref1, nref2) + + def test_modify_closed(self): + cur = self.conn.cursor() + cur.close() + sql = cur.mogrify("select %s", (10,)) + self.assertEqual(sql, b"select 10") + + def test_bad_placeholder(self): + cur = self.conn.cursor() + self.assertRaises(psycopg2.ProgrammingError, + cur.mogrify, "select %(foo", {}) + self.assertRaises(psycopg2.ProgrammingError, + cur.mogrify, "select %(foo", {'foo': 1}) + self.assertRaises(psycopg2.ProgrammingError, + cur.mogrify, "select %(foo, %(bar)", {'foo': 1}) + self.assertRaises(psycopg2.ProgrammingError, + cur.mogrify, "select %(foo, %(bar)", {'foo': 1, 'bar': 2}) + + def test_cast(self): + curs = self.conn.cursor() + + self.assertEqual(42, curs.cast(20, '42')) + self.assertAlmostEqual(3.14, curs.cast(700, '3.14')) + + self.assertEqual(Decimal('123.45'), curs.cast(1700, '123.45')) + + self.assertEqual(date(2011, 1, 2), curs.cast(1082, '2011-01-02')) + self.assertEqual("who am i?", curs.cast(705, 'who am i?')) # unknown + + def test_cast_specificity(self): + curs = self.conn.cursor() + self.assertEqual("foo", curs.cast(705, 'foo')) + + D = psycopg2.extensions.new_type((705,), "DOUBLING", lambda v, c: v * 2) + psycopg2.extensions.register_type(D, self.conn) + self.assertEqual("foofoo", curs.cast(705, 'foo')) + + T = psycopg2.extensions.new_type((705,), "TREBLING", lambda v, c: v * 3) + psycopg2.extensions.register_type(T, curs) + self.assertEqual("foofoofoo", curs.cast(705, 'foo')) + + curs2 = self.conn.cursor() + self.assertEqual("foofoo", curs2.cast(705, 'foo')) + + def test_weakref(self): + curs = self.conn.cursor() + w = ref(curs) + del curs + gc.collect() + self.assert_(w() is None) + + def test_null_name(self): + curs = self.conn.cursor(None) + self.assertEqual(curs.name, None) + + def test_description_attribs(self): + 
curs = self.conn.cursor() + curs.execute("""select + 3.14::decimal(10,2) as pi, + 'hello'::text as hi, + '2010-02-18'::date as now; + """) + self.assertEqual(len(curs.description), 3) + for c in curs.description: + self.assertEqual(len(c), 7) # DBAPI happy + for a in ('name', 'type_code', 'display_size', 'internal_size', + 'precision', 'scale', 'null_ok'): + self.assert_(hasattr(c, a), a) + + c = curs.description[0] + self.assertEqual(c.name, 'pi') + self.assert_(c.type_code in psycopg2.extensions.DECIMAL.values) + if crdb_version(self.conn) is None: + self.assert_(c.internal_size > 0) + self.assertEqual(c.precision, 10) + self.assertEqual(c.scale, 2) + + c = curs.description[1] + self.assertEqual(c.name, 'hi') + self.assert_(c.type_code in psycopg2.STRING.values) + self.assert_(c.internal_size < 0) + self.assertEqual(c.precision, None) + self.assertEqual(c.scale, None) + + c = curs.description[2] + self.assertEqual(c.name, 'now') + self.assert_(c.type_code in psycopg2.extensions.DATE.values) + self.assert_(c.internal_size > 0) + self.assertEqual(c.precision, None) + self.assertEqual(c.scale, None) + + @skip_if_crdb("table oid") + def test_description_extra_attribs(self): + curs = self.conn.cursor() + curs.execute(""" + create table testcol ( + pi decimal(10,2), + hi text) + """) + curs.execute("select oid from pg_class where relname = %s", ('testcol',)) + oid = curs.fetchone()[0] + + curs.execute("insert into testcol values (3.14, 'hello')") + curs.execute("select hi, pi, 42 from testcol") + self.assertEqual(curs.description[0].table_oid, oid) + self.assertEqual(curs.description[0].table_column, 2) + + self.assertEqual(curs.description[1].table_oid, oid) + self.assertEqual(curs.description[1].table_column, 1) + + self.assertEqual(curs.description[2].table_oid, None) + self.assertEqual(curs.description[2].table_column, None) + + def test_description_slice(self): + curs = self.conn.cursor() + curs.execute("select 1::int4 as a") + 
self.assertEqual(curs.description[0][0:2], ('a', 23)) + + def test_pickle_description(self): + curs = self.conn.cursor() + curs.execute('SELECT 1 AS foo') + description = curs.description + + pickled = pickle.dumps(description, pickle.HIGHEST_PROTOCOL) + unpickled = pickle.loads(pickled) + + self.assertEqual(description, unpickled) + + def test_column_refcount(self): + # Reproduce crash describe in ticket #1252 + from psycopg2.extensions import Column + + def do_stuff(): + _ = Column(name='my_column') + + for _ in range(1000): + do_stuff() + + def test_bad_subclass(self): + # check that we get an error message instead of a segfault + # for badly written subclasses. + # see https://stackoverflow.com/questions/22019341/ + class StupidCursor(psycopg2.extensions.cursor): + def __init__(self, *args, **kwargs): + # I am stupid so not calling superclass init + pass + + cur = StupidCursor() + self.assertRaises(psycopg2.InterfaceError, cur.execute, 'select 1') + self.assertRaises(psycopg2.InterfaceError, cur.executemany, + 'select 1', []) + + def test_callproc_badparam(self): + cur = self.conn.cursor() + self.assertRaises(TypeError, cur.callproc, 'lower', 42) + + # It would be inappropriate to test callproc's named parameters in the + # DBAPI2.0 test section because they are a psycopg2 extension. 
+ @skip_before_postgres(9, 0) + @skip_if_crdb("stored procedure") + def test_callproc_dict(self): + # This parameter name tests for injection and quote escaping + paramname = ''' + Robert'); drop table "students" -- + '''.strip() + escaped_paramname = '"%s"' % paramname.replace('"', '""') + procname = 'pg_temp.randall' + + cur = self.conn.cursor() + + # Set up the temporary function + cur.execute(f''' + CREATE FUNCTION {procname}({escaped_paramname} INT) + RETURNS INT AS + 'SELECT $1 * $1' + LANGUAGE SQL + ''') + + # Make sure callproc works right + cur.callproc(procname, {paramname: 2}) + self.assertEquals(cur.fetchone()[0], 4) + + # Make sure callproc fails right + failing_cases = [ + ({paramname: 2, 'foo': 'bar'}, psycopg2.ProgrammingError), + ({paramname: '2'}, psycopg2.ProgrammingError), + ({paramname: 'two'}, psycopg2.ProgrammingError), + ({'bj\xc3rn': 2}, psycopg2.ProgrammingError), + ({3: 2}, TypeError), + ({self: 2}, TypeError), + ] + for parameter_sequence, exception in failing_cases: + self.assertRaises(exception, cur.callproc, procname, parameter_sequence) + self.conn.rollback() + + @skip_if_no_superuser + @skip_if_windows + @skip_if_crdb("backend pid") + @skip_before_postgres(8, 4) + def test_external_close_sync(self): + # If a "victim" connection is closed by a "control" connection + # behind psycopg2's back, psycopg2 always handles it correctly: + # raise OperationalError, set conn.closed to 2. This reproduces + # issue #443, a race between control_conn closing victim_conn and + # psycopg2 noticing. + control_conn = self.conn + connect_func = self.connect + + def wait_func(conn): + pass + + self._test_external_close(control_conn, connect_func, wait_func) + + @skip_if_no_superuser + @skip_if_windows + @skip_if_crdb("backend pid") + @skip_before_postgres(8, 4) + def test_external_close_async(self): + # Issue #443 is in the async code too. Since the fix is duplicated, + # so is the test. 
+ control_conn = self.conn + + def connect_func(): + return self.connect(async_=True) + + wait_func = psycopg2.extras.wait_select + self._test_external_close(control_conn, connect_func, wait_func) + + def _test_external_close(self, control_conn, connect_func, wait_func): + # The short sleep before using victim_conn the second time makes it + # much more likely to lose the race and see the bug. Repeating the + # test several times makes it even more likely. + for i in range(10): + victim_conn = connect_func() + wait_func(victim_conn) + + with victim_conn.cursor() as cur: + cur.execute('select pg_backend_pid()') + wait_func(victim_conn) + pid1 = cur.fetchall()[0][0] + + with control_conn.cursor() as cur: + cur.execute('select pg_terminate_backend(%s)', (pid1,)) + + time.sleep(0.001) + + def f(): + with victim_conn.cursor() as cur: + cur.execute('select 1') + wait_func(victim_conn) + + self.assertRaises(psycopg2.OperationalError, f) + + self.assertEqual(victim_conn.closed, 2) + + @skip_before_postgres(8, 2) + def test_rowcount_on_executemany_returning(self): + cur = self.conn.cursor() + cur.execute("create table execmany(id serial primary key, data int)") + cur.executemany( + "insert into execmany (data) values (%s)", + [(i,) for i in range(4)]) + self.assertEqual(cur.rowcount, 4) + + cur.executemany( + "insert into execmany (data) values (%s) returning data", + [(i,) for i in range(5)]) + self.assertEqual(cur.rowcount, 5) + + @skip_before_postgres(9) + def test_pgresult_ptr(self): + curs = self.conn.cursor() + self.assert_(curs.pgresult_ptr is None) + + curs.execute("select 'x'") + self.assert_(curs.pgresult_ptr is not None) + + try: + f = self.libpq.PQcmdStatus + except AttributeError: + pass + else: + f.argtypes = [ctypes.c_void_p] + f.restype = ctypes.c_char_p + status = f(curs.pgresult_ptr) + self.assertEqual(status, b'SELECT 1') + + curs.close() + self.assert_(curs.pgresult_ptr is None) + + +@skip_if_crdb("named cursor") +class 
NamedCursorTests(ConnectingTestCase): + def test_invalid_name(self): + curs = self.conn.cursor() + curs.execute("create table invname (data int);") + for i in (10, 20, 30): + curs.execute("insert into invname values (%s)", (i,)) + curs.close() + + curs = self.conn.cursor(r'1-2-3 \ "test"') + curs.execute("select data from invname order by data") + self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)]) + + def _create_withhold_table(self): + curs = self.conn.cursor() + try: + curs.execute("drop table withhold") + except psycopg2.ProgrammingError: + self.conn.rollback() + curs.execute("create table withhold (data int)") + for i in (10, 20, 30): + curs.execute("insert into withhold values (%s)", (i,)) + curs.close() + + def test_withhold(self): + self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor, + withhold=True) + + self._create_withhold_table() + curs = self.conn.cursor("W") + self.assertEqual(curs.withhold, False) + curs.withhold = True + self.assertEqual(curs.withhold, True) + curs.execute("select data from withhold order by data") + self.conn.commit() + self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)]) + curs.close() + + curs = self.conn.cursor("W", withhold=True) + self.assertEqual(curs.withhold, True) + curs.execute("select data from withhold order by data") + self.conn.commit() + self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)]) + + curs = self.conn.cursor() + curs.execute("drop table withhold") + self.conn.commit() + + def test_withhold_no_begin(self): + self._create_withhold_table() + curs = self.conn.cursor("w", withhold=True) + curs.execute("select data from withhold order by data") + self.assertEqual(curs.fetchone(), (10,)) + self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN) + self.assertEqual(self.conn.info.transaction_status, + psycopg2.extensions.TRANSACTION_STATUS_INTRANS) + + self.conn.commit() + self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY) + 
self.assertEqual(self.conn.info.transaction_status, + psycopg2.extensions.TRANSACTION_STATUS_IDLE) + + self.assertEqual(curs.fetchone(), (20,)) + self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + psycopg2.extensions.TRANSACTION_STATUS_IDLE) + + curs.close() + self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + psycopg2.extensions.TRANSACTION_STATUS_IDLE) + + def test_withhold_autocommit(self): + self._create_withhold_table() + self.conn.commit() + self.conn.autocommit = True + curs = self.conn.cursor("w", withhold=True) + curs.execute("select data from withhold order by data") + + self.assertEqual(curs.fetchone(), (10,)) + self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + psycopg2.extensions.TRANSACTION_STATUS_IDLE) + + self.conn.commit() + self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + psycopg2.extensions.TRANSACTION_STATUS_IDLE) + + curs.close() + self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY) + self.assertEqual(self.conn.info.transaction_status, + psycopg2.extensions.TRANSACTION_STATUS_IDLE) + + def test_scrollable(self): + self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor, + scrollable=True) + + curs = self.conn.cursor() + curs.execute("create table scrollable (data int)") + curs.executemany("insert into scrollable values (%s)", + [(i,) for i in range(100)]) + curs.close() + + for t in range(2): + if not t: + curs = self.conn.cursor("S") + self.assertEqual(curs.scrollable, None) + curs.scrollable = True + else: + curs = self.conn.cursor("S", scrollable=True) + + self.assertEqual(curs.scrollable, True) + curs.itersize = 10 + + # complex enough to make postgres cursors declare without + # scroll/no scroll to fail + curs.execute(""" + 
select x.data + from scrollable x + join scrollable y on x.data = y.data + order by y.data""") + for i, (n,) in enumerate(curs): + self.assertEqual(i, n) + + curs.scroll(-1) + for i in range(99, -1, -1): + curs.scroll(-1) + self.assertEqual(i, curs.fetchone()[0]) + curs.scroll(-1) + + curs.close() + + def test_not_scrollable(self): + self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor, + scrollable=False) + + curs = self.conn.cursor() + curs.execute("create table scrollable (data int)") + curs.executemany("insert into scrollable values (%s)", + [(i,) for i in range(100)]) + curs.close() + + curs = self.conn.cursor("S") # default scrollability + curs.execute("select * from scrollable") + self.assertEqual(curs.scrollable, None) + curs.scroll(2) + try: + curs.scroll(-1) + except psycopg2.OperationalError: + return self.skipTest("can't evaluate non-scrollable cursor") + curs.close() + + curs = self.conn.cursor("S", scrollable=False) + self.assertEqual(curs.scrollable, False) + curs.execute("select * from scrollable") + curs.scroll(2) + self.assertRaises(psycopg2.OperationalError, curs.scroll, -1) + + @slow + @skip_before_postgres(8, 2) + def test_iter_named_cursor_efficient(self): + curs = self.conn.cursor('tmp') + # if these records are fetched in the same roundtrip their + # timestamp will not be influenced by the pause in Python world. 
+ curs.execute("""select clock_timestamp() from generate_series(1,2)""") + i = iter(curs) + t1 = next(i)[0] + time.sleep(0.2) + t2 = next(i)[0] + self.assert_((t2 - t1).microseconds * 1e-6 < 0.1, + f"named cursor records fetched in 2 roundtrips (delta: {t2 - t1})") + + @skip_before_postgres(8, 0) + def test_iter_named_cursor_default_itersize(self): + curs = self.conn.cursor('tmp') + curs.execute('select generate_series(1,50)') + rv = [(r[0], curs.rownumber) for r in curs] + # everything swallowed in one gulp + self.assertEqual(rv, [(i, i) for i in range(1, 51)]) + + @skip_before_postgres(8, 0) + def test_iter_named_cursor_itersize(self): + curs = self.conn.cursor('tmp') + curs.itersize = 30 + curs.execute('select generate_series(1,50)') + rv = [(r[0], curs.rownumber) for r in curs] + # everything swallowed in two gulps + self.assertEqual(rv, [(i, ((i - 1) % 30) + 1) for i in range(1, 51)]) + + @skip_before_postgres(8, 0) + def test_iter_named_cursor_rownumber(self): + curs = self.conn.cursor('tmp') + # note: this fails if itersize < dataset: internally we check + # rownumber == rowcount to detect when to read anoter page, so we + # would need an extra attribute to have a monotonic rownumber. 
+ curs.itersize = 20 + curs.execute('select generate_series(1,10)') + for i, rec in enumerate(curs): + self.assertEqual(i + 1, curs.rownumber) + + @skip_before_postgres(8, 0) + def test_named_cursor_stealing(self): + # you can use a named cursor to iterate on a refcursor created + # somewhere else + cur1 = self.conn.cursor() + cur1.execute("DECLARE test CURSOR WITHOUT HOLD " + " FOR SELECT generate_series(1,7)") + + cur2 = self.conn.cursor('test') + # can call fetch without execute + self.assertEqual((1,), cur2.fetchone()) + self.assertEqual([(2,), (3,), (4,)], cur2.fetchmany(3)) + self.assertEqual([(5,), (6,), (7,)], cur2.fetchall()) + + @skip_before_postgres(8, 2) + def test_named_noop_close(self): + cur = self.conn.cursor('test') + cur.close() + + @skip_before_postgres(8, 2) + def test_stolen_named_cursor_close(self): + cur1 = self.conn.cursor() + cur1.execute("DECLARE test CURSOR WITHOUT HOLD " + " FOR SELECT generate_series(1,7)") + cur2 = self.conn.cursor('test') + cur2.close() + + cur1.execute("DECLARE test CURSOR WITHOUT HOLD " + " FOR SELECT generate_series(1,7)") + cur2 = self.conn.cursor('test') + cur2.close() + + @skip_before_postgres(8, 0) + def test_scroll(self): + cur = self.conn.cursor() + cur.execute("select generate_series(0,9)") + cur.scroll(2) + self.assertEqual(cur.fetchone(), (2,)) + cur.scroll(2) + self.assertEqual(cur.fetchone(), (5,)) + cur.scroll(2, mode='relative') + self.assertEqual(cur.fetchone(), (8,)) + cur.scroll(-1) + self.assertEqual(cur.fetchone(), (8,)) + cur.scroll(-2) + self.assertEqual(cur.fetchone(), (7,)) + cur.scroll(2, mode='absolute') + self.assertEqual(cur.fetchone(), (2,)) + + # on the boundary + cur.scroll(0, mode='absolute') + self.assertEqual(cur.fetchone(), (0,)) + self.assertRaises((IndexError, psycopg2.ProgrammingError), + cur.scroll, -1, mode='absolute') + cur.scroll(0, mode='absolute') + self.assertRaises((IndexError, psycopg2.ProgrammingError), + cur.scroll, -1) + + cur.scroll(9, mode='absolute') + 
self.assertEqual(cur.fetchone(), (9,)) + self.assertRaises((IndexError, psycopg2.ProgrammingError), + cur.scroll, 10, mode='absolute') + cur.scroll(9, mode='absolute') + self.assertRaises((IndexError, psycopg2.ProgrammingError), + cur.scroll, 1) + + @skip_before_postgres(8, 0) + def test_scroll_named(self): + cur = self.conn.cursor('tmp', scrollable=True) + cur.execute("select generate_series(0,9)") + cur.scroll(2) + self.assertEqual(cur.fetchone(), (2,)) + cur.scroll(2) + self.assertEqual(cur.fetchone(), (5,)) + cur.scroll(2, mode='relative') + self.assertEqual(cur.fetchone(), (8,)) + cur.scroll(9, mode='absolute') + self.assertEqual(cur.fetchone(), (9,)) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_dates.py b/tests/test_dates.py new file mode 100755 index 0000000000000000000000000000000000000000..7f52a9164753cb5d1a838dca4549dc6386efc71a --- /dev/null +++ b/tests/test_dates.py @@ -0,0 +1,555 @@ +#!/usr/bin/env python + +# test_dates.py - unit test for dates handling +# +# Copyright (C) 2008-2019 James Henstridge +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. 
+# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import sys +import math +import pickle +from datetime import date, datetime, time, timedelta, timezone + +import psycopg2 +from psycopg2.tz import FixedOffsetTimezone, ZERO +import unittest +from .testutils import ConnectingTestCase, skip_before_postgres, skip_if_crdb + + +def total_seconds(d): + """Return total number of seconds of a timedelta as a float.""" + return d.days * 24 * 60 * 60 + d.seconds + d.microseconds / 1000000.0 + + +class CommonDatetimeTestsMixin: + + def execute(self, *args): + self.curs.execute(*args) + return self.curs.fetchone()[0] + + def test_parse_date(self): + value = self.DATE('2007-01-01', self.curs) + self.assert_(value is not None) + self.assertEqual(value.year, 2007) + self.assertEqual(value.month, 1) + self.assertEqual(value.day, 1) + + def test_parse_null_date(self): + value = self.DATE(None, self.curs) + self.assertEqual(value, None) + + def test_parse_incomplete_date(self): + self.assertRaises(psycopg2.DataError, self.DATE, '2007', self.curs) + self.assertRaises(psycopg2.DataError, self.DATE, '2007-01', self.curs) + + def test_parse_time(self): + value = self.TIME('13:30:29', self.curs) + self.assert_(value is not None) + self.assertEqual(value.hour, 13) + self.assertEqual(value.minute, 30) + self.assertEqual(value.second, 29) + + def test_parse_null_time(self): + value = self.TIME(None, self.curs) + self.assertEqual(value, None) + + def test_parse_incomplete_time(self): + self.assertRaises(psycopg2.DataError, self.TIME, '13', self.curs) + self.assertRaises(psycopg2.DataError, self.TIME, '13:30', self.curs) + + def test_parse_datetime(self): + value = self.DATETIME('2007-01-01 13:30:29', self.curs) + self.assert_(value is not None) + self.assertEqual(value.year, 2007) + 
self.assertEqual(value.month, 1) + self.assertEqual(value.day, 1) + self.assertEqual(value.hour, 13) + self.assertEqual(value.minute, 30) + self.assertEqual(value.second, 29) + + def test_parse_null_datetime(self): + value = self.DATETIME(None, self.curs) + self.assertEqual(value, None) + + def test_parse_incomplete_datetime(self): + self.assertRaises(psycopg2.DataError, + self.DATETIME, '2007', self.curs) + self.assertRaises(psycopg2.DataError, + self.DATETIME, '2007-01', self.curs) + self.assertRaises(psycopg2.DataError, + self.DATETIME, '2007-01-01 13', self.curs) + self.assertRaises(psycopg2.DataError, + self.DATETIME, '2007-01-01 13:30', self.curs) + + def test_parse_null_interval(self): + value = self.INTERVAL(None, self.curs) + self.assertEqual(value, None) + + +class DatetimeTests(ConnectingTestCase, CommonDatetimeTestsMixin): + """Tests for the datetime based date handling in psycopg2.""" + + def setUp(self): + ConnectingTestCase.setUp(self) + self.curs = self.conn.cursor() + self.DATE = psycopg2.extensions.PYDATE + self.TIME = psycopg2.extensions.PYTIME + self.DATETIME = psycopg2.extensions.PYDATETIME + self.INTERVAL = psycopg2.extensions.PYINTERVAL + + def test_parse_bc_date(self): + # datetime does not support BC dates + self.assertRaises(ValueError, self.DATE, '00042-01-01 BC', self.curs) + + def test_parse_bc_datetime(self): + # datetime does not support BC dates + self.assertRaises(ValueError, self.DATETIME, + '00042-01-01 13:30:29 BC', self.curs) + + def test_parse_time_microseconds(self): + value = self.TIME('13:30:29.123456', self.curs) + self.assertEqual(value.second, 29) + self.assertEqual(value.microsecond, 123456) + + def test_parse_datetime_microseconds(self): + value = self.DATETIME('2007-01-01 13:30:29.123456', self.curs) + self.assertEqual(value.second, 29) + self.assertEqual(value.microsecond, 123456) + + def check_time_tz(self, str_offset, offset): + base = time(13, 30, 29) + base_str = '13:30:29' + + value = self.TIME(base_str + 
str_offset, self.curs) + + # Value has time zone info and correct UTC offset. + self.assertNotEqual(value.tzinfo, None), + self.assertEqual(value.utcoffset(), timedelta(seconds=offset)) + + # Time portion is correct. + self.assertEqual(value.replace(tzinfo=None), base) + + def test_parse_time_timezone(self): + self.check_time_tz("+01", 3600) + self.check_time_tz("-01", -3600) + self.check_time_tz("+01:15", 4500) + self.check_time_tz("-01:15", -4500) + if sys.version_info < (3, 7): + # The Python < 3.7 datetime module does not support time zone + # offsets that are not a whole number of minutes. + # We round the offset to the nearest minute. + self.check_time_tz("+01:15:00", 60 * (60 + 15)) + self.check_time_tz("+01:15:29", 60 * (60 + 15)) + self.check_time_tz("+01:15:30", 60 * (60 + 16)) + self.check_time_tz("+01:15:59", 60 * (60 + 16)) + self.check_time_tz("-01:15:00", -60 * (60 + 15)) + self.check_time_tz("-01:15:29", -60 * (60 + 15)) + self.check_time_tz("-01:15:30", -60 * (60 + 16)) + self.check_time_tz("-01:15:59", -60 * (60 + 16)) + else: + self.check_time_tz("+01:15:00", 60 * (60 + 15)) + self.check_time_tz("+01:15:29", 60 * (60 + 15) + 29) + self.check_time_tz("+01:15:30", 60 * (60 + 15) + 30) + self.check_time_tz("+01:15:59", 60 * (60 + 15) + 59) + self.check_time_tz("-01:15:00", -(60 * (60 + 15))) + self.check_time_tz("-01:15:29", -(60 * (60 + 15) + 29)) + self.check_time_tz("-01:15:30", -(60 * (60 + 15) + 30)) + self.check_time_tz("-01:15:59", -(60 * (60 + 15) + 59)) + + def check_datetime_tz(self, str_offset, offset): + base = datetime(2007, 1, 1, 13, 30, 29) + base_str = '2007-01-01 13:30:29' + + value = self.DATETIME(base_str + str_offset, self.curs) + + # Value has time zone info and correct UTC offset. + self.assertNotEqual(value.tzinfo, None), + self.assertEqual(value.utcoffset(), timedelta(seconds=offset)) + + # Datetime is correct. + self.assertEqual(value.replace(tzinfo=None), base) + + # Conversion to UTC produces the expected offset. 
+ UTC = timezone(timedelta(0)) + value_utc = value.astimezone(UTC).replace(tzinfo=None) + self.assertEqual(base - value_utc, timedelta(seconds=offset)) + + def test_default_tzinfo(self): + self.curs.execute("select '2000-01-01 00:00+02:00'::timestamptz") + dt = self.curs.fetchone()[0] + self.assert_(isinstance(dt.tzinfo, timezone)) + self.assertEqual(dt, + datetime(2000, 1, 1, tzinfo=timezone(timedelta(minutes=120)))) + + def test_fotz_tzinfo(self): + self.curs.tzinfo_factory = FixedOffsetTimezone + self.curs.execute("select '2000-01-01 00:00+02:00'::timestamptz") + dt = self.curs.fetchone()[0] + self.assert_(not isinstance(dt.tzinfo, timezone)) + self.assert_(isinstance(dt.tzinfo, FixedOffsetTimezone)) + self.assertEqual(dt, + datetime(2000, 1, 1, tzinfo=timezone(timedelta(minutes=120)))) + + def test_parse_datetime_timezone(self): + self.check_datetime_tz("+01", 3600) + self.check_datetime_tz("-01", -3600) + self.check_datetime_tz("+01:15", 4500) + self.check_datetime_tz("-01:15", -4500) + if sys.version_info < (3, 7): + # The Python < 3.7 datetime module does not support time zone + # offsets that are not a whole number of minutes. + # We round the offset to the nearest minute. 
+ self.check_datetime_tz("+01:15:00", 60 * (60 + 15)) + self.check_datetime_tz("+01:15:29", 60 * (60 + 15)) + self.check_datetime_tz("+01:15:30", 60 * (60 + 16)) + self.check_datetime_tz("+01:15:59", 60 * (60 + 16)) + self.check_datetime_tz("-01:15:00", -60 * (60 + 15)) + self.check_datetime_tz("-01:15:29", -60 * (60 + 15)) + self.check_datetime_tz("-01:15:30", -60 * (60 + 16)) + self.check_datetime_tz("-01:15:59", -60 * (60 + 16)) + else: + self.check_datetime_tz("+01:15:00", 60 * (60 + 15)) + self.check_datetime_tz("+01:15:29", 60 * (60 + 15) + 29) + self.check_datetime_tz("+01:15:30", 60 * (60 + 15) + 30) + self.check_datetime_tz("+01:15:59", 60 * (60 + 15) + 59) + self.check_datetime_tz("-01:15:00", -(60 * (60 + 15))) + self.check_datetime_tz("-01:15:29", -(60 * (60 + 15) + 29)) + self.check_datetime_tz("-01:15:30", -(60 * (60 + 15) + 30)) + self.check_datetime_tz("-01:15:59", -(60 * (60 + 15) + 59)) + + def test_parse_time_no_timezone(self): + self.assertEqual(self.TIME("13:30:29", self.curs).tzinfo, None) + self.assertEqual(self.TIME("13:30:29.123456", self.curs).tzinfo, None) + + def test_parse_datetime_no_timezone(self): + self.assertEqual( + self.DATETIME("2007-01-01 13:30:29", self.curs).tzinfo, None) + self.assertEqual( + self.DATETIME("2007-01-01 13:30:29.123456", self.curs).tzinfo, None) + + def test_parse_interval(self): + value = self.INTERVAL('42 days 12:34:56.123456', self.curs) + self.assertNotEqual(value, None) + self.assertEqual(value.days, 42) + self.assertEqual(value.seconds, 45296) + self.assertEqual(value.microseconds, 123456) + + def test_parse_negative_interval(self): + value = self.INTERVAL('-42 days -12:34:56.123456', self.curs) + self.assertNotEqual(value, None) + self.assertEqual(value.days, -43) + self.assertEqual(value.seconds, 41103) + self.assertEqual(value.microseconds, 876544) + + def test_parse_infinity(self): + value = self.DATETIME('-infinity', self.curs) + self.assertEqual(str(value), '0001-01-01 00:00:00') + value = 
self.DATETIME('infinity', self.curs) + self.assertEqual(str(value), '9999-12-31 23:59:59.999999') + value = self.DATE('infinity', self.curs) + self.assertEqual(str(value), '9999-12-31') + + def test_adapt_date(self): + value = self.execute('select (%s)::date::text', + [date(2007, 1, 1)]) + self.assertEqual(value, '2007-01-01') + + def test_adapt_time(self): + value = self.execute('select (%s)::time::text', + [time(13, 30, 29)]) + self.assertEqual(value, '13:30:29') + + @skip_if_crdb("cast adds tz") + def test_adapt_datetime(self): + value = self.execute('select (%s)::timestamp::text', + [datetime(2007, 1, 1, 13, 30, 29)]) + self.assertEqual(value, '2007-01-01 13:30:29') + + def test_adapt_timedelta(self): + value = self.execute('select extract(epoch from (%s)::interval)', + [timedelta(days=42, seconds=45296, + microseconds=123456)]) + seconds = math.floor(value) + self.assertEqual(seconds, 3674096) + self.assertEqual(int(round((value - seconds) * 1000000)), 123456) + + def test_adapt_negative_timedelta(self): + value = self.execute('select extract(epoch from (%s)::interval)', + [timedelta(days=-42, seconds=45296, + microseconds=123456)]) + seconds = math.floor(value) + self.assertEqual(seconds, -3583504) + self.assertEqual(int(round((value - seconds) * 1000000)), 123456) + + def _test_type_roundtrip(self, o1): + o2 = self.execute("select %s;", (o1,)) + self.assertEqual(type(o1), type(o2)) + return o2 + + def _test_type_roundtrip_array(self, o1): + o1 = [o1] + o2 = self.execute("select %s;", (o1,)) + self.assertEqual(type(o1[0]), type(o2[0])) + + def test_type_roundtrip_date(self): + self._test_type_roundtrip(date(2010, 5, 3)) + + def test_type_roundtrip_datetime(self): + dt = self._test_type_roundtrip(datetime(2010, 5, 3, 10, 20, 30)) + self.assertEqual(None, dt.tzinfo) + + def test_type_roundtrip_datetimetz(self): + tz = timezone(timedelta(minutes=8 * 60)) + dt1 = datetime(2010, 5, 3, 10, 20, 30, tzinfo=tz) + dt2 = self._test_type_roundtrip(dt1) + 
self.assertNotEqual(None, dt2.tzinfo) + self.assertEqual(dt1, dt2) + + def test_type_roundtrip_time(self): + tm = self._test_type_roundtrip(time(10, 20, 30)) + self.assertEqual(None, tm.tzinfo) + + def test_type_roundtrip_timetz(self): + tz = timezone(timedelta(minutes=8 * 60)) + tm1 = time(10, 20, 30, tzinfo=tz) + tm2 = self._test_type_roundtrip(tm1) + self.assertNotEqual(None, tm2.tzinfo) + self.assertEqual(tm1, tm2) + + def test_type_roundtrip_interval(self): + self._test_type_roundtrip(timedelta(seconds=30)) + + def test_type_roundtrip_date_array(self): + self._test_type_roundtrip_array(date(2010, 5, 3)) + + def test_type_roundtrip_datetime_array(self): + self._test_type_roundtrip_array(datetime(2010, 5, 3, 10, 20, 30)) + + def test_type_roundtrip_datetimetz_array(self): + self._test_type_roundtrip_array( + datetime(2010, 5, 3, 10, 20, 30, tzinfo=timezone(timedelta(0)))) + + def test_type_roundtrip_time_array(self): + self._test_type_roundtrip_array(time(10, 20, 30)) + + def test_type_roundtrip_interval_array(self): + self._test_type_roundtrip_array(timedelta(seconds=30)) + + @skip_before_postgres(8, 1) + def test_time_24(self): + t = self.execute("select '24:00'::time;") + self.assertEqual(t, time(0, 0)) + + t = self.execute("select '24:00+05'::timetz;") + self.assertEqual(t, time(0, 0, tzinfo=timezone(timedelta(minutes=300)))) + + t = self.execute("select '24:00+05:30'::timetz;") + self.assertEqual(t, time(0, 0, tzinfo=timezone(timedelta(minutes=330)))) + + @skip_before_postgres(8, 1) + def test_large_interval(self): + t = self.execute("select '999999:00:00'::interval") + self.assertEqual(total_seconds(t), 999999 * 60 * 60) + + t = self.execute("select '-999999:00:00'::interval") + self.assertEqual(total_seconds(t), -999999 * 60 * 60) + + t = self.execute("select '999999:00:00.1'::interval") + self.assertEqual(total_seconds(t), 999999 * 60 * 60 + 0.1) + + t = self.execute("select '999999:00:00.9'::interval") + self.assertEqual(total_seconds(t), 999999 * 60 * 
60 + 0.9) + + t = self.execute("select '-999999:00:00.1'::interval") + self.assertEqual(total_seconds(t), -999999 * 60 * 60 - 0.1) + + t = self.execute("select '-999999:00:00.9'::interval") + self.assertEqual(total_seconds(t), -999999 * 60 * 60 - 0.9) + + def test_micros_rounding(self): + t = self.execute("select '0.1'::interval") + self.assertEqual(total_seconds(t), 0.1) + + t = self.execute("select '0.01'::interval") + self.assertEqual(total_seconds(t), 0.01) + + t = self.execute("select '0.000001'::interval") + self.assertEqual(total_seconds(t), 1e-6) + + t = self.execute("select '0.0000004'::interval") + self.assertEqual(total_seconds(t), 0) + + t = self.execute("select '0.0000006'::interval") + self.assertEqual(total_seconds(t), 1e-6) + + def test_interval_overflow(self): + cur = self.conn.cursor() + # hack a cursor to receive values too extreme to be represented + # but still I want an error, not a random number + psycopg2.extensions.register_type( + psycopg2.extensions.new_type( + psycopg2.STRING.values, 'WAT', psycopg2.extensions.INTERVAL), + cur) + + def f(val): + cur.execute(f"select '{val}'::text") + return cur.fetchone()[0] + + self.assertRaises(OverflowError, f, '100000000000000000:00:00') + self.assertRaises(OverflowError, f, '00:100000000000000000:00:00') + self.assertRaises(OverflowError, f, '00:00:100000000000000000:00') + self.assertRaises(OverflowError, f, '00:00:00.100000000000000000') + + @skip_if_crdb("infinity date") + def test_adapt_infinity_tz(self): + t = self.execute("select 'infinity'::timestamp") + self.assert_(t.tzinfo is None) + self.assert_(t > datetime(4000, 1, 1)) + + t = self.execute("select '-infinity'::timestamp") + self.assert_(t.tzinfo is None) + self.assert_(t < datetime(1000, 1, 1)) + + t = self.execute("select 'infinity'::timestamptz") + self.assert_(t.tzinfo is not None) + self.assert_(t > datetime(4000, 1, 1, tzinfo=timezone(timedelta(0)))) + + t = self.execute("select '-infinity'::timestamptz") + self.assert_(t.tzinfo is 
not None) + self.assert_(t < datetime(1000, 1, 1, tzinfo=timezone(timedelta(0)))) + + def test_redshift_day(self): + # Redshift is reported returning 1 day interval as microsec (bug #558) + cur = self.conn.cursor() + psycopg2.extensions.register_type( + psycopg2.extensions.new_type( + psycopg2.STRING.values, 'WAT', psycopg2.extensions.INTERVAL), + cur) + + for s, v in [ + ('0', timedelta(0)), + ('1', timedelta(microseconds=1)), + ('-1', timedelta(microseconds=-1)), + ('1000000', timedelta(seconds=1)), + ('86400000000', timedelta(days=1)), + ('-86400000000', timedelta(days=-1)), + ]: + cur.execute("select %s::text", (s,)) + r = cur.fetchone()[0] + self.assertEqual(r, v, f"{s} -> {r} != {v}") + + @skip_if_crdb("interval style") + @skip_before_postgres(8, 4) + def test_interval_iso_8601_not_supported(self): + # We may end up supporting, but no pressure for it + cur = self.conn.cursor() + cur.execute("set local intervalstyle to iso_8601") + cur.execute("select '1 day 2 hours'::interval") + self.assertRaises(psycopg2.NotSupportedError, cur.fetchone) + + +class FromTicksTestCase(unittest.TestCase): + # bug "TimestampFromTicks() throws ValueError (2-2.0.14)" + # reported by Jozsef Szalay on 2010-05-06 + def test_timestamp_value_error_sec_59_99(self): + s = psycopg2.TimestampFromTicks(1273173119.99992) + self.assertEqual(s.adapted, + datetime(2010, 5, 6, 14, 11, 59, 999920, + tzinfo=timezone(timedelta(minutes=-5 * 60)))) + + def test_date_value_error_sec_59_99(self): + s = psycopg2.DateFromTicks(1273173119.99992) + # The returned date is local + self.assert_(s.adapted in [date(2010, 5, 6), date(2010, 5, 7)]) + + def test_time_value_error_sec_59_99(self): + s = psycopg2.TimeFromTicks(1273173119.99992) + self.assertEqual(s.adapted.replace(hour=0), + time(0, 11, 59, 999920)) + + +class FixedOffsetTimezoneTests(unittest.TestCase): + + def test_init_with_no_args(self): + tzinfo = FixedOffsetTimezone() + self.assert_(tzinfo._offset is ZERO) + self.assert_(tzinfo._name is None) + 
+ def test_repr_with_positive_offset(self): + tzinfo = FixedOffsetTimezone(5 * 60) + self.assertEqual(repr(tzinfo), + "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=None)" + % timedelta(minutes=5 * 60)) + + def test_repr_with_negative_offset(self): + tzinfo = FixedOffsetTimezone(-5 * 60) + self.assertEqual(repr(tzinfo), + "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=None)" + % timedelta(minutes=-5 * 60)) + + def test_init_with_timedelta(self): + td = timedelta(minutes=5 * 60) + tzinfo = FixedOffsetTimezone(td) + self.assertEqual(tzinfo, FixedOffsetTimezone(5 * 60)) + self.assertEqual(repr(tzinfo), + "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=None)" % td) + + def test_repr_with_name(self): + tzinfo = FixedOffsetTimezone(name="FOO") + self.assertEqual(repr(tzinfo), + "psycopg2.tz.FixedOffsetTimezone(offset=%r, name='FOO')" + % timedelta(0)) + + def test_instance_caching(self): + self.assert_(FixedOffsetTimezone(name="FOO") + is FixedOffsetTimezone(name="FOO")) + self.assert_(FixedOffsetTimezone(7 * 60) + is FixedOffsetTimezone(7 * 60)) + self.assert_(FixedOffsetTimezone(-9 * 60, 'FOO') + is FixedOffsetTimezone(-9 * 60, 'FOO')) + self.assert_(FixedOffsetTimezone(9 * 60) + is not FixedOffsetTimezone(9 * 60, 'FOO')) + self.assert_(FixedOffsetTimezone(name='FOO') + is not FixedOffsetTimezone(9 * 60, 'FOO')) + + def test_pickle(self): + # ticket #135 + tz11 = FixedOffsetTimezone(60) + tz12 = FixedOffsetTimezone(120) + for proto in [-1, 0, 1, 2]: + tz21, tz22 = pickle.loads(pickle.dumps([tz11, tz12], proto)) + self.assertEqual(tz11, tz21) + self.assertEqual(tz12, tz22) + + tz11 = FixedOffsetTimezone(60, name='foo') + tz12 = FixedOffsetTimezone(120, name='bar') + for proto in [-1, 0, 1, 2]: + tz21, tz22 = pickle.loads(pickle.dumps([tz11, tz12], proto)) + self.assertEqual(tz11, tz21) + self.assertEqual(tz12, tz22) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/tests/test_errcodes.py b/tests/test_errcodes.py new file mode 100755 index 0000000000000000000000000000000000000000..139c1ade6ac5d35a90e6fffbfc2cdda319465e03 --- /dev/null +++ b/tests/test_errcodes.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python + +# test_errcodes.py - unit test for psycopg2.errcodes module +# +# Copyright (C) 2015-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import unittest +from .testutils import ConnectingTestCase, slow, reload + +from threading import Thread +from psycopg2 import errorcodes + + +class ErrocodeTests(ConnectingTestCase): + @slow + def test_lookup_threadsafe(self): + + # Increase if it does not fail with KeyError + MAX_CYCLES = 2000 + + errs = [] + + def f(pg_code='40001'): + try: + errorcodes.lookup(pg_code) + except Exception as e: + errs.append(e) + + for __ in range(MAX_CYCLES): + reload(errorcodes) + (t1, t2) = (Thread(target=f), Thread(target=f)) + (t1.start(), t2.start()) + (t1.join(), t2.join()) + + if errs: + self.fail( + "raised {} errors in {} cycles (first is {} {})".format( + len(errs), MAX_CYCLES, + errs[0].__class__.__name__, errs[0])) + + def test_ambiguous_names(self): + self.assertEqual( + errorcodes.lookup('2F004'), "READING_SQL_DATA_NOT_PERMITTED") + self.assertEqual( + errorcodes.lookup('38004'), "READING_SQL_DATA_NOT_PERMITTED") + self.assertEqual(errorcodes.READING_SQL_DATA_NOT_PERMITTED, '38004') + self.assertEqual(errorcodes.READING_SQL_DATA_NOT_PERMITTED_, '2F004') + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_errors.py b/tests/test_errors.py new file mode 100755 index 0000000000000000000000000000000000000000..e3b5b237997fb569b5afd51c0abcca6cc415905d --- /dev/null +++ b/tests/test_errors.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python + +# test_errors.py - unit test for psycopg2.errors module +# +# Copyright (C) 2018-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import unittest +from .testutils import ConnectingTestCase + +import psycopg2 +from psycopg2 import errors +from psycopg2._psycopg import sqlstate_errors +from psycopg2.errors import UndefinedTable + + +class ErrorsTests(ConnectingTestCase): + def test_exception_class(self): + cur = self.conn.cursor() + try: + cur.execute("select * from nonexist") + except psycopg2.Error as exc: + e = exc + + self.assert_(isinstance(e, UndefinedTable), type(e)) + self.assert_(isinstance(e, self.conn.ProgrammingError)) + + def test_exception_class_fallback(self): + cur = self.conn.cursor() + + x = sqlstate_errors.pop('42P01') + try: + cur.execute("select * from nonexist") + except psycopg2.Error as exc: + e = exc + finally: + sqlstate_errors['42P01'] = x + + self.assertEqual(type(e), self.conn.ProgrammingError) + + def test_lookup(self): + self.assertIs(errors.lookup('42P01'), errors.UndefinedTable) + + with self.assertRaises(KeyError): + errors.lookup('XXXXX') + + def test_connection_exceptions_backwards_compatibility(self): + err = errors.lookup('08000') + # connection exceptions are classified as operational errors + self.assert_(issubclass(err, errors.OperationalError)) + # previously these errors were classified only as DatabaseError + self.assert_(issubclass(err, errors.DatabaseError)) + + def test_has_base_exceptions(self): + excs = [] + for n 
in dir(psycopg2): + obj = getattr(psycopg2, n) + if isinstance(obj, type) and issubclass(obj, Exception): + excs.append(obj) + + self.assert_(len(excs) > 8, str(excs)) + + excs.append(psycopg2.extensions.QueryCanceledError) + excs.append(psycopg2.extensions.TransactionRollbackError) + + for exc in excs: + self.assert_(hasattr(errors, exc.__name__)) + self.assert_(getattr(errors, exc.__name__) is exc) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_extras_dictcursor.py b/tests/test_extras_dictcursor.py new file mode 100755 index 0000000000000000000000000000000000000000..186daf302b17f02989b2d8a972c6939b960ab4dd --- /dev/null +++ b/tests/test_extras_dictcursor.py @@ -0,0 +1,646 @@ +#!/usr/bin/env python +# +# extras_dictcursor - test if DictCursor extension class works +# +# Copyright (C) 2004-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
class _DictCursorBase(ConnectingTestCase):
    """Shared fixture and helpers for DictCursor/RealDictCursor tests.

    Fix: ``self.assert_`` is a deprecated alias removed in Python 3.12;
    replaced with ``assertLess``/``assertGreater`` (also better messages).
    """

    def setUp(self):
        ConnectingTestCase.setUp(self)
        curs = self.conn.cursor()
        if crdb_version(self.conn) is not None:
            # CockroachDB requires temp tables to be enabled explicitly
            curs.execute("SET experimental_enable_temp_tables = 'on'")
        curs.execute("CREATE TEMPORARY TABLE ExtrasDictCursorTests (foo text)")
        curs.execute("INSERT INTO ExtrasDictCursorTests VALUES ('bar')")
        self.conn.commit()

    def _testIterRowNumber(self, curs):
        # Only checking for dataset < itersize:
        # see CursorTests.test_iter_named_cursor_rownumber
        curs.itersize = 20
        curs.execute("""select * from generate_series(1,10)""")
        for i, r in enumerate(curs):
            self.assertEqual(i + 1, curs.rownumber)

    def _testNamedCursorNotGreedy(self, curs):
        curs.itersize = 2
        curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""")
        recs = []
        for t in curs:
            time.sleep(0.01)
            recs.append(t)

        # check that the dataset was not fetched in a single gulp:
        # rows on the same page share a timestamp, a new page shows the sleep
        self.assertLess(recs[1]['ts'] - recs[0]['ts'], timedelta(seconds=0.005))
        self.assertGreater(recs[2]['ts'] - recs[1]['ts'], timedelta(seconds=0.0099))
psycopg2.extras.NamedTupleCursor)) + + def testDictCursorWithPlainCursorFetchOne(self): + self._testWithPlainCursor(lambda curs: curs.fetchone()) + + def testDictCursorWithPlainCursorFetchMany(self): + self._testWithPlainCursor(lambda curs: curs.fetchmany(100)[0]) + + def testDictCursorWithPlainCursorFetchManyNoarg(self): + self._testWithPlainCursor(lambda curs: curs.fetchmany()[0]) + + def testDictCursorWithPlainCursorFetchAll(self): + self._testWithPlainCursor(lambda curs: curs.fetchall()[0]) + + def testDictCursorWithPlainCursorIter(self): + def getter(curs): + for row in curs: + return row + self._testWithPlainCursor(getter) + + def testUpdateRow(self): + row = self._testWithPlainCursor(lambda curs: curs.fetchone()) + row['foo'] = 'qux' + self.failUnless(row['foo'] == 'qux') + self.failUnless(row[0] == 'qux') + + @skip_before_postgres(8, 0) + def testDictCursorWithPlainCursorIterRowNumber(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) + self._testIterRowNumber(curs) + + def _testWithPlainCursor(self, getter): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) + curs.execute("SELECT * FROM ExtrasDictCursorTests") + row = getter(curs) + self.failUnless(row['foo'] == 'bar') + self.failUnless(row[0] == 'bar') + return row + + def testDictCursorWithNamedCursorFetchOne(self): + self._testWithNamedCursor(lambda curs: curs.fetchone()) + + def testDictCursorWithNamedCursorFetchMany(self): + self._testWithNamedCursor(lambda curs: curs.fetchmany(100)[0]) + + def testDictCursorWithNamedCursorFetchManyNoarg(self): + self._testWithNamedCursor(lambda curs: curs.fetchmany()[0]) + + def testDictCursorWithNamedCursorFetchAll(self): + self._testWithNamedCursor(lambda curs: curs.fetchall()[0]) + + def testDictCursorWithNamedCursorIter(self): + def getter(curs): + for row in curs: + return row + self._testWithNamedCursor(getter) + + @skip_if_crdb("named cursor") + @skip_before_postgres(8, 2) + def 
testDictCursorWithNamedCursorNotGreedy(self): + curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor) + self._testNamedCursorNotGreedy(curs) + + @skip_if_crdb("named cursor") + @skip_before_postgres(8, 0) + def testDictCursorWithNamedCursorIterRowNumber(self): + curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor) + self._testIterRowNumber(curs) + + @skip_if_crdb("named cursor") + def _testWithNamedCursor(self, getter): + curs = self.conn.cursor('aname', cursor_factory=psycopg2.extras.DictCursor) + curs.execute("SELECT * FROM ExtrasDictCursorTests") + row = getter(curs) + self.failUnless(row['foo'] == 'bar') + self.failUnless(row[0] == 'bar') + + def testPickleDictRow(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) + curs.execute("select 10 as a, 20 as b") + r = curs.fetchone() + d = pickle.dumps(r) + r1 = pickle.loads(d) + self.assertEqual(r, r1) + self.assertEqual(r[0], r1[0]) + self.assertEqual(r[1], r1[1]) + self.assertEqual(r['a'], r1['a']) + self.assertEqual(r['b'], r1['b']) + self.assertEqual(r._index, r1._index) + + def test_copy(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) + curs.execute("select 10 as foo, 'hi' as bar") + rv = curs.fetchone() + self.assertEqual(len(rv), 2) + + rv2 = copy.copy(rv) + self.assertEqual(len(rv2), 2) + self.assertEqual(len(rv), 2) + + rv3 = copy.deepcopy(rv) + self.assertEqual(len(rv3), 2) + self.assertEqual(len(rv), 2) + + def test_iter_methods(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) + curs.execute("select 10 as a, 20 as b") + r = curs.fetchone() + self.assert_(not isinstance(r.keys(), list)) + self.assertEqual(len(list(r.keys())), 2) + self.assert_(not isinstance(r.values(), list)) + self.assertEqual(len(list(r.values())), 2) + self.assert_(not isinstance(r.items(), list)) + self.assertEqual(len(list(r.items())), 2) + + def test_order(self): + curs = 
self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) + curs.execute("select 5 as foo, 4 as bar, 33 as baz, 2 as qux") + r = curs.fetchone() + self.assertEqual(list(r), [5, 4, 33, 2]) + self.assertEqual(list(r.keys()), ['foo', 'bar', 'baz', 'qux']) + self.assertEqual(list(r.values()), [5, 4, 33, 2]) + self.assertEqual(list(r.items()), + [('foo', 5), ('bar', 4), ('baz', 33), ('qux', 2)]) + + r1 = pickle.loads(pickle.dumps(r)) + self.assertEqual(list(r1), list(r)) + self.assertEqual(list(r1.keys()), list(r.keys())) + self.assertEqual(list(r1.values()), list(r.values())) + self.assertEqual(list(r1.items()), list(r.items())) + + +class ExtrasDictCursorRealTests(_DictCursorBase): + def testRealMeansReal(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute("SELECT * FROM ExtrasDictCursorTests") + row = curs.fetchone() + self.assert_(isinstance(row, dict)) + + def testDictCursorWithPlainCursorRealFetchOne(self): + self._testWithPlainCursorReal(lambda curs: curs.fetchone()) + + def testDictCursorWithPlainCursorRealFetchMany(self): + self._testWithPlainCursorReal(lambda curs: curs.fetchmany(100)[0]) + + def testDictCursorWithPlainCursorRealFetchManyNoarg(self): + self._testWithPlainCursorReal(lambda curs: curs.fetchmany()[0]) + + def testDictCursorWithPlainCursorRealFetchAll(self): + self._testWithPlainCursorReal(lambda curs: curs.fetchall()[0]) + + def testDictCursorWithPlainCursorRealIter(self): + def getter(curs): + for row in curs: + return row + self._testWithPlainCursorReal(getter) + + @skip_before_postgres(8, 0) + def testDictCursorWithPlainCursorRealIterRowNumber(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + self._testIterRowNumber(curs) + + def _testWithPlainCursorReal(self, getter): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute("SELECT * FROM ExtrasDictCursorTests") + row = getter(curs) + self.failUnless(row['foo'] == 'bar') + + def 
testPickleRealDictRow(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute("select 10 as a, 20 as b") + r = curs.fetchone() + d = pickle.dumps(r) + r1 = pickle.loads(d) + self.assertEqual(r, r1) + self.assertEqual(r['a'], r1['a']) + self.assertEqual(r['b'], r1['b']) + + def test_copy(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute("select 10 as foo, 'hi' as bar") + rv = curs.fetchone() + self.assertEqual(len(rv), 2) + + rv2 = copy.copy(rv) + self.assertEqual(len(rv2), 2) + self.assertEqual(len(rv), 2) + + rv3 = copy.deepcopy(rv) + self.assertEqual(len(rv3), 2) + self.assertEqual(len(rv), 2) + + def testDictCursorRealWithNamedCursorFetchOne(self): + self._testWithNamedCursorReal(lambda curs: curs.fetchone()) + + def testDictCursorRealWithNamedCursorFetchMany(self): + self._testWithNamedCursorReal(lambda curs: curs.fetchmany(100)[0]) + + def testDictCursorRealWithNamedCursorFetchManyNoarg(self): + self._testWithNamedCursorReal(lambda curs: curs.fetchmany()[0]) + + def testDictCursorRealWithNamedCursorFetchAll(self): + self._testWithNamedCursorReal(lambda curs: curs.fetchall()[0]) + + def testDictCursorRealWithNamedCursorIter(self): + def getter(curs): + for row in curs: + return row + self._testWithNamedCursorReal(getter) + + @skip_if_crdb("named cursor") + @skip_before_postgres(8, 2) + def testDictCursorRealWithNamedCursorNotGreedy(self): + curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor) + self._testNamedCursorNotGreedy(curs) + + @skip_if_crdb("named cursor") + @skip_before_postgres(8, 0) + def testDictCursorRealWithNamedCursorIterRowNumber(self): + curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor) + self._testIterRowNumber(curs) + + @skip_if_crdb("named cursor") + def _testWithNamedCursorReal(self, getter): + curs = self.conn.cursor('aname', + cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute("SELECT * FROM 
ExtrasDictCursorTests") + row = getter(curs) + self.failUnless(row['foo'] == 'bar') + + def test_iter_methods(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute("select 10 as a, 20 as b") + r = curs.fetchone() + self.assert_(not isinstance(r.keys(), list)) + self.assertEqual(len(list(r.keys())), 2) + self.assert_(not isinstance(r.values(), list)) + self.assertEqual(len(list(r.values())), 2) + self.assert_(not isinstance(r.items(), list)) + self.assertEqual(len(list(r.items())), 2) + + def test_order(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute("select 5 as foo, 4 as bar, 33 as baz, 2 as qux") + r = curs.fetchone() + self.assertEqual(list(r), ['foo', 'bar', 'baz', 'qux']) + self.assertEqual(list(r.keys()), ['foo', 'bar', 'baz', 'qux']) + self.assertEqual(list(r.values()), [5, 4, 33, 2]) + self.assertEqual(list(r.items()), + [('foo', 5), ('bar', 4), ('baz', 33), ('qux', 2)]) + + r1 = pickle.loads(pickle.dumps(r)) + self.assertEqual(list(r1), list(r)) + self.assertEqual(list(r1.keys()), list(r.keys())) + self.assertEqual(list(r1.values()), list(r.values())) + self.assertEqual(list(r1.items()), list(r.items())) + + def test_pop(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute("select 1 as a, 2 as b, 3 as c") + r = curs.fetchone() + self.assertEqual(r.pop('b'), 2) + self.assertEqual(list(r), ['a', 'c']) + self.assertEqual(list(r.keys()), ['a', 'c']) + self.assertEqual(list(r.values()), [1, 3]) + self.assertEqual(list(r.items()), [('a', 1), ('c', 3)]) + + self.assertEqual(r.pop('b', None), None) + self.assertRaises(KeyError, r.pop, 'b') + + def test_mod(self): + curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + curs.execute("select 1 as a, 2 as b, 3 as c") + r = curs.fetchone() + r['d'] = 4 + self.assertEqual(list(r), ['a', 'b', 'c', 'd']) + self.assertEqual(list(r.keys()), ['a', 'b', 'c', 'd']) + 
self.assertEqual(list(r.values()), [1, 2, 3, 4]) + self.assertEqual(list( + r.items()), [('a', 1), ('b', 2), ('c', 3), ('d', 4)]) + + assert r['a'] == 1 + assert r['b'] == 2 + assert r['c'] == 3 + assert r['d'] == 4 + + +class NamedTupleCursorTest(ConnectingTestCase): + def setUp(self): + ConnectingTestCase.setUp(self) + + self.conn = self.connect(connection_factory=NamedTupleConnection) + curs = self.conn.cursor() + if crdb_version(self.conn) is not None: + curs.execute("SET experimental_enable_temp_tables = 'on'") + curs.execute("CREATE TEMPORARY TABLE nttest (i int, s text)") + curs.execute("INSERT INTO nttest VALUES (1, 'foo')") + curs.execute("INSERT INTO nttest VALUES (2, 'bar')") + curs.execute("INSERT INTO nttest VALUES (3, 'baz')") + self.conn.commit() + + @skip_if_crdb("named cursor") + def test_cursor_args(self): + cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.DictCursor) + self.assertEqual(cur.name, 'foo') + self.assert_(isinstance(cur, psycopg2.extras.DictCursor)) + + def test_fetchone(self): + curs = self.conn.cursor() + curs.execute("select * from nttest order by 1") + t = curs.fetchone() + self.assertEqual(t[0], 1) + self.assertEqual(t.i, 1) + self.assertEqual(t[1], 'foo') + self.assertEqual(t.s, 'foo') + self.assertEqual(curs.rownumber, 1) + self.assertEqual(curs.rowcount, 3) + + def test_fetchmany_noarg(self): + curs = self.conn.cursor() + curs.arraysize = 2 + curs.execute("select * from nttest order by 1") + res = curs.fetchmany() + self.assertEqual(2, len(res)) + self.assertEqual(res[0].i, 1) + self.assertEqual(res[0].s, 'foo') + self.assertEqual(res[1].i, 2) + self.assertEqual(res[1].s, 'bar') + self.assertEqual(curs.rownumber, 2) + self.assertEqual(curs.rowcount, 3) + + def test_fetchmany(self): + curs = self.conn.cursor() + curs.execute("select * from nttest order by 1") + res = curs.fetchmany(2) + self.assertEqual(2, len(res)) + self.assertEqual(res[0].i, 1) + self.assertEqual(res[0].s, 'foo') + self.assertEqual(res[1].i, 2) + 
self.assertEqual(res[1].s, 'bar') + self.assertEqual(curs.rownumber, 2) + self.assertEqual(curs.rowcount, 3) + + def test_fetchall(self): + curs = self.conn.cursor() + curs.execute("select * from nttest order by 1") + res = curs.fetchall() + self.assertEqual(3, len(res)) + self.assertEqual(res[0].i, 1) + self.assertEqual(res[0].s, 'foo') + self.assertEqual(res[1].i, 2) + self.assertEqual(res[1].s, 'bar') + self.assertEqual(res[2].i, 3) + self.assertEqual(res[2].s, 'baz') + self.assertEqual(curs.rownumber, 3) + self.assertEqual(curs.rowcount, 3) + + def test_executemany(self): + curs = self.conn.cursor() + curs.executemany("delete from nttest where i = %s", + [(1,), (2,)]) + curs.execute("select * from nttest order by 1") + res = curs.fetchall() + self.assertEqual(1, len(res)) + self.assertEqual(res[0].i, 3) + self.assertEqual(res[0].s, 'baz') + + def test_iter(self): + curs = self.conn.cursor() + curs.execute("select * from nttest order by 1") + i = iter(curs) + self.assertEqual(curs.rownumber, 0) + + t = next(i) + self.assertEqual(t.i, 1) + self.assertEqual(t.s, 'foo') + self.assertEqual(curs.rownumber, 1) + self.assertEqual(curs.rowcount, 3) + + t = next(i) + self.assertEqual(t.i, 2) + self.assertEqual(t.s, 'bar') + self.assertEqual(curs.rownumber, 2) + self.assertEqual(curs.rowcount, 3) + + t = next(i) + self.assertEqual(t.i, 3) + self.assertEqual(t.s, 'baz') + self.assertRaises(StopIteration, next, i) + self.assertEqual(curs.rownumber, 3) + self.assertEqual(curs.rowcount, 3) + + def test_record_updated(self): + curs = self.conn.cursor() + curs.execute("select 1 as foo;") + r = curs.fetchone() + self.assertEqual(r.foo, 1) + + curs.execute("select 2 as bar;") + r = curs.fetchone() + self.assertEqual(r.bar, 2) + self.assertRaises(AttributeError, getattr, r, 'foo') + + def test_no_result_no_surprise(self): + curs = self.conn.cursor() + curs.execute("update nttest set s = s") + self.assertRaises(psycopg2.ProgrammingError, curs.fetchone) + + curs.execute("update 
nttest set s = s") + self.assertRaises(psycopg2.ProgrammingError, curs.fetchall) + + def test_bad_col_names(self): + curs = self.conn.cursor() + curs.execute('select 1 as "foo.bar_baz", 2 as "?column?", 3 as "3"') + rv = curs.fetchone() + self.assertEqual(rv.foo_bar_baz, 1) + self.assertEqual(rv.f_column_, 2) + self.assertEqual(rv.f3, 3) + + @skip_before_postgres(8) + def test_nonascii_name(self): + curs = self.conn.cursor() + curs.execute('select 1 as \xe5h\xe9') + rv = curs.fetchone() + self.assertEqual(getattr(rv, '\xe5h\xe9'), 1) + + def test_minimal_generation(self): + # Instrument the class to verify it gets called the minimum number of times. + f_orig = NamedTupleCursor._make_nt + calls = [0] + + def f_patched(self_): + calls[0] += 1 + return f_orig(self_) + + NamedTupleCursor._make_nt = f_patched + + try: + curs = self.conn.cursor() + curs.execute("select * from nttest order by 1") + curs.fetchone() + curs.fetchone() + curs.fetchone() + self.assertEqual(1, calls[0]) + + curs.execute("select * from nttest order by 1") + curs.fetchone() + curs.fetchall() + self.assertEqual(2, calls[0]) + + curs.execute("select * from nttest order by 1") + curs.fetchone() + curs.fetchmany(1) + self.assertEqual(3, calls[0]) + + finally: + NamedTupleCursor._make_nt = f_orig + + @skip_if_crdb("named cursor") + @skip_before_postgres(8, 0) + def test_named(self): + curs = self.conn.cursor('tmp') + curs.execute("""select i from generate_series(0,9) i""") + recs = [] + recs.extend(curs.fetchmany(5)) + recs.append(curs.fetchone()) + recs.extend(curs.fetchall()) + self.assertEqual(list(range(10)), [t.i for t in recs]) + + @skip_if_crdb("named cursor") + def test_named_fetchone(self): + curs = self.conn.cursor('tmp') + curs.execute("""select 42 as i""") + t = curs.fetchone() + self.assertEqual(t.i, 42) + + @skip_if_crdb("named cursor") + def test_named_fetchmany(self): + curs = self.conn.cursor('tmp') + curs.execute("""select 42 as i""") + recs = curs.fetchmany(10) + 
self.assertEqual(recs[0].i, 42) + + @skip_if_crdb("named cursor") + def test_named_fetchall(self): + curs = self.conn.cursor('tmp') + curs.execute("""select 42 as i""") + recs = curs.fetchall() + self.assertEqual(recs[0].i, 42) + + @skip_if_crdb("named cursor") + @skip_before_postgres(8, 2) + def test_not_greedy(self): + curs = self.conn.cursor('tmp') + curs.itersize = 2 + curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""") + recs = [] + for t in curs: + time.sleep(0.01) + recs.append(t) + + # check that the dataset was not fetched in a single gulp + self.assert_(recs[1].ts - recs[0].ts < timedelta(seconds=0.005)) + self.assert_(recs[2].ts - recs[1].ts > timedelta(seconds=0.0099)) + + @skip_if_crdb("named cursor") + @skip_before_postgres(8, 0) + def test_named_rownumber(self): + curs = self.conn.cursor('tmp') + # Only checking for dataset < itersize: + # see CursorTests.test_iter_named_cursor_rownumber + curs.itersize = 4 + curs.execute("""select * from generate_series(1,3)""") + for i, t in enumerate(curs): + self.assertEqual(i + 1, curs.rownumber) + + def test_cache(self): + NamedTupleCursor._cached_make_nt.cache_clear() + + curs = self.conn.cursor() + curs.execute("select 10 as a, 20 as b") + r1 = curs.fetchone() + curs.execute("select 10 as a, 20 as c") + r2 = curs.fetchone() + + # Get a new cursor to check that the cache works across multiple ones + curs = self.conn.cursor() + curs.execute("select 10 as a, 30 as b") + r3 = curs.fetchone() + + self.assert_(type(r1) is type(r3)) + self.assert_(type(r1) is not type(r2)) + + cache_info = NamedTupleCursor._cached_make_nt.cache_info() + self.assertEqual(cache_info.hits, 1) + self.assertEqual(cache_info.misses, 2) + self.assertEqual(cache_info.currsize, 2) + + def test_max_cache(self): + old_func = NamedTupleCursor._cached_make_nt + NamedTupleCursor._cached_make_nt = \ + lru_cache(8)(NamedTupleCursor._cached_make_nt.__wrapped__) + try: + recs = [] + curs = self.conn.cursor() + for i in 
def test_suite():
    """Build this module's test suite (entry point for the test runner)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
class TestPaginate(unittest.TestCase):
    """Unit tests for the page-splitting helper used by the fast executors."""

    def test_paginate(self):
        def pages(seq):
            # materialize the generator of pages for comparison
            return list(psycopg2.extras._paginate(seq, 100))

        self.assertEqual(pages([]), [])
        self.assertEqual(pages([1]), [[1]])
        # fewer than / exactly one full page
        self.assertEqual(pages(range(99)), [list(range(99))])
        self.assertEqual(pages(range(100)), [list(range(100))])
        # a single element spills into a second page
        self.assertEqual(pages(range(101)), [list(range(100)), [100]])
        self.assertEqual(
            pages(range(200)), [list(range(100)), list(range(100, 200))])
        self.assertEqual(
            pages(range(1000)),
            [list(range(k * 100, (k + 1) * 100)) for k in range(10)])
(id, val) values (%s, %s)", + ((i, i * 10) for i in range(1000))) + cur.execute("select id, val from testfast order by id") + self.assertEqual(cur.fetchall(), [(i, i * 10) for i in range(1000)]) + + def test_composed(self): + cur = self.conn.cursor() + psycopg2.extras.execute_batch(cur, + sql.SQL("insert into {0} (id, val) values (%s, %s)") + .format(sql.Identifier('testfast')), + ((i, i * 10) for i in range(1000))) + cur.execute("select id, val from testfast order by id") + self.assertEqual(cur.fetchall(), [(i, i * 10) for i in range(1000)]) + + def test_pages(self): + cur = self.conn.cursor() + psycopg2.extras.execute_batch(cur, + "insert into testfast (id, val) values (%s, %s)", + ((i, i * 10) for i in range(25)), + page_size=10) + + # last command was 5 statements + self.assertEqual(sum(c == ';' for c in cur.query.decode('ascii')), 4) + + cur.execute("select id, val from testfast order by id") + self.assertEqual(cur.fetchall(), [(i, i * 10) for i in range(25)]) + + @testutils.skip_before_postgres(8, 0) + def test_unicode(self): + cur = self.conn.cursor() + ext.register_type(ext.UNICODE, cur) + snowman = "\u2603" + + # unicode in statement + psycopg2.extras.execute_batch(cur, + "insert into testfast (id, data) values (%%s, %%s) -- %s" % snowman, + [(1, 'x')]) + cur.execute("select id, data from testfast where id = 1") + self.assertEqual(cur.fetchone(), (1, 'x')) + + # unicode in data + psycopg2.extras.execute_batch(cur, + "insert into testfast (id, data) values (%s, %s)", + [(2, snowman)]) + cur.execute("select id, data from testfast where id = 2") + self.assertEqual(cur.fetchone(), (2, snowman)) + + # unicode in both + psycopg2.extras.execute_batch(cur, + "insert into testfast (id, data) values (%%s, %%s) -- %s" % snowman, + [(3, snowman)]) + cur.execute("select id, data from testfast where id = 3") + self.assertEqual(cur.fetchone(), (3, snowman)) + + +@testutils.skip_before_postgres(8, 2) +class TestExecuteValues(FastExecuteTestMixin, 
testutils.ConnectingTestCase): + def test_empty(self): + cur = self.conn.cursor() + psycopg2.extras.execute_values(cur, + "insert into testfast (id, val) values %s", + []) + cur.execute("select * from testfast order by id") + self.assertEqual(cur.fetchall(), []) + + def test_one(self): + cur = self.conn.cursor() + psycopg2.extras.execute_values(cur, + "insert into testfast (id, val) values %s", + iter([(1, 10)])) + cur.execute("select id, val from testfast order by id") + self.assertEqual(cur.fetchall(), [(1, 10)]) + + def test_tuples(self): + cur = self.conn.cursor() + psycopg2.extras.execute_values(cur, + "insert into testfast (id, date, val) values %s", + ((i, date(2017, 1, i + 1), i * 10) for i in range(10))) + cur.execute("select id, date, val from testfast order by id") + self.assertEqual(cur.fetchall(), + [(i, date(2017, 1, i + 1), i * 10) for i in range(10)]) + + def test_dicts(self): + cur = self.conn.cursor() + psycopg2.extras.execute_values(cur, + "insert into testfast (id, date, val) values %s", + (dict(id=i, date=date(2017, 1, i + 1), val=i * 10, foo="bar") + for i in range(10)), + template='(%(id)s, %(date)s, %(val)s)') + cur.execute("select id, date, val from testfast order by id") + self.assertEqual(cur.fetchall(), + [(i, date(2017, 1, i + 1), i * 10) for i in range(10)]) + + def test_many(self): + cur = self.conn.cursor() + psycopg2.extras.execute_values(cur, + "insert into testfast (id, val) values %s", + ((i, i * 10) for i in range(1000))) + cur.execute("select id, val from testfast order by id") + self.assertEqual(cur.fetchall(), [(i, i * 10) for i in range(1000)]) + + def test_composed(self): + cur = self.conn.cursor() + psycopg2.extras.execute_values(cur, + sql.SQL("insert into {0} (id, val) values %s") + .format(sql.Identifier('testfast')), + ((i, i * 10) for i in range(1000))) + cur.execute("select id, val from testfast order by id") + self.assertEqual(cur.fetchall(), [(i, i * 10) for i in range(1000)]) + + def test_pages(self): + cur = 
self.conn.cursor() + psycopg2.extras.execute_values(cur, + "insert into testfast (id, val) values %s", + ((i, i * 10) for i in range(25)), + page_size=10) + + # last statement was 5 tuples (one parens is for the fields list) + self.assertEqual(sum(c == '(' for c in cur.query.decode('ascii')), 6) + + cur.execute("select id, val from testfast order by id") + self.assertEqual(cur.fetchall(), [(i, i * 10) for i in range(25)]) + + def test_unicode(self): + cur = self.conn.cursor() + ext.register_type(ext.UNICODE, cur) + snowman = "\u2603" + + # unicode in statement + psycopg2.extras.execute_values(cur, + "insert into testfast (id, data) values %%s -- %s" % snowman, + [(1, 'x')]) + cur.execute("select id, data from testfast where id = 1") + self.assertEqual(cur.fetchone(), (1, 'x')) + + # unicode in data + psycopg2.extras.execute_values(cur, + "insert into testfast (id, data) values %s", + [(2, snowman)]) + cur.execute("select id, data from testfast where id = 2") + self.assertEqual(cur.fetchone(), (2, snowman)) + + # unicode in both + psycopg2.extras.execute_values(cur, + "insert into testfast (id, data) values %%s -- %s" % snowman, + [(3, snowman)]) + cur.execute("select id, data from testfast where id = 3") + self.assertEqual(cur.fetchone(), (3, snowman)) + + def test_returning(self): + cur = self.conn.cursor() + result = psycopg2.extras.execute_values(cur, + "insert into testfast (id, val) values %s returning id", + ((i, i * 10) for i in range(25)), + page_size=10, fetch=True) + # result contains all returned pages + self.assertEqual([r[0] for r in result], list(range(25))) + + def test_invalid_sql(self): + cur = self.conn.cursor() + self.assertRaises(ValueError, psycopg2.extras.execute_values, cur, + "insert", []) + self.assertRaises(ValueError, psycopg2.extras.execute_values, cur, + "insert %s and %s", []) + self.assertRaises(ValueError, psycopg2.extras.execute_values, cur, + "insert %f", []) + self.assertRaises(ValueError, psycopg2.extras.execute_values, cur, + 
"insert %f %s", []) + + def test_percent_escape(self): + cur = self.conn.cursor() + psycopg2.extras.execute_values(cur, + "insert into testfast (id, data) values %s -- a%%b", + [(1, 'hi')]) + self.assert_(b'a%%b' not in cur.query) + self.assert_(b'a%b' in cur.query) + + cur.execute("select id, data from testfast") + self.assertEqual(cur.fetchall(), [(1, 'hi')]) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_green.py b/tests/test_green.py new file mode 100755 index 0000000000000000000000000000000000000000..e3a494673d3fbd5407c5f3c169101734346f4794 --- /dev/null +++ b/tests/test_green.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python + +# test_green.py - unit test for async wait callback +# +# Copyright (C) 2010-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
class ConnectionStub:
    """A `connection` wrapper allowing analysis of the `poll()` calls."""

    def __init__(self, conn):
        self.conn = conn
        # every state returned by poll(), in call order
        self.polls = []

    def fileno(self):
        # delegate straight to the wrapped connection
        return self.conn.fileno()

    def poll(self):
        state = self.conn.poll()
        self.polls.append(state)
        return state
+ warnings.warn("sending a large query didn't trigger block on write.") + + def test_error_in_callback(self): + # behaviour changed after issue #113: if there is an error in the + # callback for the moment we don't have a way to reset the connection + # without blocking (ticket #113) so just close it. + conn = self.conn + curs = conn.cursor() + curs.execute("select 1") # have a BEGIN + curs.fetchone() + + # now try to do something that will fail in the callback + psycopg2.extensions.set_wait_callback(lambda conn: 1 // 0) + self.assertRaises(ZeroDivisionError, curs.execute, "select 2") + + self.assert_(conn.closed) + + def test_dont_freak_out(self): + # if there is an error in a green query, don't freak out and close + # the connection + conn = self.conn + curs = conn.cursor() + self.assertRaises(psycopg2.ProgrammingError, + curs.execute, "select the unselectable") + + # check that the connection is left in an usable state + self.assert_(not conn.closed) + conn.rollback() + curs.execute("select 1") + self.assertEqual(curs.fetchone()[0], 1) + + @skip_before_postgres(8, 2) + def test_copy_no_hang(self): + cur = self.conn.cursor() + self.assertRaises(psycopg2.ProgrammingError, + cur.execute, "copy (select 1) to stdout") + + @slow + @skip_if_crdb("notice") + @skip_before_postgres(9, 0) + def test_non_block_after_notice(self): + def wait(conn): + while 1: + state = conn.poll() + if state == POLL_OK: + break + elif state == POLL_READ: + select.select([conn.fileno()], [], [], 0.1) + elif state == POLL_WRITE: + select.select([], [conn.fileno()], [], 0.1) + else: + raise conn.OperationalError(f"bad state from poll: {state}") + + stub = self.set_stub_wait_callback(self.conn, wait) + cur = self.conn.cursor() + cur.execute(""" + select 1; + do $$ + begin + raise notice 'hello'; + end + $$ language plpgsql; + select pg_sleep(1); + """) + + polls = stub.polls.count(POLL_READ) + self.assert_(polls > 8, polls) + + +class CallbackErrorTestCase(ConnectingTestCase): + def setUp(self): 
+ self._cb = psycopg2.extensions.get_wait_callback() + psycopg2.extensions.set_wait_callback(self.crappy_callback) + ConnectingTestCase.setUp(self) + self.to_error = None + + def tearDown(self): + ConnectingTestCase.tearDown(self) + psycopg2.extensions.set_wait_callback(self._cb) + + def crappy_callback(self, conn): + """green callback failing after `self.to_error` time it is called""" + while True: + if self.to_error is not None: + self.to_error -= 1 + if self.to_error <= 0: + raise ZeroDivisionError("I accidentally the connection") + try: + state = conn.poll() + if state == POLL_OK: + break + elif state == POLL_READ: + select.select([conn.fileno()], [], []) + elif state == POLL_WRITE: + select.select([], [conn.fileno()], []) + else: + raise conn.OperationalError(f"bad state from poll: {state}") + except KeyboardInterrupt: + conn.cancel() + # the loop will be broken by a server error + continue + + def test_errors_on_connection(self): + # Test error propagation in the different stages of the connection + for i in range(100): + self.to_error = i + try: + self.connect() + except ZeroDivisionError: + pass + else: + # We managed to connect + return + + self.fail("you should have had a success or an error by now") + + def test_errors_on_query(self): + for i in range(100): + self.to_error = None + cnn = self.connect() + cur = cnn.cursor() + self.to_error = i + try: + cur.execute("select 1") + cur.fetchone() + except ZeroDivisionError: + pass + else: + # The query completed + return + + self.fail("you should have had a success or an error by now") + + @skip_if_crdb("named cursor") + def test_errors_named_cursor(self): + for i in range(100): + self.to_error = None + cnn = self.connect() + cur = cnn.cursor('foo') + self.to_error = i + try: + cur.execute("select 1") + cur.fetchone() + except ZeroDivisionError: + pass + else: + # The query completed + return + + self.fail("you should have had a success or an error by now") + + +def test_suite(): + return 
unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_ipaddress.py b/tests/test_ipaddress.py new file mode 100755 index 0000000000000000000000000000000000000000..4a2339ef93acddb962933e334ca01d2d554bb828 --- /dev/null +++ b/tests/test_ipaddress.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python +# +# test_ipaddress.py - tests for ipaddress support +# +# Copyright (C) 2016-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + + +from . 
import testutils +import unittest + +import psycopg2 +import psycopg2.extras + +try: + import ipaddress as ip +except ImportError: + # Python 2 + ip = None + + +@unittest.skipIf(ip is None, "'ipaddress' module not available") +class NetworkingTestCase(testutils.ConnectingTestCase): + def test_inet_cast(self): + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + + cur.execute("select null::inet") + self.assert_(cur.fetchone()[0] is None) + + cur.execute("select '127.0.0.1/24'::inet") + obj = cur.fetchone()[0] + self.assert_(isinstance(obj, ip.IPv4Interface), repr(obj)) + self.assertEquals(obj, ip.ip_interface('127.0.0.1/24')) + + cur.execute("select '::ffff:102:300/128'::inet") + obj = cur.fetchone()[0] + self.assert_(isinstance(obj, ip.IPv6Interface), repr(obj)) + self.assertEquals(obj, ip.ip_interface('::ffff:102:300/128')) + + @testutils.skip_before_postgres(8, 2) + def test_inet_array_cast(self): + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + cur.execute("select '{NULL,127.0.0.1,::ffff:102:300/128}'::inet[]") + l = cur.fetchone()[0] + self.assert_(l[0] is None) + self.assertEquals(l[1], ip.ip_interface('127.0.0.1')) + self.assertEquals(l[2], ip.ip_interface('::ffff:102:300/128')) + self.assert_(isinstance(l[1], ip.IPv4Interface), l) + self.assert_(isinstance(l[2], ip.IPv6Interface), l) + + def test_inet_adapt(self): + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + + cur.execute("select %s", [ip.ip_interface('127.0.0.1/24')]) + self.assertEquals(cur.fetchone()[0], '127.0.0.1/24') + + cur.execute("select %s", [ip.ip_interface('::ffff:102:300/128')]) + self.assertEquals(cur.fetchone()[0], '::ffff:102:300/128') + + @testutils.skip_if_crdb("cidr") + def test_cidr_cast(self): + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + + cur.execute("select null::cidr") + self.assert_(cur.fetchone()[0] is None) + + cur.execute("select '127.0.0.0/24'::cidr") + obj = cur.fetchone()[0] + 
self.assert_(isinstance(obj, ip.IPv4Network), repr(obj)) + self.assertEquals(obj, ip.ip_network('127.0.0.0/24')) + + cur.execute("select '::ffff:102:300/128'::cidr") + obj = cur.fetchone()[0] + self.assert_(isinstance(obj, ip.IPv6Network), repr(obj)) + self.assertEquals(obj, ip.ip_network('::ffff:102:300/128')) + + @testutils.skip_if_crdb("cidr") + @testutils.skip_before_postgres(8, 2) + def test_cidr_array_cast(self): + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + cur.execute("select '{NULL,127.0.0.1,::ffff:102:300/128}'::cidr[]") + l = cur.fetchone()[0] + self.assert_(l[0] is None) + self.assertEquals(l[1], ip.ip_network('127.0.0.1')) + self.assertEquals(l[2], ip.ip_network('::ffff:102:300/128')) + self.assert_(isinstance(l[1], ip.IPv4Network), l) + self.assert_(isinstance(l[2], ip.IPv6Network), l) + + def test_cidr_adapt(self): + cur = self.conn.cursor() + psycopg2.extras.register_ipaddress(cur) + + cur.execute("select %s", [ip.ip_network('127.0.0.0/24')]) + self.assertEquals(cur.fetchone()[0], '127.0.0.0/24') + + cur.execute("select %s", [ip.ip_network('::ffff:102:300/128')]) + self.assertEquals(cur.fetchone()[0], '::ffff:102:300/128') + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_lobject.py b/tests/test_lobject.py new file mode 100755 index 0000000000000000000000000000000000000000..2ea5c912a8cf176c440ba81402db573f1edc6bc7 --- /dev/null +++ b/tests/test_lobject.py @@ -0,0 +1,530 @@ +#!/usr/bin/env python + +# test_lobject.py - unit test for large objects support +# +# Copyright (C) 2008-2019 James Henstridge +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import os +import shutil +import tempfile +from functools import wraps + +import psycopg2 +import psycopg2.extensions +import unittest +from .testutils import (decorate_all_tests, skip_if_tpc_disabled, + skip_before_postgres, ConnectingTestCase, skip_if_green, skip_if_crdb, slow) + + +def skip_if_no_lo(f): + f = skip_before_postgres(8, 1, "large objects only supported from PG 8.1")(f) + f = skip_if_green("libpq doesn't support LO in async mode")(f) + f = skip_if_crdb("large objects")(f) + return f + + +class LargeObjectTestCase(ConnectingTestCase): + def setUp(self): + ConnectingTestCase.setUp(self) + self.lo_oid = None + self.tmpdir = None + + def tearDown(self): + if self.tmpdir: + shutil.rmtree(self.tmpdir, ignore_errors=True) + + if self.conn.closed: + return + + if self.lo_oid is not None: + self.conn.rollback() + try: + lo = self.conn.lobject(self.lo_oid, "n") + except psycopg2.OperationalError: + pass + else: + lo.unlink() + + ConnectingTestCase.tearDown(self) + + +@skip_if_no_lo +class LargeObjectTests(LargeObjectTestCase): + def test_create(self): + lo = self.conn.lobject() + self.assertNotEqual(lo, None) + self.assertEqual(lo.mode[0], "w") + + def test_connection_needed(self): + self.assertRaises(TypeError, + psycopg2.extensions.lobject, []) + + def test_open_non_existent(self): + # By creating then removing a large object, 
we get an Oid that + # should be unused. + lo = self.conn.lobject() + lo.unlink() + self.assertRaises(psycopg2.OperationalError, self.conn.lobject, lo.oid) + + def test_open_existing(self): + lo = self.conn.lobject() + lo2 = self.conn.lobject(lo.oid) + self.assertNotEqual(lo2, None) + self.assertEqual(lo2.oid, lo.oid) + self.assertEqual(lo2.mode[0], "r") + + def test_open_for_write(self): + lo = self.conn.lobject() + lo2 = self.conn.lobject(lo.oid, "w") + self.assertEqual(lo2.mode[0], "w") + lo2.write(b"some data") + + def test_open_mode_n(self): + # Openning an object in mode "n" gives us a closed lobject. + lo = self.conn.lobject() + lo.close() + + lo2 = self.conn.lobject(lo.oid, "n") + self.assertEqual(lo2.oid, lo.oid) + self.assertEqual(lo2.closed, True) + + def test_mode_defaults(self): + lo = self.conn.lobject() + lo2 = self.conn.lobject(mode=None) + lo3 = self.conn.lobject(mode="") + self.assertEqual(lo.mode, lo2.mode) + self.assertEqual(lo.mode, lo3.mode) + + def test_close_connection_gone(self): + lo = self.conn.lobject() + self.conn.close() + lo.close() + + def test_create_with_oid(self): + # Create and delete a large object to get an unused Oid. 
+ lo = self.conn.lobject() + oid = lo.oid + lo.unlink() + + lo = self.conn.lobject(0, "w", oid) + self.assertEqual(lo.oid, oid) + + def test_create_with_existing_oid(self): + lo = self.conn.lobject() + lo.close() + + self.assertRaises(psycopg2.OperationalError, + self.conn.lobject, 0, "w", lo.oid) + self.assert_(not self.conn.closed) + + def test_import(self): + self.tmpdir = tempfile.mkdtemp() + filename = os.path.join(self.tmpdir, "data.txt") + fp = open(filename, "wb") + fp.write(b"some data") + fp.close() + + lo = self.conn.lobject(0, "r", 0, filename) + self.assertEqual(lo.read(), "some data") + + def test_close(self): + lo = self.conn.lobject() + self.assertEqual(lo.closed, False) + lo.close() + self.assertEqual(lo.closed, True) + + def test_write(self): + lo = self.conn.lobject() + self.assertEqual(lo.write(b"some data"), len("some data")) + + def test_write_large(self): + lo = self.conn.lobject() + data = "data" * 1000000 + self.assertEqual(lo.write(data), len(data)) + + def test_read(self): + lo = self.conn.lobject() + lo.write(b"some data") + lo.close() + + lo = self.conn.lobject(lo.oid) + x = lo.read(4) + self.assertEqual(type(x), type('')) + self.assertEqual(x, "some") + self.assertEqual(lo.read(), " data") + + def test_read_binary(self): + lo = self.conn.lobject() + lo.write(b"some data") + lo.close() + + lo = self.conn.lobject(lo.oid, "rb") + x = lo.read(4) + self.assertEqual(type(x), type(b'')) + self.assertEqual(x, b"some") + self.assertEqual(lo.read(), b" data") + + def test_read_text(self): + lo = self.conn.lobject() + snowman = "\u2603" + lo.write("some data " + snowman) + lo.close() + + lo = self.conn.lobject(lo.oid, "rt") + x = lo.read(4) + self.assertEqual(type(x), type('')) + self.assertEqual(x, "some") + self.assertEqual(lo.read(), " data " + snowman) + + @slow + def test_read_large(self): + lo = self.conn.lobject() + data = "data" * 1000000 + lo.write("some" + data) + lo.close() + + lo = self.conn.lobject(lo.oid) + 
self.assertEqual(lo.read(4), "some") + data1 = lo.read() + # avoid dumping megacraps in the console in case of error + self.assert_(data == data1, + f"{data[:100]!r}... != {data1[:100]!r}...") + + def test_seek_tell(self): + lo = self.conn.lobject() + length = lo.write(b"some data") + self.assertEqual(lo.tell(), length) + lo.close() + lo = self.conn.lobject(lo.oid) + + self.assertEqual(lo.seek(5, 0), 5) + self.assertEqual(lo.tell(), 5) + self.assertEqual(lo.read(), "data") + + # SEEK_CUR: relative current location + lo.seek(5) + self.assertEqual(lo.seek(2, 1), 7) + self.assertEqual(lo.tell(), 7) + self.assertEqual(lo.read(), "ta") + + # SEEK_END: relative to end of file + self.assertEqual(lo.seek(-2, 2), length - 2) + self.assertEqual(lo.read(), "ta") + + def test_unlink(self): + lo = self.conn.lobject() + lo.unlink() + + # the object doesn't exist now, so we can't reopen it. + self.assertRaises(psycopg2.OperationalError, self.conn.lobject, lo.oid) + # And the object has been closed. + self.assertEquals(lo.closed, True) + + def test_export(self): + lo = self.conn.lobject() + lo.write(b"some data") + + self.tmpdir = tempfile.mkdtemp() + filename = os.path.join(self.tmpdir, "data.txt") + lo.export(filename) + self.assertTrue(os.path.exists(filename)) + f = open(filename, "rb") + try: + self.assertEqual(f.read(), b"some data") + finally: + f.close() + + def test_close_twice(self): + lo = self.conn.lobject() + lo.close() + lo.close() + + def test_write_after_close(self): + lo = self.conn.lobject() + lo.close() + self.assertRaises(psycopg2.InterfaceError, lo.write, b"some data") + + def test_read_after_close(self): + lo = self.conn.lobject() + lo.close() + self.assertRaises(psycopg2.InterfaceError, lo.read, 5) + + def test_seek_after_close(self): + lo = self.conn.lobject() + lo.close() + self.assertRaises(psycopg2.InterfaceError, lo.seek, 0) + + def test_tell_after_close(self): + lo = self.conn.lobject() + lo.close() + self.assertRaises(psycopg2.InterfaceError, lo.tell) 
+ + def test_unlink_after_close(self): + lo = self.conn.lobject() + lo.close() + # Unlink works on closed files. + lo.unlink() + + def test_export_after_close(self): + lo = self.conn.lobject() + lo.write(b"some data") + lo.close() + + self.tmpdir = tempfile.mkdtemp() + filename = os.path.join(self.tmpdir, "data.txt") + lo.export(filename) + self.assertTrue(os.path.exists(filename)) + f = open(filename, "rb") + try: + self.assertEqual(f.read(), b"some data") + finally: + f.close() + + def test_close_after_commit(self): + lo = self.conn.lobject() + self.lo_oid = lo.oid + self.conn.commit() + + # Closing outside of the transaction is okay. + lo.close() + + def test_write_after_commit(self): + lo = self.conn.lobject() + self.lo_oid = lo.oid + self.conn.commit() + + self.assertRaises(psycopg2.ProgrammingError, lo.write, b"some data") + + def test_read_after_commit(self): + lo = self.conn.lobject() + self.lo_oid = lo.oid + self.conn.commit() + + self.assertRaises(psycopg2.ProgrammingError, lo.read, 5) + + def test_seek_after_commit(self): + lo = self.conn.lobject() + self.lo_oid = lo.oid + self.conn.commit() + + self.assertRaises(psycopg2.ProgrammingError, lo.seek, 0) + + def test_tell_after_commit(self): + lo = self.conn.lobject() + self.lo_oid = lo.oid + self.conn.commit() + + self.assertRaises(psycopg2.ProgrammingError, lo.tell) + + def test_unlink_after_commit(self): + lo = self.conn.lobject() + self.lo_oid = lo.oid + self.conn.commit() + + # Unlink of stale lobject is okay + lo.unlink() + + def test_export_after_commit(self): + lo = self.conn.lobject() + lo.write(b"some data") + self.conn.commit() + + self.tmpdir = tempfile.mkdtemp() + filename = os.path.join(self.tmpdir, "data.txt") + lo.export(filename) + self.assertTrue(os.path.exists(filename)) + f = open(filename, "rb") + try: + self.assertEqual(f.read(), b"some data") + finally: + f.close() + + @skip_if_tpc_disabled + def test_read_after_tpc_commit(self): + self.conn.tpc_begin('test_lobject') + lo = 
self.conn.lobject() + self.lo_oid = lo.oid + self.conn.tpc_commit() + + self.assertRaises(psycopg2.ProgrammingError, lo.read, 5) + + @skip_if_tpc_disabled + def test_read_after_tpc_prepare(self): + self.conn.tpc_begin('test_lobject') + lo = self.conn.lobject() + self.lo_oid = lo.oid + self.conn.tpc_prepare() + + try: + self.assertRaises(psycopg2.ProgrammingError, lo.read, 5) + finally: + self.conn.tpc_commit() + + def test_large_oid(self): + # Test we don't overflow with an oid not fitting a signed int + try: + self.conn.lobject(0xFFFFFFFE) + except psycopg2.OperationalError: + pass + + def test_factory(self): + class lobject_subclass(psycopg2.extensions.lobject): + pass + + lo = self.conn.lobject(lobject_factory=lobject_subclass) + self.assert_(isinstance(lo, lobject_subclass)) + + +@decorate_all_tests +def skip_if_no_truncate(f): + @wraps(f) + def skip_if_no_truncate_(self): + if self.conn.info.server_version < 80300: + return self.skipTest( + "the server doesn't support large object truncate") + + if not hasattr(psycopg2.extensions.lobject, 'truncate'): + return self.skipTest( + "psycopg2 has been built against a libpq " + "without large object truncate support.") + + return f(self) + + return skip_if_no_truncate_ + + +@skip_if_no_lo +@skip_if_no_truncate +class LargeObjectTruncateTests(LargeObjectTestCase): + def test_truncate(self): + lo = self.conn.lobject() + lo.write("some data") + lo.close() + + lo = self.conn.lobject(lo.oid, "w") + lo.truncate(4) + + # seek position unchanged + self.assertEqual(lo.tell(), 0) + # data truncated + self.assertEqual(lo.read(), "some") + + lo.truncate(6) + lo.seek(0) + # large object extended with zeroes + self.assertEqual(lo.read(), "some\x00\x00") + + lo.truncate() + lo.seek(0) + # large object empty + self.assertEqual(lo.read(), "") + + def test_truncate_after_close(self): + lo = self.conn.lobject() + lo.close() + self.assertRaises(psycopg2.InterfaceError, lo.truncate) + + def test_truncate_after_commit(self): + lo = 
self.conn.lobject() + self.lo_oid = lo.oid + self.conn.commit() + + self.assertRaises(psycopg2.ProgrammingError, lo.truncate) + + +def _has_lo64(conn): + """Return (bool, msg) about the lo64 support""" + if conn.info.server_version < 90300: + return (False, "server version %s doesn't support the lo64 API" + % conn.info.server_version) + + if 'lo64' not in psycopg2.__version__: + return False, "this psycopg build doesn't support the lo64 API" + + return True, "this server and build support the lo64 API" + + +@decorate_all_tests +def skip_if_no_lo64(f): + @wraps(f) + def skip_if_no_lo64_(self): + lo64, msg = _has_lo64(self.conn) + if not lo64: + return self.skipTest(msg) + else: + return f(self) + + return skip_if_no_lo64_ + + +@skip_if_no_lo +@skip_if_no_truncate +@skip_if_no_lo64 +class LargeObject64Tests(LargeObjectTestCase): + def test_seek_tell_truncate_greater_than_2gb(self): + lo = self.conn.lobject() + + length = (1 << 31) + (1 << 30) # 2gb + 1gb = 3gb + lo.truncate(length) + + self.assertEqual(lo.seek(length, 0), length) + self.assertEqual(lo.tell(), length) + + +@decorate_all_tests +def skip_if_lo64(f): + @wraps(f) + def skip_if_lo64_(self): + lo64, msg = _has_lo64(self.conn) + if lo64: + return self.skipTest(msg) + else: + return f(self) + + return skip_if_lo64_ + + +@skip_if_no_lo +@skip_if_no_truncate +@skip_if_lo64 +class LargeObjectNot64Tests(LargeObjectTestCase): + def test_seek_larger_than_2gb(self): + lo = self.conn.lobject() + offset = 1 << 32 # 4gb + self.assertRaises( + (OverflowError, psycopg2.InterfaceError, psycopg2.NotSupportedError), + lo.seek, offset, 0) + + def test_truncate_larger_than_2gb(self): + lo = self.conn.lobject() + length = 1 << 32 # 4gb + self.assertRaises( + (OverflowError, psycopg2.InterfaceError, psycopg2.NotSupportedError), + lo.truncate, length) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_module.py 
b/tests/test_module.py new file mode 100755 index 0000000000000000000000000000000000000000..7c032597f838dc8d0264f03982450208c274c925 --- /dev/null +++ b/tests/test_module.py @@ -0,0 +1,367 @@ +#!/usr/bin/env python + +# test_module.py - unit test for the module interface +# +# Copyright (C) 2011-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import gc +import os +import sys +import pickle +from subprocess import Popen +from weakref import ref + +import unittest +from .testutils import (skip_before_postgres, + ConnectingTestCase, skip_copy_if_green, skip_if_crdb, slow, StringIO) + +import psycopg2 + + +class ConnectTestCase(unittest.TestCase): + def setUp(self): + self.args = None + + def connect_stub(dsn, connection_factory=None, async_=False): + self.args = (dsn, connection_factory, async_) + + self._connect_orig = psycopg2._connect + psycopg2._connect = connect_stub + + def tearDown(self): + psycopg2._connect = self._connect_orig + + def test_there_might_be_nothing(self): + psycopg2.connect() + self.assertEqual(self.args[0], '') + self.assertEqual(self.args[1], None) + self.assertEqual(self.args[2], False) + + psycopg2.connect( + connection_factory=lambda dsn, async_=False: None) + self.assertEqual(self.args[0], '') + self.assertNotEqual(self.args[1], None) + self.assertEqual(self.args[2], False) + + psycopg2.connect(async_=True) + self.assertEqual(self.args[0], '') + self.assertEqual(self.args[1], None) + self.assertEqual(self.args[2], True) + + def test_no_keywords(self): + psycopg2.connect('') + self.assertEqual(self.args[0], '') + self.assertEqual(self.args[1], None) + self.assertEqual(self.args[2], False) + + def test_dsn(self): + psycopg2.connect('dbname=blah host=y') + self.assertEqual(self.args[0], 'dbname=blah host=y') + self.assertEqual(self.args[1], None) + self.assertEqual(self.args[2], False) + + def test_supported_keywords(self): + psycopg2.connect(database='foo') + self.assertEqual(self.args[0], 'dbname=foo') + psycopg2.connect(user='postgres') + self.assertEqual(self.args[0], 'user=postgres') + psycopg2.connect(password='secret') + self.assertEqual(self.args[0], 'password=secret') + psycopg2.connect(port=5432) + self.assertEqual(self.args[0], 'port=5432') + psycopg2.connect(sslmode='require') + self.assertEqual(self.args[0], 'sslmode=require') + + psycopg2.connect(database='foo', + 
user='postgres', password='secret', port=5432) + self.assert_('dbname=foo' in self.args[0]) + self.assert_('user=postgres' in self.args[0]) + self.assert_('password=secret' in self.args[0]) + self.assert_('port=5432' in self.args[0]) + self.assertEqual(len(self.args[0].split()), 4) + + def test_generic_keywords(self): + psycopg2.connect(options='stuff') + self.assertEqual(self.args[0], 'options=stuff') + + def test_factory(self): + def f(dsn, async_=False): + pass + + psycopg2.connect(database='foo', host='baz', connection_factory=f) + self.assertDsnEqual(self.args[0], 'dbname=foo host=baz') + self.assertEqual(self.args[1], f) + self.assertEqual(self.args[2], False) + + psycopg2.connect("dbname=foo host=baz", connection_factory=f) + self.assertDsnEqual(self.args[0], 'dbname=foo host=baz') + self.assertEqual(self.args[1], f) + self.assertEqual(self.args[2], False) + + def test_async(self): + psycopg2.connect(database='foo', host='baz', async_=1) + self.assertDsnEqual(self.args[0], 'dbname=foo host=baz') + self.assertEqual(self.args[1], None) + self.assert_(self.args[2]) + + psycopg2.connect("dbname=foo host=baz", async_=True) + self.assertDsnEqual(self.args[0], 'dbname=foo host=baz') + self.assertEqual(self.args[1], None) + self.assert_(self.args[2]) + + def test_int_port_param(self): + psycopg2.connect(database='sony', port=6543) + dsn = f" {self.args[0]} " + self.assert_(" dbname=sony " in dsn, dsn) + self.assert_(" port=6543 " in dsn, dsn) + + def test_empty_param(self): + psycopg2.connect(database='sony', password='') + self.assertDsnEqual(self.args[0], "dbname=sony password=''") + + def test_escape(self): + psycopg2.connect(database='hello world') + self.assertEqual(self.args[0], "dbname='hello world'") + + psycopg2.connect(database=r'back\slash') + self.assertEqual(self.args[0], r"dbname=back\\slash") + + psycopg2.connect(database="quo'te") + self.assertEqual(self.args[0], r"dbname=quo\'te") + + psycopg2.connect(database="with\ttab") + 
self.assertEqual(self.args[0], "dbname='with\ttab'") + + psycopg2.connect(database=r"\every thing'") + self.assertEqual(self.args[0], r"dbname='\\every thing\''") + + def test_params_merging(self): + psycopg2.connect('dbname=foo', database='bar') + self.assertEqual(self.args[0], 'dbname=bar') + + psycopg2.connect('dbname=foo', user='postgres') + self.assertDsnEqual(self.args[0], 'dbname=foo user=postgres') + + +class ExceptionsTestCase(ConnectingTestCase): + def test_attributes(self): + cur = self.conn.cursor() + try: + cur.execute("select * from nonexist") + except psycopg2.Error as exc: + e = exc + + self.assertEqual(e.pgcode, '42P01') + self.assert_(e.pgerror) + self.assert_(e.cursor is cur) + + def test_diagnostics_attributes(self): + cur = self.conn.cursor() + try: + cur.execute("select * from nonexist") + except psycopg2.Error as exc: + e = exc + + diag = e.diag + self.assert_(isinstance(diag, psycopg2.extensions.Diagnostics)) + for attr in [ + 'column_name', 'constraint_name', 'context', 'datatype_name', + 'internal_position', 'internal_query', 'message_detail', + 'message_hint', 'message_primary', 'schema_name', 'severity', + 'severity_nonlocalized', 'source_file', 'source_function', + 'source_line', 'sqlstate', 'statement_position', 'table_name', ]: + v = getattr(diag, attr) + if v is not None: + self.assert_(isinstance(v, str)) + + def test_diagnostics_values(self): + cur = self.conn.cursor() + try: + cur.execute("select * from nonexist") + except psycopg2.Error as exc: + e = exc + + self.assertEqual(e.diag.sqlstate, '42P01') + self.assertEqual(e.diag.severity, 'ERROR') + + def test_diagnostics_life(self): + def tmp(): + cur = self.conn.cursor() + try: + cur.execute("select * from nonexist") + except psycopg2.Error as exc: + return cur, exc + + cur, e = tmp() + diag = e.diag + w = ref(cur) + + del e, cur + gc.collect() + assert(w() is not None) + + self.assertEqual(diag.sqlstate, '42P01') + + del diag + gc.collect() + gc.collect() + assert(w() is None) + 
+ @skip_if_crdb("copy") + @skip_copy_if_green + def test_diagnostics_copy(self): + f = StringIO() + cur = self.conn.cursor() + try: + cur.copy_to(f, 'nonexist') + except psycopg2.Error as exc: + diag = exc.diag + + self.assertEqual(diag.sqlstate, '42P01') + + def test_diagnostics_independent(self): + cur = self.conn.cursor() + try: + cur.execute("l'acqua e' poca e 'a papera nun galleggia") + except Exception as exc: + diag1 = exc.diag + + self.conn.rollback() + + try: + cur.execute("select level from water where ducks > 1") + except psycopg2.Error as exc: + diag2 = exc.diag + + self.assertEqual(diag1.sqlstate, '42601') + self.assertEqual(diag2.sqlstate, '42P01') + + @skip_if_crdb("deferrable") + def test_diagnostics_from_commit(self): + cur = self.conn.cursor() + cur.execute(""" + create temp table test_deferred ( + data int primary key, + ref int references test_deferred (data) + deferrable initially deferred) + """) + cur.execute("insert into test_deferred values (1,2)") + try: + self.conn.commit() + except psycopg2.Error as exc: + e = exc + self.assertEqual(e.diag.sqlstate, '23503') + + @skip_if_crdb("diagnostic") + @skip_before_postgres(9, 3) + def test_9_3_diagnostics(self): + cur = self.conn.cursor() + cur.execute(""" + create temp table test_exc ( + data int constraint chk_eq1 check (data = 1) + )""") + try: + cur.execute("insert into test_exc values(2)") + except psycopg2.Error as exc: + e = exc + self.assertEqual(e.pgcode, '23514') + self.assertEqual(e.diag.schema_name[:7], "pg_temp") + self.assertEqual(e.diag.table_name, "test_exc") + self.assertEqual(e.diag.column_name, None) + self.assertEqual(e.diag.constraint_name, "chk_eq1") + self.assertEqual(e.diag.datatype_name, None) + + @skip_if_crdb("diagnostic") + @skip_before_postgres(9, 6) + def test_9_6_diagnostics(self): + cur = self.conn.cursor() + try: + cur.execute("select 1 from nosuchtable") + except psycopg2.Error as exc: + e = exc + self.assertEqual(e.diag.severity_nonlocalized, 'ERROR') + + def 
test_pickle(self): + cur = self.conn.cursor() + try: + cur.execute("select * from nonexist") + except psycopg2.Error as exc: + e = exc + + e1 = pickle.loads(pickle.dumps(e)) + + self.assertEqual(e.pgerror, e1.pgerror) + self.assertEqual(e.pgcode, e1.pgcode) + self.assert_(e1.cursor is None) + + @skip_if_crdb("connect any db") + def test_pickle_connection_error(self): + # segfaults on psycopg 2.5.1 - see ticket #170 + try: + psycopg2.connect('dbname=nosuchdatabasemate') + except psycopg2.Error as exc: + e = exc + + e1 = pickle.loads(pickle.dumps(e)) + + self.assertEqual(e.pgerror, e1.pgerror) + self.assertEqual(e.pgcode, e1.pgcode) + self.assert_(e1.cursor is None) + + +class TestExtensionModule(unittest.TestCase): + @slow + def test_import_internal(self): + # check that the internal package can be imported "naked" + # we may break this property if there is a compelling reason to do so, + # however having it allows for some import juggling such as the one + # required in ticket #201. + pkgdir = os.path.dirname(psycopg2.__file__) + pardir = os.path.dirname(pkgdir) + self.assert_(pardir in sys.path) + script = f""" +import sys +sys.path.remove({pardir!r}) +sys.path.insert(0, {pkgdir!r}) +import _psycopg +""" + + proc = Popen([sys.executable, '-c', script]) + proc.communicate() + self.assertEqual(0, proc.returncode) + + +class TestVersionDiscovery(unittest.TestCase): + def test_libpq_version(self): + self.assertTrue(type(psycopg2.__libpq_version__) is int) + try: + self.assertTrue(type(psycopg2.extensions.libpq_version()) is int) + except psycopg2.NotSupportedError: + self.assertTrue(psycopg2.__libpq_version__ < 90100) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_notify.py b/tests/test_notify.py new file mode 100755 index 0000000000000000000000000000000000000000..03ab4cde2cae2c964ffc521e705239421e7770b8 --- /dev/null +++ b/tests/test_notify.py @@ -0,0 +1,231 
@@ +#!/usr/bin/env python + +# test_notify.py - unit test for async notifications +# +# Copyright (C) 2010-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import unittest +from collections import deque + +import psycopg2 +from psycopg2 import extensions +from psycopg2.extensions import Notify +from .testutils import ConnectingTestCase, skip_if_crdb, slow +from .testconfig import dsn + +import sys +import time +import select +from subprocess import Popen, PIPE + + +@skip_if_crdb("notify") +class NotifiesTests(ConnectingTestCase): + + def autocommit(self, conn): + """Set a connection in autocommit mode.""" + conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT) + + def listen(self, name): + """Start listening for a name on self.conn.""" + curs = self.conn.cursor() + curs.execute("LISTEN " + name) + curs.close() + + def notify(self, name, sec=0, payload=None): + """Send a notification to the database, eventually after some time.""" + if payload is None: + payload = '' + else: + payload = f", {payload!r}" + + script = ("""\ +import time +time.sleep({sec}) +import {module} as psycopg2 +import {module}.extensions as ext +conn = psycopg2.connect({dsn!r}) +conn.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT) +print(conn.info.backend_pid) +curs = conn.cursor() +curs.execute("NOTIFY " {name!r} {payload!r}) +curs.close() +conn.close() +""".format( + module=psycopg2.__name__, + dsn=dsn, sec=sec, name=name, payload=payload)) + + return Popen([sys.executable, '-c', script], stdout=PIPE) + + @slow + def test_notifies_received_on_poll(self): + self.autocommit(self.conn) + self.listen('foo') + + proc = self.notify('foo', 1) + + t0 = time.time() + select.select([self.conn], [], [], 5) + t1 = time.time() + self.assert_(0.99 < t1 - t0 < 4, t1 - t0) + + pid = int(proc.communicate()[0]) + self.assertEqual(0, len(self.conn.notifies)) + self.assertEqual(extensions.POLL_OK, self.conn.poll()) + self.assertEqual(1, len(self.conn.notifies)) + self.assertEqual(pid, self.conn.notifies[0][0]) + self.assertEqual('foo', self.conn.notifies[0][1]) + + @slow + def test_many_notifies(self): + self.autocommit(self.conn) + for name in 
['foo', 'bar', 'baz']: + self.listen(name) + + pids = {} + for name in ['foo', 'bar', 'baz', 'qux']: + pids[name] = int(self.notify(name).communicate()[0]) + + self.assertEqual(0, len(self.conn.notifies)) + for i in range(10): + self.assertEqual(extensions.POLL_OK, self.conn.poll()) + self.assertEqual(3, len(self.conn.notifies)) + + names = dict.fromkeys(['foo', 'bar', 'baz']) + for (pid, name) in self.conn.notifies: + self.assertEqual(pids[name], pid) + names.pop(name) # raise if name found twice + + @slow + def test_notifies_received_on_execute(self): + self.autocommit(self.conn) + self.listen('foo') + pid = int(self.notify('foo').communicate()[0]) + self.assertEqual(0, len(self.conn.notifies)) + self.conn.cursor().execute('select 1;') + self.assertEqual(1, len(self.conn.notifies)) + self.assertEqual(pid, self.conn.notifies[0][0]) + self.assertEqual('foo', self.conn.notifies[0][1]) + + @slow + def test_notify_object(self): + self.autocommit(self.conn) + self.listen('foo') + self.notify('foo').communicate() + time.sleep(0.5) + self.conn.poll() + notify = self.conn.notifies[0] + self.assert_(isinstance(notify, psycopg2.extensions.Notify)) + + @slow + def test_notify_attributes(self): + self.autocommit(self.conn) + self.listen('foo') + pid = int(self.notify('foo').communicate()[0]) + time.sleep(0.5) + self.conn.poll() + self.assertEqual(1, len(self.conn.notifies)) + notify = self.conn.notifies[0] + self.assertEqual(pid, notify.pid) + self.assertEqual('foo', notify.channel) + self.assertEqual('', notify.payload) + + @slow + def test_notify_payload(self): + if self.conn.info.server_version < 90000: + return self.skipTest("server version %s doesn't support notify payload" + % self.conn.info.server_version) + self.autocommit(self.conn) + self.listen('foo') + pid = int(self.notify('foo', payload="Hello, world!").communicate()[0]) + time.sleep(0.5) + self.conn.poll() + self.assertEqual(1, len(self.conn.notifies)) + notify = self.conn.notifies[0] + self.assertEqual(pid, 
notify.pid) + self.assertEqual('foo', notify.channel) + self.assertEqual('Hello, world!', notify.payload) + + @slow + def test_notify_deque(self): + self.autocommit(self.conn) + self.conn.notifies = deque() + self.listen('foo') + self.notify('foo').communicate() + time.sleep(0.5) + self.conn.poll() + notify = self.conn.notifies.popleft() + self.assert_(isinstance(notify, psycopg2.extensions.Notify)) + self.assertEqual(len(self.conn.notifies), 0) + + @slow + def test_notify_noappend(self): + self.autocommit(self.conn) + self.conn.notifies = None + self.listen('foo') + self.notify('foo').communicate() + time.sleep(0.5) + self.conn.poll() + self.assertEqual(self.conn.notifies, None) + + def test_notify_init(self): + n = psycopg2.extensions.Notify(10, 'foo') + self.assertEqual(10, n.pid) + self.assertEqual('foo', n.channel) + self.assertEqual('', n.payload) + (pid, channel) = n + self.assertEqual((pid, channel), (10, 'foo')) + + n = psycopg2.extensions.Notify(42, 'bar', 'baz') + self.assertEqual(42, n.pid) + self.assertEqual('bar', n.channel) + self.assertEqual('baz', n.payload) + (pid, channel) = n + self.assertEqual((pid, channel), (42, 'bar')) + + def test_compare(self): + data = [(10, 'foo'), (20, 'foo'), (10, 'foo', 'bar'), (10, 'foo', 'baz')] + for d1 in data: + for d2 in data: + n1 = psycopg2.extensions.Notify(*d1) + n2 = psycopg2.extensions.Notify(*d2) + self.assertEqual((n1 == n2), (d1 == d2)) + self.assertEqual((n1 != n2), (d1 != d2)) + + def test_compare_tuple(self): + self.assertEqual((10, 'foo'), Notify(10, 'foo')) + self.assertEqual((10, 'foo'), Notify(10, 'foo', 'bar')) + self.assertNotEqual((10, 'foo'), Notify(20, 'foo')) + self.assertNotEqual((10, 'foo'), Notify(10, 'bar')) + + def test_hash(self): + self.assertEqual(hash((10, 'foo')), hash(Notify(10, 'foo'))) + self.assertNotEqual(hash(Notify(10, 'foo', 'bar')), + hash(Notify(10, 'foo'))) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": 
+ unittest.main() diff --git a/tests/test_psycopg2_dbapi20.py b/tests/test_psycopg2_dbapi20.py new file mode 100755 index 0000000000000000000000000000000000000000..304c24dd11ca2420a6aa9e4f2c18e48d3a94cada --- /dev/null +++ b/tests/test_psycopg2_dbapi20.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python + +# test_psycopg2_dbapi20.py - DB API conformance test for psycopg2 +# +# Copyright (C) 2006-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +from . import dbapi20 +from . import dbapi20_tpc +from .testutils import skip_if_tpc_disabled +import unittest +import psycopg2 + +from .testconfig import dsn + + +class Psycopg2Tests(dbapi20.DatabaseAPI20Test): + driver = psycopg2 + connect_args = () + connect_kw_args = {'dsn': dsn} + + lower_func = 'lower' # For stored procedure test + + def test_callproc(self): + # Until DBAPI 2.0 compliance, callproc should return None or it's just + # misleading. Therefore, we will skip the return value test for + # callproc and only perform the fetch test. 
+ # + # For what it's worth, the DBAPI2.0 test_callproc doesn't actually + # test for DBAPI2.0 compliance! It doesn't check for modified OUT and + # IN/OUT parameters in the return values! + con = self._connect() + try: + cur = con.cursor() + if self.lower_func and hasattr(cur, 'callproc'): + cur.callproc(self.lower_func, ('FOO',)) + r = cur.fetchall() + self.assertEqual(len(r), 1, 'callproc produced no result set') + self.assertEqual(len(r[0]), 1, + 'callproc produced invalid result set') + self.assertEqual(r[0][0], 'foo', + 'callproc produced invalid results') + finally: + con.close() + + def test_setoutputsize(self): + # psycopg2's setoutputsize() is a no-op + pass + + def test_nextset(self): + # psycopg2 does not implement nextset() + pass + + +@skip_if_tpc_disabled +class Psycopg2TPCTests(dbapi20_tpc.TwoPhaseCommitTests, unittest.TestCase): + driver = psycopg2 + + def connect(self): + return psycopg2.connect(dsn=dsn) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_quote.py b/tests/test_quote.py new file mode 100755 index 0000000000000000000000000000000000000000..69d26003687ec4c1c82d968d593cf8d719a7a76c --- /dev/null +++ b/tests/test_quote.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python + +# test_quote.py - unit test for strings quoting +# +# Copyright (C) 2007-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. 
+# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +from . import testutils +import unittest +from .testutils import ConnectingTestCase, skip_if_crdb + +import psycopg2 +import psycopg2.extensions +from psycopg2.extensions import adapt, quote_ident + + +class QuotingTestCase(ConnectingTestCase): + r"""Checks the correct quoting of strings and binary objects. + + Since ver. 8.1, PostgreSQL is moving towards SQL standard conforming + strings, where the backslash (\) is treated as literal character, + not as escape. To treat the backslash as a C-style escapes, PG supports + the E'' quotes. + + This test case checks that the E'' quotes are used whenever they are + needed. The tests are expected to pass with all PostgreSQL server versions + (currently tested with 7.4 <= PG <= 8.3beta) and with any + 'standard_conforming_strings' server parameter value. + The tests also check that no warning is raised ('escape_string_warning' + should be on). + + https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-STRINGS + https://www.postgresql.org/docs/current/static/runtime-config-compatible.html + """ + def test_string(self): + data = """some data with \t chars + to escape into, 'quotes' and \\ a backslash too. 
+        """
+        data += "".join(map(chr, range(1, 127)))
+
+        curs = self.conn.cursor()
+        curs.execute("SELECT %s;", (data,))
+        res = curs.fetchone()[0]
+
+        self.assertEqual(res, data)
+        self.assert_(not self.conn.notices)
+
+    def test_string_null_terminator(self):
+        curs = self.conn.cursor()
+        data = 'abcd\x01\x00cdefg'
+
+        try:
+            curs.execute("SELECT %s", (data,))
+        except ValueError as e:
+            # assertEqual, not the deprecated assertEquals alias
+            # (removed in Python 3.12); matches the rest of the file.
+            self.assertEqual(str(e),
+                'A string literal cannot contain NUL (0x00) characters.')
+        else:
+            self.fail("ValueError not raised")
+
+    def test_binary(self):
+        data = b"""some data with \000\013 binary
+        stuff into, 'quotes' and \\ a backslash too.
+        """
+        data += bytes(list(range(256)))
+
+        curs = self.conn.cursor()
+        curs.execute("SELECT %s::bytea;", (psycopg2.Binary(data),))
+        res = curs.fetchone()[0].tobytes()
+
+        if res[0] in (b'x', ord(b'x')) and self.conn.info.server_version >= 90000:
+            return self.skipTest(
+                "bytea broken with server >= 9.0, libpq < 9")
+
+        self.assertEqual(res, data)
+        self.assert_(not self.conn.notices)
+
+    def test_unicode(self):
+        curs = self.conn.cursor()
+        curs.execute("SHOW server_encoding")
+        server_encoding = curs.fetchone()[0]
+        if server_encoding != "UTF8":
+            return self.skipTest(
+                f"Unicode test skipped since server encoding is {server_encoding}")
+
+        data = """some data with \t chars
+        to escape into, 'quotes', \u20ac euro sign and \\ a backslash too.
+ """ + data += "".join(map(chr, [u for u in range(1, 65536) + if not 0xD800 <= u <= 0xDFFF])) # surrogate area + self.conn.set_client_encoding('UNICODE') + + psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, self.conn) + curs.execute("SELECT %s::text;", (data,)) + res = curs.fetchone()[0] + + self.assertEqual(res, data) + self.assert_(not self.conn.notices) + + @skip_if_crdb("encoding") + def test_latin1(self): + self.conn.set_client_encoding('LATIN1') + curs = self.conn.cursor() + data = bytes(list(range(32, 127)) + + list(range(160, 256))).decode('latin1') + + # as string + curs.execute("SELECT %s::text;", (data,)) + res = curs.fetchone()[0] + self.assertEqual(res, data) + self.assert_(not self.conn.notices) + + + @skip_if_crdb("encoding") + def test_koi8(self): + self.conn.set_client_encoding('KOI8') + curs = self.conn.cursor() + data = bytes(list(range(32, 127)) + + list(range(128, 256))).decode('koi8_r') + + # as string + curs.execute("SELECT %s::text;", (data,)) + res = curs.fetchone()[0] + self.assertEqual(res, data) + self.assert_(not self.conn.notices) + + def test_bytes(self): + snowman = "\u2603" + conn = self.connect() + conn.set_client_encoding('UNICODE') + psycopg2.extensions.register_type(psycopg2.extensions.BYTES, conn) + curs = conn.cursor() + curs.execute("select %s::text", (snowman,)) + x = curs.fetchone()[0] + self.assert_(isinstance(x, bytes)) + self.assertEqual(x, snowman.encode('utf8')) + + +class TestQuotedString(ConnectingTestCase): + def test_encoding_from_conn(self): + q = psycopg2.extensions.QuotedString('hi') + self.assertEqual(q.encoding, 'latin1') + + self.conn.set_client_encoding('utf_8') + q.prepare(self.conn) + self.assertEqual(q.encoding, 'utf_8') + + +class TestQuotedIdentifier(ConnectingTestCase): + def test_identifier(self): + self.assertEqual(quote_ident('blah-blah', self.conn), '"blah-blah"') + self.assertEqual(quote_ident('quote"inside', self.conn), '"quote""inside"') + + @testutils.skip_before_postgres(8, 0) + 
def test_unicode_ident(self): + snowman = "\u2603" + quoted = '"' + snowman + '"' + self.assertEqual(quote_ident(snowman, self.conn), quoted) + + +class TestStringAdapter(ConnectingTestCase): + def test_encoding_default(self): + a = adapt("hello") + self.assertEqual(a.encoding, 'latin1') + self.assertEqual(a.getquoted(), b"'hello'") + + # NOTE: we can't really test an encoding different from utf8, because + # when encoding without connection the libpq will use parameters from + # a previous one, so what would happens depends jn the tests run order. + # egrave = u'\xe8' + # self.assertEqual(adapt(egrave).getquoted(), "'\xe8'") + + def test_encoding_error(self): + snowman = "\u2603" + a = adapt(snowman) + self.assertRaises(UnicodeEncodeError, a.getquoted) + + def test_set_encoding(self): + # Note: this works-ish mostly in case when the standard db connection + # we test with is utf8, otherwise the encoding chosen by PQescapeString + # may give bad results. + snowman = "\u2603" + a = adapt(snowman) + a.encoding = 'utf8' + self.assertEqual(a.encoding, 'utf8') + self.assertEqual(a.getquoted(), b"'\xe2\x98\x83'") + + def test_connection_wins_anyway(self): + snowman = "\u2603" + a = adapt(snowman) + a.encoding = 'latin9' + + self.conn.set_client_encoding('utf8') + a.prepare(self.conn) + + self.assertEqual(a.encoding, 'utf_8') + self.assertQuotedEqual(a.getquoted(), b"'\xe2\x98\x83'") + + def test_adapt_bytes(self): + snowman = "\u2603" + self.conn.set_client_encoding('utf8') + a = psycopg2.extensions.QuotedString(snowman.encode('utf8')) + a.prepare(self.conn) + self.assertQuotedEqual(a.getquoted(), b"'\xe2\x98\x83'") + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_replication.py b/tests/test_replication.py new file mode 100755 index 0000000000000000000000000000000000000000..8ca2187f1ba05ff0bdaabd189ff6af1f5f109cfc --- /dev/null +++ b/tests/test_replication.py @@ 
-0,0 +1,276 @@ +#!/usr/bin/env python + +# test_replication.py - unit test for replication protocol +# +# Copyright (C) 2015-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import time +from select import select + +import psycopg2 +from psycopg2 import sql +from psycopg2.extras import ( + PhysicalReplicationConnection, LogicalReplicationConnection, StopReplication) + +from . 
import testconfig +import unittest +from .testutils import ConnectingTestCase +from .testutils import skip_before_postgres, skip_if_green + +skip_repl_if_green = skip_if_green("replication not supported in green mode") + + +class ReplicationTestCase(ConnectingTestCase): + def setUp(self): + super().setUp() + self.slot = testconfig.repl_slot + self._slots = [] + + def tearDown(self): + # first close all connections, as they might keep the slot(s) active + super().tearDown() + + time.sleep(0.025) # sometimes the slot is still active, wait a little + + if self._slots: + kill_conn = self.connect() + if kill_conn: + kill_cur = kill_conn.cursor() + for slot in self._slots: + kill_cur.execute("SELECT pg_drop_replication_slot(%s)", (slot,)) + kill_conn.commit() + kill_conn.close() + + def create_replication_slot(self, cur, slot_name=testconfig.repl_slot, **kwargs): + cur.create_replication_slot(slot_name, **kwargs) + self._slots.append(slot_name) + + def drop_replication_slot(self, cur, slot_name=testconfig.repl_slot): + cur.drop_replication_slot(slot_name) + self._slots.remove(slot_name) + + # generate some events for our replication stream + def make_replication_events(self): + conn = self.connect() + if conn is None: + return + cur = conn.cursor() + + try: + cur.execute("DROP TABLE dummy1") + except psycopg2.ProgrammingError: + conn.rollback() + cur.execute( + "CREATE TABLE dummy1 AS SELECT * FROM generate_series(1, 5) AS id") + conn.commit() + + +class ReplicationTest(ReplicationTestCase): + @skip_before_postgres(9, 0) + def test_physical_replication_connection(self): + conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) + if conn is None: + return + cur = conn.cursor() + cur.execute("IDENTIFY_SYSTEM") + cur.fetchall() + + @skip_before_postgres(9, 0) + def test_datestyle(self): + if testconfig.repl_dsn is None: + return self.skipTest("replication tests disabled by default") + + conn = self.repl_connect( + dsn=testconfig.repl_dsn, 
options='-cdatestyle=german', + connection_factory=PhysicalReplicationConnection) + if conn is None: + return + cur = conn.cursor() + cur.execute("IDENTIFY_SYSTEM") + cur.fetchall() + + @skip_before_postgres(9, 4) + def test_logical_replication_connection(self): + conn = self.repl_connect(connection_factory=LogicalReplicationConnection) + if conn is None: + return + cur = conn.cursor() + cur.execute("IDENTIFY_SYSTEM") + cur.fetchall() + + @skip_before_postgres(9, 4) # slots require 9.4 + def test_create_replication_slot(self): + conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) + if conn is None: + return + cur = conn.cursor() + + self.create_replication_slot(cur) + self.assertRaises( + psycopg2.ProgrammingError, self.create_replication_slot, cur) + + @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green + def test_start_on_missing_replication_slot(self): + conn = self.repl_connect(connection_factory=PhysicalReplicationConnection) + if conn is None: + return + cur = conn.cursor() + + self.assertRaises(psycopg2.ProgrammingError, + cur.start_replication, self.slot) + + self.create_replication_slot(cur) + cur.start_replication(self.slot) + + @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green + def test_start_replication_expert_sql(self): + conn = self.repl_connect(connection_factory=LogicalReplicationConnection) + if conn is None: + return + cur = conn.cursor() + + self.create_replication_slot(cur, output_plugin='test_decoding') + cur.start_replication_expert( + sql.SQL("START_REPLICATION SLOT {slot} LOGICAL 0/00000000").format( + slot=sql.Identifier(self.slot))) + + @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green + def test_start_and_recover_from_error(self): + conn = self.repl_connect(connection_factory=LogicalReplicationConnection) + if conn is None: + return + cur = conn.cursor() + + self.create_replication_slot(cur, output_plugin='test_decoding') + self.make_replication_events() + + 
def consume(msg): + raise StopReplication() + + with self.assertRaises(psycopg2.DataError): + # try with invalid options + cur.start_replication( + slot_name=self.slot, options={'invalid_param': 'value'}) + cur.consume_stream(consume) + + # try with correct command + cur.start_replication(slot_name=self.slot) + self.assertRaises(StopReplication, cur.consume_stream, consume) + + @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green + def test_keepalive(self): + conn = self.repl_connect(connection_factory=LogicalReplicationConnection) + if conn is None: + return + + cur = conn.cursor() + + self.create_replication_slot(cur, output_plugin='test_decoding') + + self.make_replication_events() + + cur.start_replication(self.slot) + + def consume(msg): + raise StopReplication() + + self.assertRaises(StopReplication, + cur.consume_stream, consume, keepalive_interval=2) + + conn.close() + + @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green + def test_stop_replication(self): + conn = self.repl_connect(connection_factory=LogicalReplicationConnection) + if conn is None: + return + cur = conn.cursor() + + self.create_replication_slot(cur, output_plugin='test_decoding') + + self.make_replication_events() + + cur.start_replication(self.slot) + + def consume(msg): + raise StopReplication() + self.assertRaises(StopReplication, cur.consume_stream, consume) + + +class AsyncReplicationTest(ReplicationTestCase): + @skip_before_postgres(9, 4) # slots require 9.4 + @skip_repl_if_green + def test_async_replication(self): + conn = self.repl_connect( + connection_factory=LogicalReplicationConnection, async_=1) + if conn is None: + return + + cur = conn.cursor() + + self.create_replication_slot(cur, output_plugin='test_decoding') + self.wait(cur) + + cur.start_replication(self.slot) + self.wait(cur) + + self.make_replication_events() + + self.msg_count = 0 + + def consume(msg): + # just check the methods + f"{cur.io_timestamp}: {repr(msg)}" + 
f"{cur.feedback_timestamp}: {repr(msg)}" + f"{cur.wal_end}: {repr(msg)}" + + self.msg_count += 1 + if self.msg_count > 3: + cur.send_feedback(reply=True) + raise StopReplication() + + cur.send_feedback(flush_lsn=msg.data_start) + + # cannot be used in asynchronous mode + self.assertRaises(psycopg2.ProgrammingError, cur.consume_stream, consume) + + def process_stream(): + while True: + msg = cur.read_message() + if msg: + consume(msg) + else: + select([cur], [], []) + self.assertRaises(StopReplication, process_stream) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_sql.py b/tests/test_sql.py new file mode 100755 index 0000000000000000000000000000000000000000..8fb1f114e75e3fc337c5dc595b161033eae4a5d5 --- /dev/null +++ b/tests/test_sql.py @@ -0,0 +1,412 @@ +#!/usr/bin/env python + +# test_sql.py - tests for the psycopg2.sql module +# +# Copyright (C) 2016-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import datetime as dt +import unittest +from .testutils import ( + ConnectingTestCase, skip_before_postgres, skip_copy_if_green, StringIO, + skip_if_crdb) + +import psycopg2 +from psycopg2 import sql + + +class SqlFormatTests(ConnectingTestCase): + def test_pos(self): + s = sql.SQL("select {} from {}").format( + sql.Identifier('field'), sql.Identifier('table')) + s1 = s.as_string(self.conn) + self.assert_(isinstance(s1, str)) + self.assertEqual(s1, 'select "field" from "table"') + + def test_pos_spec(self): + s = sql.SQL("select {0} from {1}").format( + sql.Identifier('field'), sql.Identifier('table')) + s1 = s.as_string(self.conn) + self.assert_(isinstance(s1, str)) + self.assertEqual(s1, 'select "field" from "table"') + + s = sql.SQL("select {1} from {0}").format( + sql.Identifier('table'), sql.Identifier('field')) + s1 = s.as_string(self.conn) + self.assert_(isinstance(s1, str)) + self.assertEqual(s1, 'select "field" from "table"') + + def test_dict(self): + s = sql.SQL("select {f} from {t}").format( + f=sql.Identifier('field'), t=sql.Identifier('table')) + s1 = s.as_string(self.conn) + self.assert_(isinstance(s1, str)) + self.assertEqual(s1, 'select "field" from "table"') + + def test_compose_literal(self): + s = sql.SQL("select {0};").format(sql.Literal(dt.date(2016, 12, 31))) + s1 = s.as_string(self.conn) + self.assertEqual(s1, "select '2016-12-31'::date;") + + def test_compose_empty(self): + s = sql.SQL("select foo;").format() + s1 = s.as_string(self.conn) + self.assertEqual(s1, "select foo;") + + def test_percent_escape(self): + s = sql.SQL("42 % {0}").format(sql.Literal(7)) + s1 = s.as_string(self.conn) + self.assertEqual(s1, "42 % 7") + + def test_braces_escape(self): + s = sql.SQL("{{{0}}}").format(sql.Literal(7)) + self.assertEqual(s.as_string(self.conn), "{7}") + s = sql.SQL("{{1,{0}}}").format(sql.Literal(7)) + self.assertEqual(s.as_string(self.conn), "{1,7}") + + def test_compose_badnargs(self): + self.assertRaises(IndexError, sql.SQL("select 
{0};").format) + + def test_compose_badnargs_auto(self): + self.assertRaises(IndexError, sql.SQL("select {};").format) + self.assertRaises(ValueError, sql.SQL("select {} {1};").format, 10, 20) + self.assertRaises(ValueError, sql.SQL("select {0} {};").format, 10, 20) + + def test_compose_bad_args_type(self): + self.assertRaises(IndexError, sql.SQL("select {0};").format, a=10) + self.assertRaises(KeyError, sql.SQL("select {x};").format, 10) + + def test_must_be_composable(self): + self.assertRaises(TypeError, sql.SQL("select {0};").format, 'foo') + self.assertRaises(TypeError, sql.SQL("select {0};").format, 10) + + def test_no_modifiers(self): + self.assertRaises(ValueError, sql.SQL("select {a!r};").format, a=10) + self.assertRaises(ValueError, sql.SQL("select {a:<};").format, a=10) + + def test_must_be_adaptable(self): + class Foo: + pass + + self.assertRaises(psycopg2.ProgrammingError, + sql.SQL("select {0};").format(sql.Literal(Foo())).as_string, self.conn) + + def test_execute(self): + cur = self.conn.cursor() + cur.execute(""" + create table test_compose ( + id serial primary key, + foo text, bar text, "ba'z" text) + """) + cur.execute( + sql.SQL("insert into {0} (id, {1}) values (%s, {2})").format( + sql.Identifier('test_compose'), + sql.SQL(', ').join(map(sql.Identifier, ['foo', 'bar', "ba'z"])), + (sql.Placeholder() * 3).join(', ')), + (10, 'a', 'b', 'c')) + + cur.execute("select * from test_compose") + self.assertEqual(cur.fetchall(), [(10, 'a', 'b', 'c')]) + + def test_executemany(self): + cur = self.conn.cursor() + cur.execute(""" + create table test_compose ( + id serial primary key, + foo text, bar text, "ba'z" text) + """) + cur.executemany( + sql.SQL("insert into {0} (id, {1}) values (%s, {2})").format( + sql.Identifier('test_compose'), + sql.SQL(', ').join(map(sql.Identifier, ['foo', 'bar', "ba'z"])), + (sql.Placeholder() * 3).join(', ')), + [(10, 'a', 'b', 'c'), (20, 'd', 'e', 'f')]) + + cur.execute("select * from test_compose") + 
self.assertEqual(cur.fetchall(), + [(10, 'a', 'b', 'c'), (20, 'd', 'e', 'f')]) + + @skip_if_crdb("copy") + @skip_copy_if_green + @skip_before_postgres(8, 2) + def test_copy(self): + cur = self.conn.cursor() + cur.execute(""" + create table test_compose ( + id serial primary key, + foo text, bar text, "ba'z" text) + """) + + s = StringIO("10\ta\tb\tc\n20\td\te\tf\n") + cur.copy_expert( + sql.SQL("copy {t} (id, foo, bar, {f}) from stdin").format( + t=sql.Identifier("test_compose"), f=sql.Identifier("ba'z")), s) + + s1 = StringIO() + cur.copy_expert( + sql.SQL("copy (select {f} from {t} order by id) to stdout").format( + t=sql.Identifier("test_compose"), f=sql.Identifier("ba'z")), s1) + s1.seek(0) + self.assertEqual(s1.read(), 'c\nf\n') + + +class IdentifierTests(ConnectingTestCase): + def test_class(self): + self.assert_(issubclass(sql.Identifier, sql.Composable)) + + def test_init(self): + self.assert_(isinstance(sql.Identifier('foo'), sql.Identifier)) + self.assert_(isinstance(sql.Identifier('foo'), sql.Identifier)) + self.assert_(isinstance(sql.Identifier('foo', 'bar', 'baz'), sql.Identifier)) + self.assertRaises(TypeError, sql.Identifier) + self.assertRaises(TypeError, sql.Identifier, 10) + self.assertRaises(TypeError, sql.Identifier, dt.date(2016, 12, 31)) + + def test_strings(self): + self.assertEqual(sql.Identifier('foo').strings, ('foo',)) + self.assertEqual(sql.Identifier('foo', 'bar').strings, ('foo', 'bar')) + + # Legacy method + self.assertEqual(sql.Identifier('foo').string, 'foo') + self.assertRaises(AttributeError, + getattr, sql.Identifier('foo', 'bar'), 'string') + + def test_repr(self): + obj = sql.Identifier("fo'o") + self.assertEqual(repr(obj), 'Identifier("fo\'o")') + self.assertEqual(repr(obj), str(obj)) + + obj = sql.Identifier("fo'o", 'ba"r') + self.assertEqual(repr(obj), 'Identifier("fo\'o", \'ba"r\')') + self.assertEqual(repr(obj), str(obj)) + + def test_eq(self): + self.assert_(sql.Identifier('foo') == sql.Identifier('foo')) + 
class LiteralTests(ConnectingTestCase):
    """Tests for the sql.Literal wrapper around adaptable Python values."""

    def test_class(self):
        """Literal is part of the Composable hierarchy."""
        self.assert_(issubclass(sql.Literal, sql.Composable))

    def test_init(self):
        """Any adaptable value can be wrapped in a Literal."""
        for value in ('foo', 'foo', b'foo', 42, dt.date(2016, 12, 31)):
            self.assert_(isinstance(sql.Literal(value), sql.Literal))

    def test_wrapped(self):
        """The wrapped attribute exposes the original value."""
        self.assertEqual(sql.Literal('foo').wrapped, 'foo')

    def test_repr(self):
        """repr/str and the SQL rendering of a few literal types."""
        lit = sql.Literal("foo")
        self.assertEqual(repr(lit), "Literal('foo')")
        self.assertEqual(str(lit), "Literal('foo')")
        self.assertQuotedEqual(lit.as_string(self.conn), "'foo'")
        self.assertEqual(sql.Literal(42).as_string(self.conn), "42")
        self.assertEqual(
            sql.Literal(dt.date(2017, 1, 1)).as_string(self.conn),
            "'2017-01-01'::date")

    def test_eq(self):
        """Equality is by wrapped value and by exact Composable type."""
        self.assert_(sql.Literal('foo') == sql.Literal('foo'))
        self.assert_(sql.Literal('foo') != sql.Literal('bar'))
        self.assert_(sql.Literal('foo') != 'foo')
        self.assert_(sql.Literal('foo') != sql.SQL('foo'))

    def test_must_be_adaptable(self):
        """Rendering a Literal of an unadaptable object raises."""
        class Foo:
            pass

        self.assertRaises(
            psycopg2.ProgrammingError,
            sql.Literal(Foo()).as_string, self.conn)
self.assert_(issubclass(sql.SQL, sql.Composable)) + + def test_init(self): + self.assert_(isinstance(sql.SQL('foo'), sql.SQL)) + self.assert_(isinstance(sql.SQL('foo'), sql.SQL)) + self.assertRaises(TypeError, sql.SQL, 10) + self.assertRaises(TypeError, sql.SQL, dt.date(2016, 12, 31)) + + def test_string(self): + self.assertEqual(sql.SQL('foo').string, 'foo') + + def test_repr(self): + self.assertEqual(repr(sql.SQL("foo")), "SQL('foo')") + self.assertEqual(str(sql.SQL("foo")), "SQL('foo')") + self.assertEqual(sql.SQL("foo").as_string(self.conn), "foo") + + def test_eq(self): + self.assert_(sql.SQL('foo') == sql.SQL('foo')) + self.assert_(sql.SQL('foo') != sql.SQL('bar')) + self.assert_(sql.SQL('foo') != 'foo') + self.assert_(sql.SQL('foo') != sql.Literal('foo')) + + def test_sum(self): + obj = sql.SQL("foo") + sql.SQL("bar") + self.assert_(isinstance(obj, sql.Composed)) + self.assertEqual(obj.as_string(self.conn), "foobar") + + def test_sum_inplace(self): + obj = sql.SQL("foo") + obj += sql.SQL("bar") + self.assert_(isinstance(obj, sql.Composed)) + self.assertEqual(obj.as_string(self.conn), "foobar") + + def test_multiply(self): + obj = sql.SQL("foo") * 3 + self.assert_(isinstance(obj, sql.Composed)) + self.assertEqual(obj.as_string(self.conn), "foofoofoo") + + def test_join(self): + obj = sql.SQL(", ").join( + [sql.Identifier('foo'), sql.SQL('bar'), sql.Literal(42)]) + self.assert_(isinstance(obj, sql.Composed)) + self.assertEqual(obj.as_string(self.conn), '"foo", bar, 42') + + obj = sql.SQL(", ").join( + sql.Composed([sql.Identifier('foo'), sql.SQL('bar'), sql.Literal(42)])) + self.assert_(isinstance(obj, sql.Composed)) + self.assertEqual(obj.as_string(self.conn), '"foo", bar, 42') + + obj = sql.SQL(", ").join([]) + self.assertEqual(obj, sql.Composed([])) + + +class ComposedTest(ConnectingTestCase): + def test_class(self): + self.assert_(issubclass(sql.Composed, sql.Composable)) + + def test_repr(self): + obj = sql.Composed([sql.Literal("foo"), 
sql.Identifier("b'ar")]) + self.assertEqual(repr(obj), + """Composed([Literal('foo'), Identifier("b'ar")])""") + self.assertEqual(str(obj), repr(obj)) + + def test_seq(self): + l = [sql.SQL('foo'), sql.Literal('bar'), sql.Identifier('baz')] + self.assertEqual(sql.Composed(l).seq, l) + + def test_eq(self): + l = [sql.Literal("foo"), sql.Identifier("b'ar")] + l2 = [sql.Literal("foo"), sql.Literal("b'ar")] + self.assert_(sql.Composed(l) == sql.Composed(list(l))) + self.assert_(sql.Composed(l) != l) + self.assert_(sql.Composed(l) != sql.Composed(l2)) + + def test_join(self): + obj = sql.Composed([sql.Literal("foo"), sql.Identifier("b'ar")]) + obj = obj.join(", ") + self.assert_(isinstance(obj, sql.Composed)) + self.assertQuotedEqual(obj.as_string(self.conn), "'foo', \"b'ar\"") + + def test_sum(self): + obj = sql.Composed([sql.SQL("foo ")]) + obj = obj + sql.Literal("bar") + self.assert_(isinstance(obj, sql.Composed)) + self.assertQuotedEqual(obj.as_string(self.conn), "foo 'bar'") + + def test_sum_inplace(self): + obj = sql.Composed([sql.SQL("foo ")]) + obj += sql.Literal("bar") + self.assert_(isinstance(obj, sql.Composed)) + self.assertQuotedEqual(obj.as_string(self.conn), "foo 'bar'") + + obj = sql.Composed([sql.SQL("foo ")]) + obj += sql.Composed([sql.Literal("bar")]) + self.assert_(isinstance(obj, sql.Composed)) + self.assertQuotedEqual(obj.as_string(self.conn), "foo 'bar'") + + def test_iter(self): + obj = sql.Composed([sql.SQL("foo"), sql.SQL('bar')]) + it = iter(obj) + i = next(it) + self.assertEqual(i, sql.SQL('foo')) + i = next(it) + self.assertEqual(i, sql.SQL('bar')) + self.assertRaises(StopIteration, next, it) + + +class PlaceholderTest(ConnectingTestCase): + def test_class(self): + self.assert_(issubclass(sql.Placeholder, sql.Composable)) + + def test_name(self): + self.assertEqual(sql.Placeholder().name, None) + self.assertEqual(sql.Placeholder('foo').name, 'foo') + + def test_repr(self): + self.assert_(str(sql.Placeholder()), 'Placeholder()') + 
self.assert_(repr(sql.Placeholder()), 'Placeholder()') + self.assert_(sql.Placeholder().as_string(self.conn), '%s') + + def test_repr_name(self): + self.assert_(str(sql.Placeholder('foo')), "Placeholder('foo')") + self.assert_(repr(sql.Placeholder('foo')), "Placeholder('foo')") + self.assert_(sql.Placeholder('foo').as_string(self.conn), '%(foo)s') + + def test_bad_name(self): + self.assertRaises(ValueError, sql.Placeholder, ')') + + def test_eq(self): + self.assert_(sql.Placeholder('foo') == sql.Placeholder('foo')) + self.assert_(sql.Placeholder('foo') != sql.Placeholder('bar')) + self.assert_(sql.Placeholder('foo') != 'foo') + self.assert_(sql.Placeholder() == sql.Placeholder()) + self.assert_(sql.Placeholder('foo') != sql.Placeholder()) + self.assert_(sql.Placeholder('foo') != sql.Literal('foo')) + + +class ValuesTest(ConnectingTestCase): + def test_null(self): + self.assert_(isinstance(sql.NULL, sql.SQL)) + self.assertEqual(sql.NULL.as_string(self.conn), "NULL") + + def test_default(self): + self.assert_(isinstance(sql.DEFAULT, sql.SQL)) + self.assertEqual(sql.DEFAULT.as_string(self.conn), "DEFAULT") + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_transaction.py b/tests/test_transaction.py new file mode 100755 index 0000000000000000000000000000000000000000..315d63eab4fc6c0990dd648db34efe5d5feb6b82 --- /dev/null +++ b/tests/test_transaction.py @@ -0,0 +1,258 @@ +#!/usr/bin/env python + +# test_transaction - unit test on transaction behaviour +# +# Copyright (C) 2007-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import threading +import unittest +from .testutils import ConnectingTestCase, skip_before_postgres, slow +from .testutils import skip_if_crdb + +import psycopg2 +from psycopg2.extensions import ( + ISOLATION_LEVEL_SERIALIZABLE, STATUS_BEGIN, STATUS_READY) + + +class TransactionTests(ConnectingTestCase): + + def setUp(self): + ConnectingTestCase.setUp(self) + skip_if_crdb("isolation level", self.conn) + self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE) + curs = self.conn.cursor() + curs.execute(''' + CREATE TEMPORARY TABLE table1 ( + id int PRIMARY KEY + )''') + # The constraint is set to deferrable for the commit_failed test + curs.execute(''' + CREATE TEMPORARY TABLE table2 ( + id int PRIMARY KEY, + table1_id int, + CONSTRAINT table2__table1_id__fk + FOREIGN KEY (table1_id) REFERENCES table1(id) DEFERRABLE)''') + curs.execute('INSERT INTO table1 VALUES (1)') + curs.execute('INSERT INTO table2 VALUES (1, 1)') + self.conn.commit() + + def test_rollback(self): + # Test that rollback undoes changes + curs = self.conn.cursor() + curs.execute('INSERT INTO table2 VALUES (2, 1)') + # Rollback takes us from BEGIN state to READY state + self.assertEqual(self.conn.status, STATUS_BEGIN) + self.conn.rollback() + self.assertEqual(self.conn.status, STATUS_READY) + curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2') + 
self.assertEqual(curs.fetchall(), []) + + def test_commit(self): + # Test that commit stores changes + curs = self.conn.cursor() + curs.execute('INSERT INTO table2 VALUES (2, 1)') + # Rollback takes us from BEGIN state to READY state + self.assertEqual(self.conn.status, STATUS_BEGIN) + self.conn.commit() + self.assertEqual(self.conn.status, STATUS_READY) + # Now rollback and show that the new record is still there: + self.conn.rollback() + curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2') + self.assertEqual(curs.fetchall(), [(2, 1)]) + + def test_failed_commit(self): + # Test that we can recover from a failed commit. + # We use a deferred constraint to cause a failure on commit. + curs = self.conn.cursor() + curs.execute('SET CONSTRAINTS table2__table1_id__fk DEFERRED') + curs.execute('INSERT INTO table2 VALUES (2, 42)') + # The commit should fail, and move the cursor back to READY state + self.assertEqual(self.conn.status, STATUS_BEGIN) + self.assertRaises(psycopg2.IntegrityError, self.conn.commit) + self.assertEqual(self.conn.status, STATUS_READY) + # The connection should be ready to use for the next transaction: + curs.execute('SELECT 1') + self.assertEqual(curs.fetchone()[0], 1) + + +class DeadlockSerializationTests(ConnectingTestCase): + """Test deadlock and serialization failure errors.""" + + def connect(self): + conn = ConnectingTestCase.connect(self) + conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE) + return conn + + def setUp(self): + ConnectingTestCase.setUp(self) + skip_if_crdb("isolation level", self.conn) + + curs = self.conn.cursor() + # Drop table if it already exists + try: + curs.execute("DROP TABLE table1") + self.conn.commit() + except psycopg2.DatabaseError: + self.conn.rollback() + try: + curs.execute("DROP TABLE table2") + self.conn.commit() + except psycopg2.DatabaseError: + self.conn.rollback() + # Create sample data + curs.execute(""" + CREATE TABLE table1 ( + id int PRIMARY KEY, + name text) + """) + 
curs.execute("INSERT INTO table1 VALUES (1, 'hello')") + curs.execute("CREATE TABLE table2 (id int PRIMARY KEY)") + self.conn.commit() + + def tearDown(self): + curs = self.conn.cursor() + curs.execute("DROP TABLE table1") + curs.execute("DROP TABLE table2") + self.conn.commit() + + ConnectingTestCase.tearDown(self) + + @slow + def test_deadlock(self): + self.thread1_error = self.thread2_error = None + step1 = threading.Event() + step2 = threading.Event() + + def task1(): + try: + conn = self.connect() + curs = conn.cursor() + curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE") + step1.set() + step2.wait() + curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE") + except psycopg2.DatabaseError as exc: + self.thread1_error = exc + step1.set() + conn.close() + + def task2(): + try: + conn = self.connect() + curs = conn.cursor() + step1.wait() + curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE") + step2.set() + curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE") + except psycopg2.DatabaseError as exc: + self.thread2_error = exc + step2.set() + conn.close() + + # Run the threads in parallel. The "step1" and "step2" events + # ensure that the two transactions overlap. 
+ thread1 = threading.Thread(target=task1) + thread2 = threading.Thread(target=task2) + thread1.start() + thread2.start() + thread1.join() + thread2.join() + + # Exactly one of the threads should have failed with + # TransactionRollbackError: + self.assertFalse(self.thread1_error and self.thread2_error) + error = self.thread1_error or self.thread2_error + self.assertTrue(isinstance( + error, psycopg2.extensions.TransactionRollbackError)) + + @slow + def test_serialisation_failure(self): + self.thread1_error = self.thread2_error = None + step1 = threading.Event() + step2 = threading.Event() + + def task1(): + try: + conn = self.connect() + curs = conn.cursor() + curs.execute("SELECT name FROM table1 WHERE id = 1") + curs.fetchall() + step1.set() + step2.wait() + curs.execute("UPDATE table1 SET name='task1' WHERE id = 1") + conn.commit() + except psycopg2.DatabaseError as exc: + self.thread1_error = exc + step1.set() + conn.close() + + def task2(): + try: + conn = self.connect() + curs = conn.cursor() + step1.wait() + curs.execute("UPDATE table1 SET name='task2' WHERE id = 1") + conn.commit() + except psycopg2.DatabaseError as exc: + self.thread2_error = exc + step2.set() + conn.close() + + # Run the threads in parallel. The "step1" and "step2" events + # ensure that the two transactions overlap. 
class QueryCancellationTests(ConnectingTestCase):
    """Tests for query cancellation."""

    def setUp(self):
        ConnectingTestCase.setUp(self)
        self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)

    @skip_before_postgres(8, 2)
    def test_statement_timeout(self):
        """A short statement_timeout cancels a long-running query."""
        cursor = self.conn.cursor()
        # Set a low statement timeout, then sleep for a longer period.
        cursor.execute('SET statement_timeout TO 10')
        self.assertRaises(
            psycopg2.extensions.QueryCanceledError,
            cursor.execute, 'SELECT pg_sleep(50)')
+# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import string +import ctypes +import decimal +import datetime +import platform + +from . import testutils +import unittest +from .testutils import ConnectingTestCase, restore_types +from .testutils import skip_if_crdb + +import psycopg2 +from psycopg2.extensions import AsIs, adapt, register_adapter + + +class TypesBasicTests(ConnectingTestCase): + """Test that all type conversions are working.""" + + def execute(self, *args): + curs = self.conn.cursor() + curs.execute(*args) + return curs.fetchone()[0] + + def testQuoting(self): + s = "Quote'this\\! ''ok?''" + self.failUnless(self.execute("SELECT %s AS foo", (s,)) == s, + "wrong quoting: " + s) + + def testUnicode(self): + s = "Quote'this\\! 
''ok?''" + self.failUnless(self.execute("SELECT %s AS foo", (s,)) == s, + "wrong unicode quoting: " + s) + + def testNumber(self): + s = self.execute("SELECT %s AS foo", (1971,)) + self.failUnless(s == 1971, "wrong integer quoting: " + str(s)) + + def testBoolean(self): + x = self.execute("SELECT %s as foo", (False,)) + self.assert_(x is False) + x = self.execute("SELECT %s as foo", (True,)) + self.assert_(x is True) + + def testDecimal(self): + s = self.execute("SELECT %s AS foo", (decimal.Decimal("19.10"),)) + self.failUnless(s - decimal.Decimal("19.10") == 0, + "wrong decimal quoting: " + str(s)) + s = self.execute("SELECT %s AS foo", (decimal.Decimal("NaN"),)) + self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s)) + self.failUnless(type(s) == decimal.Decimal, + "wrong decimal conversion: " + repr(s)) + s = self.execute("SELECT %s AS foo", (decimal.Decimal("infinity"),)) + self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s)) + self.failUnless(type(s) == decimal.Decimal, + "wrong decimal conversion: " + repr(s)) + s = self.execute("SELECT %s AS foo", (decimal.Decimal("-infinity"),)) + self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s)) + self.failUnless(type(s) == decimal.Decimal, + "wrong decimal conversion: " + repr(s)) + + def testFloatNan(self): + try: + float("nan") + except ValueError: + return self.skipTest("nan not available on this platform") + + s = self.execute("SELECT %s AS foo", (float("nan"),)) + self.failUnless(str(s) == "nan", "wrong float quoting: " + str(s)) + self.failUnless(type(s) == float, "wrong float conversion: " + repr(s)) + + def testFloatInf(self): + try: + self.execute("select 'inf'::float") + except psycopg2.DataError: + return self.skipTest("inf::float not available on the server") + except ValueError: + return self.skipTest("inf not available on this platform") + s = self.execute("SELECT %s AS foo", (float("inf"),)) + self.failUnless(str(s) == "inf", "wrong float quoting: " + str(s)) 
+ self.failUnless(type(s) == float, "wrong float conversion: " + repr(s)) + + s = self.execute("SELECT %s AS foo", (float("-inf"),)) + self.failUnless(str(s) == "-inf", "wrong float quoting: " + str(s)) + + def testBinary(self): + s = bytes(range(256)) + b = psycopg2.Binary(s) + buf = self.execute("SELECT %s::bytea AS foo", (b,)) + self.assertEqual(s, buf.tobytes()) + + def testBinaryNone(self): + b = psycopg2.Binary(None) + buf = self.execute("SELECT %s::bytea AS foo", (b,)) + self.assertEqual(buf, None) + + def testBinaryEmptyString(self): + # test to make sure an empty Binary is converted to an empty string + b = psycopg2.Binary(bytes([])) + self.assertEqual(str(b), "''::bytea") + + def testBinaryRoundTrip(self): + # test to make sure buffers returned by psycopg2 are + # understood by execute: + s = bytes(range(256)) + buf = self.execute("SELECT %s::bytea AS foo", (psycopg2.Binary(s),)) + buf2 = self.execute("SELECT %s::bytea AS foo", (buf,)) + self.assertEqual(s, buf2.tobytes()) + + @skip_if_crdb("nested array") + def testArray(self): + s = self.execute("SELECT %s AS foo", ([[1, 2], [3, 4]],)) + self.failUnlessEqual(s, [[1, 2], [3, 4]]) + s = self.execute("SELECT %s AS foo", (['one', 'two', 'three'],)) + self.failUnlessEqual(s, ['one', 'two', 'three']) + + @skip_if_crdb("nested array") + def testEmptyArrayRegression(self): + # ticket #42 + curs = self.conn.cursor() + curs.execute( + "create table array_test " + "(id integer, col timestamp without time zone[])") + + curs.execute("insert into array_test values (%s, %s)", + (1, [datetime.date(2011, 2, 14)])) + curs.execute("select col from array_test where id = 1") + self.assertEqual(curs.fetchone()[0], [datetime.datetime(2011, 2, 14, 0, 0)]) + + curs.execute("insert into array_test values (%s, %s)", (2, [])) + curs.execute("select col from array_test where id = 2") + self.assertEqual(curs.fetchone()[0], []) + + @skip_if_crdb("nested array") + @testutils.skip_before_postgres(8, 4) + def testNestedEmptyArray(self): 
+ # issue #788 + curs = self.conn.cursor() + curs.execute("select 10 = any(%s::int[])", ([[]], )) + self.assertFalse(curs.fetchone()[0]) + + def testEmptyArrayNoCast(self): + s = self.execute("SELECT '{}' AS foo") + self.assertEqual(s, '{}') + s = self.execute("SELECT %s AS foo", ([],)) + self.assertEqual(s, '{}') + + def testEmptyArray(self): + s = self.execute("SELECT '{}'::text[] AS foo") + self.failUnlessEqual(s, []) + s = self.execute("SELECT 1 != ALL(%s)", ([],)) + self.failUnlessEqual(s, True) + # but don't break the strings :) + s = self.execute("SELECT '{}'::text AS foo") + self.failUnlessEqual(s, "{}") + + def testArrayEscape(self): + ss = ['', '\\', '"', '\\\\', '\\"'] + for s in ss: + r = self.execute("SELECT %s AS foo", (s,)) + self.failUnlessEqual(s, r) + r = self.execute("SELECT %s AS foo", ([s],)) + self.failUnlessEqual([s], r) + + r = self.execute("SELECT %s AS foo", (ss,)) + self.failUnlessEqual(ss, r) + + def testArrayMalformed(self): + curs = self.conn.cursor() + ss = ['', '{', '{}}', '{' * 20 + '}' * 20] + for s in ss: + self.assertRaises(psycopg2.DataError, + psycopg2.extensions.STRINGARRAY, s.encode('utf8'), curs) + + def testTextArray(self): + curs = self.conn.cursor() + curs.execute("select '{a,b,c}'::text[]") + x = curs.fetchone()[0] + self.assert_(isinstance(x[0], str)) + self.assertEqual(x, ['a', 'b', 'c']) + + def testUnicodeArray(self): + psycopg2.extensions.register_type( + psycopg2.extensions.UNICODEARRAY, self.conn) + curs = self.conn.cursor() + curs.execute("select '{a,b,c}'::text[]") + x = curs.fetchone()[0] + self.assert_(isinstance(x[0], str)) + self.assertEqual(x, ['a', 'b', 'c']) + + def testBytesArray(self): + psycopg2.extensions.register_type( + psycopg2.extensions.BYTESARRAY, self.conn) + curs = self.conn.cursor() + curs.execute("select '{a,b,c}'::text[]") + x = curs.fetchone()[0] + self.assert_(isinstance(x[0], bytes)) + self.assertEqual(x, [b'a', b'b', b'c']) + + @skip_if_crdb("nested array") + 
@testutils.skip_before_postgres(8, 2) + def testArrayOfNulls(self): + curs = self.conn.cursor() + curs.execute(""" + create table na ( + texta text[], + inta int[], + boola boolean[], + + textaa text[][], + intaa int[][], + boolaa boolean[][] + )""") + + curs.execute("insert into na (texta) values (%s)", ([None],)) + curs.execute("insert into na (texta) values (%s)", (['a', None],)) + curs.execute("insert into na (texta) values (%s)", ([None, None],)) + curs.execute("insert into na (inta) values (%s)", ([None],)) + curs.execute("insert into na (inta) values (%s)", ([42, None],)) + curs.execute("insert into na (inta) values (%s)", ([None, None],)) + curs.execute("insert into na (boola) values (%s)", ([None],)) + curs.execute("insert into na (boola) values (%s)", ([True, None],)) + curs.execute("insert into na (boola) values (%s)", ([None, None],)) + + curs.execute("insert into na (textaa) values (%s)", ([[None]],)) + curs.execute("insert into na (textaa) values (%s)", ([['a', None]],)) + curs.execute("insert into na (textaa) values (%s)", ([[None, None]],)) + + curs.execute("insert into na (intaa) values (%s)", ([[None]],)) + curs.execute("insert into na (intaa) values (%s)", ([[42, None]],)) + curs.execute("insert into na (intaa) values (%s)", ([[None, None]],)) + + curs.execute("insert into na (boolaa) values (%s)", ([[None]],)) + curs.execute("insert into na (boolaa) values (%s)", ([[True, None]],)) + curs.execute("insert into na (boolaa) values (%s)", ([[None, None]],)) + + @skip_if_crdb("nested array") + @testutils.skip_before_postgres(8, 2) + def testNestedArrays(self): + curs = self.conn.cursor() + for a in [ + [[1]], + [[None]], + [[None, None, None]], + [[None, None], [1, None]], + [[None, None], [None, None]], + [[[None, None], [None, None]]], + ]: + curs.execute("select %s::int[]", (a,)) + self.assertEqual(curs.fetchone()[0], a) + + def testTypeRoundtripBytes(self): + o1 = bytes(range(256)) + o2 = self.execute("select %s;", (o1,)) + 
self.assertEqual(memoryview, type(o2)) + + # Test with an empty buffer + o1 = bytes([]) + o2 = self.execute("select %s;", (o1,)) + self.assertEqual(memoryview, type(o2)) + + def testTypeRoundtripBytesArray(self): + o1 = bytes(range(256)) + o1 = [o1] + o2 = self.execute("select %s;", (o1,)) + self.assertEqual(memoryview, type(o2[0])) + + def testAdaptBytearray(self): + o1 = bytearray(range(256)) + o2 = self.execute("select %s;", (o1,)) + self.assertEqual(memoryview, type(o2)) + self.assertEqual(len(o1), len(o2)) + for c1, c2 in zip(o1, o2): + self.assertEqual(c1, ord(c2)) + + # Test with an empty buffer + o1 = bytearray([]) + o2 = self.execute("select %s;", (o1,)) + self.assertEqual(len(o2), 0) + self.assertEqual(memoryview, type(o2)) + + def testAdaptMemoryview(self): + o1 = memoryview(bytearray(range(256))) + o2 = self.execute("select %s;", (o1,)) + self.assertEqual(memoryview, type(o2)) + + # Test with an empty buffer + o1 = memoryview(bytearray([])) + o2 = self.execute("select %s;", (o1,)) + self.assertEqual(memoryview, type(o2)) + + def testByteaHexCheckFalsePositive(self): + # the check \x -> x to detect bad bytea decode + # may be fooled if the first char is really an 'x' + o1 = psycopg2.Binary(b'x') + o2 = self.execute("SELECT %s::bytea AS foo", (o1,)) + self.assertEqual(b'x', o2[0]) + + def testNegNumber(self): + d1 = self.execute("select -%s;", (decimal.Decimal('-1.0'),)) + self.assertEqual(1, d1) + f1 = self.execute("select -%s;", (-1.0,)) + self.assertEqual(1, f1) + i1 = self.execute("select -%s;", (-1,)) + self.assertEqual(1, i1) + + def testGenericArray(self): + a = self.execute("select '{1, 2, 3}'::int4[]") + self.assertEqual(a, [1, 2, 3]) + a = self.execute("select array['a', 'b', '''']::text[]") + self.assertEqual(a, ['a', 'b', "'"]) + + @testutils.skip_before_postgres(8, 2) + def testGenericArrayNull(self): + def caster(s, cur): + if s is None: + return "nada" + return int(s) * 2 + base = psycopg2.extensions.new_type((23,), "INT4", caster) + array 
class AdaptSubclassTest(unittest.TestCase):
    """Check how adapt() resolves adapters across class hierarchies."""

    def test_adapt_subtype(self):
        # A str subclass picks up the adapter registered for str.
        class Sub(str):
            pass
        plain = "hel'lo"
        subbed = Sub(plain)
        self.assertEqual(adapt(plain).getquoted(), adapt(subbed).getquoted())

    @restore_types
    def test_adapt_most_specific(self):
        # With adapters on both A and B, a C instance (C < B < A) must
        # use the most specific one (B's).
        class A:
            pass

        class B(A):
            pass

        class C(B):
            pass

        register_adapter(A, lambda a: AsIs("a"))
        register_adapter(B, lambda b: AsIs("b"))
        self.assertEqual(b'b', adapt(C()).getquoted())

    @restore_types
    def test_adapt_subtype_3(self):
        # An adapter registered on a base class applies to its subclasses.
        class A:
            pass

        class B(A):
            pass

        register_adapter(A, lambda a: AsIs("a"))
        self.assertEqual(b"a", adapt(B()).getquoted())

    def test_conform_subclass_precedence(self):
        # __conform__ defined on the subclass wins over the adapter
        # registered for the base type (tuple).
        class foo(tuple):
            def __conform__(self, proto):
                return self

            def getquoted(self):
                return 'bar'

        self.assertEqual(adapt(foo((1, 2, 3))).getquoted(), 'bar')
bytea format parser.""" + def setUp(self): + self._cast = self._import_cast() + + def _import_cast(self): + """Use ctypes to access the C function.""" + lib = ctypes.pydll.LoadLibrary(psycopg2._psycopg.__file__) + cast = lib.typecast_BINARY_cast + cast.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.py_object] + cast.restype = ctypes.py_object + return cast + + def cast(self, buffer): + """Cast a buffer from the output format""" + l = buffer and len(buffer) or 0 + rv = self._cast(buffer, l, None) + + if rv is None: + return None + + return rv.tobytes() + + def test_null(self): + rv = self.cast(None) + self.assertEqual(rv, None) + + def test_blank(self): + rv = self.cast(b'') + self.assertEqual(rv, b'') + + def test_blank_hex(self): + # Reported as problematic in ticket #48 + rv = self.cast(b'\\x') + self.assertEqual(rv, b'') + + def test_full_hex(self, upper=False): + buf = ''.join(("%02x" % i) for i in range(256)) + if upper: + buf = buf.upper() + buf = '\\x' + buf + rv = self.cast(buf.encode('utf8')) + self.assertEqual(rv, bytes(range(256))) + + def test_full_hex_upper(self): + return self.test_full_hex(upper=True) + + def test_full_escaped_octal(self): + buf = ''.join(("\\%03o" % i) for i in range(256)) + rv = self.cast(buf.encode('utf8')) + self.assertEqual(rv, bytes(range(256))) + + def test_escaped_mixed(self): + buf = ''.join(("\\%03o" % i) for i in range(32)) + buf += string.ascii_letters + buf += ''.join('\\' + c for c in string.ascii_letters) + buf += '\\\\' + rv = self.cast(buf.encode('utf8')) + tgt = bytes(range(32)) + \ + (string.ascii_letters * 2 + '\\').encode('ascii') + + self.assertEqual(rv, tgt) + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_types_extras.py b/tests/test_types_extras.py new file mode 100755 index 0000000000000000000000000000000000000000..77f899850adad5b2249de663dc325a924d5f1b9f --- /dev/null +++ 
b/tests/test_types_extras.py @@ -0,0 +1,1597 @@ +#!/usr/bin/env python +# +# types_extras.py - tests for extras types conversions +# +# Copyright (C) 2008-2019 Federico Di Gregorio +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +import re +import json +import uuid +import warnings +from decimal import Decimal +from datetime import date, datetime, timedelta, timezone +from functools import wraps +from pickle import dumps, loads + +import unittest +from .testutils import (skip_if_no_uuid, skip_before_postgres, + ConnectingTestCase, raises_typeerror, slow, + restore_types, skip_if_crdb, crdb_version) + +import psycopg2 +import psycopg2.extras +import psycopg2.extensions as ext +from psycopg2._json import _get_json_oids +from psycopg2.extras import ( + CompositeCaster, DateRange, DateTimeRange, DateTimeTZRange, HstoreAdapter, + Inet, Json, NumericRange, Range, RealDictConnection, + register_composite, register_hstore, register_range, +) + + +class TypesExtrasTests(ConnectingTestCase): + """Test that all type conversions are working.""" + + def execute(self, *args): + curs = self.conn.cursor() + curs.execute(*args) + return curs.fetchone()[0] + + @skip_if_no_uuid + def testUUID(self): + psycopg2.extras.register_uuid() + u = uuid.UUID('9c6d5a77-7256-457e-9461-347b4358e350') + s = self.execute("SELECT %s AS foo", (u,)) + self.failUnless(u == s) + # must survive NULL cast to a uuid + s = self.execute("SELECT NULL::uuid AS foo") + self.failUnless(s is None) + + 
@skip_if_no_uuid + def testUUIDARRAY(self): + psycopg2.extras.register_uuid() + u = [uuid.UUID('9c6d5a77-7256-457e-9461-347b4358e350'), + uuid.UUID('9c6d5a77-7256-457e-9461-347b4358e352')] + s = self.execute("SELECT %s AS foo", (u,)) + self.failUnless(u == s) + # array with a NULL element + u = [uuid.UUID('9c6d5a77-7256-457e-9461-347b4358e350'), None] + s = self.execute("SELECT %s AS foo", (u,)) + self.failUnless(u == s) + # must survive NULL cast to a uuid[] + s = self.execute("SELECT NULL::uuid[] AS foo") + self.failUnless(s is None) + # what about empty arrays? + s = self.execute("SELECT '{}'::uuid[] AS foo") + self.failUnless(type(s) == list and len(s) == 0) + + @restore_types + def testINET(self): + with warnings.catch_warnings(): + warnings.simplefilter('ignore', DeprecationWarning) + psycopg2.extras.register_inet() + + i = psycopg2.extras.Inet("192.168.1.0/24") + s = self.execute("SELECT %s AS foo", (i,)) + self.failUnless(i.addr == s.addr) + # must survive NULL cast to inet + s = self.execute("SELECT NULL::inet AS foo") + self.failUnless(s is None) + + @restore_types + def testINETARRAY(self): + with warnings.catch_warnings(): + warnings.simplefilter('ignore', DeprecationWarning) + psycopg2.extras.register_inet() + + i = psycopg2.extras.Inet("192.168.1.0/24") + s = self.execute("SELECT %s AS foo", ([i],)) + self.failUnless(i.addr == s[0].addr) + # must survive NULL cast to inet + s = self.execute("SELECT NULL::inet[] AS foo") + self.failUnless(s is None) + + def test_inet_conform(self): + i = Inet("192.168.1.0/24") + a = psycopg2.extensions.adapt(i) + a.prepare(self.conn) + self.assertQuotedEqual(a.getquoted(), b"'192.168.1.0/24'::inet") + + # adapts ok with unicode too + i = Inet("192.168.1.0/24") + a = psycopg2.extensions.adapt(i) + a.prepare(self.conn) + self.assertQuotedEqual(a.getquoted(), b"'192.168.1.0/24'::inet") + + def test_adapt_fail(self): + class Foo: + pass + self.assertRaises(psycopg2.ProgrammingError, + psycopg2.extensions.adapt, Foo(), 
ext.ISQLQuote, None) + try: + psycopg2.extensions.adapt(Foo(), ext.ISQLQuote, None) + except psycopg2.ProgrammingError as err: + self.failUnless(str(err) == "can't adapt type 'Foo'") + + def test_point_array(self): + # make sure a point array is never casted to a float array, + # see https://github.com/psycopg/psycopg2/issues/613 + s = self.execute("""SELECT '{"(1,2)","(3,4)"}' AS foo""") + self.failUnless(s == """{"(1,2)","(3,4)"}""") + + +def skip_if_no_hstore(f): + @wraps(f) + @skip_if_crdb("hstore") + def skip_if_no_hstore_(self): + oids = HstoreAdapter.get_oids(self.conn) + if oids is None or not oids[0]: + return self.skipTest("hstore not available in test database") + return f(self) + + return skip_if_no_hstore_ + + +class HstoreTestCase(ConnectingTestCase): + def test_adapt_8(self): + if self.conn.info.server_version >= 90000: + return self.skipTest("skipping dict adaptation with PG pre-9 syntax") + + o = {'a': '1', 'b': "'", 'c': None} + if self.conn.encoding == 'UTF8': + o['d'] = '\xe0' + + a = HstoreAdapter(o) + a.prepare(self.conn) + q = a.getquoted() + + self.assert_(q.startswith(b"(("), q) + ii = q[1:-1].split(b"||") + ii.sort() + + self.assertEqual(len(ii), len(o)) + self.assertQuotedEqual(ii[0], b"('a' => '1')") + self.assertQuotedEqual(ii[1], b"('b' => '''')") + self.assertQuotedEqual(ii[2], b"('c' => NULL)") + if 'd' in o: + encc = '\xe0'.encode(psycopg2.extensions.encodings[self.conn.encoding]) + self.assertQuotedEqual(ii[3], b"('d' => '" + encc + b"')") + + def test_adapt_9(self): + if self.conn.info.server_version < 90000: + return self.skipTest("skipping dict adaptation with PG 9 syntax") + + o = {'a': '1', 'b': "'", 'c': None} + if self.conn.encoding == 'UTF8': + o['d'] = '\xe0' + + a = HstoreAdapter(o) + a.prepare(self.conn) + q = a.getquoted() + + m = re.match(br'hstore\(ARRAY\[([^\]]+)\], ARRAY\[([^\]]+)\]\)', q) + self.assert_(m, repr(q)) + + kk = m.group(1).split(b",") + vv = m.group(2).split(b",") + ii = list(zip(kk, vv)) + ii.sort() + 
+ self.assertEqual(len(ii), len(o)) + self.assertQuotedEqual(ii[0][0], b"'a'") + self.assertQuotedEqual(ii[0][1], b"'1'") + self.assertQuotedEqual(ii[1][0], b"'b'") + self.assertQuotedEqual(ii[1][1], b"''''") + self.assertQuotedEqual(ii[2][0], b"'c'") + self.assertQuotedEqual(ii[2][1], b"NULL") + if 'd' in o: + encc = '\xe0'.encode(psycopg2.extensions.encodings[self.conn.encoding]) + self.assertQuotedEqual(ii[3][0], b"'d'") + self.assertQuotedEqual(ii[3][1], b"'" + encc + b"'") + + def test_parse(self): + def ok(s, d): + self.assertEqual(HstoreAdapter.parse(s, None), d) + + ok(None, None) + ok('', {}) + ok('"a"=>"1", "b"=>"2"', {'a': '1', 'b': '2'}) + ok('"a" => "1" , "b" => "2"', {'a': '1', 'b': '2'}) + ok('"a"=>NULL, "b"=>"2"', {'a': None, 'b': '2'}) + ok(r'"a"=>"\"", "\""=>"2"', {'a': '"', '"': '2'}) + ok('"a"=>"\'", "\'"=>"2"', {'a': "'", "'": '2'}) + ok('"a"=>"1", "b"=>NULL', {'a': '1', 'b': None}) + ok(r'"a\\"=>"1"', {'a\\': '1'}) + ok(r'"a\""=>"1"', {'a"': '1'}) + ok(r'"a\\\""=>"1"', {r'a\"': '1'}) + ok(r'"a\\\\\""=>"1"', {r'a\\"': '1'}) + + def ko(s): + self.assertRaises(psycopg2.InterfaceError, + HstoreAdapter.parse, s, None) + + ko('a') + ko('"a"') + ko(r'"a\\""=>"1"') + ko(r'"a\\\\""=>"1"') + ko('"a=>"1"') + ko('"a"=>"1", "b"=>NUL') + + @skip_if_no_hstore + def test_register_conn(self): + register_hstore(self.conn) + cur = self.conn.cursor() + cur.execute("select null::hstore, ''::hstore, 'a => b'::hstore") + t = cur.fetchone() + self.assert_(t[0] is None) + self.assertEqual(t[1], {}) + self.assertEqual(t[2], {'a': 'b'}) + + @skip_if_no_hstore + def test_register_curs(self): + cur = self.conn.cursor() + register_hstore(cur) + cur.execute("select null::hstore, ''::hstore, 'a => b'::hstore") + t = cur.fetchone() + self.assert_(t[0] is None) + self.assertEqual(t[1], {}) + self.assertEqual(t[2], {'a': 'b'}) + + @skip_if_no_hstore + @restore_types + def test_register_globally(self): + HstoreAdapter.get_oids(self.conn) + register_hstore(self.conn, 
globally=True) + conn2 = self.connect() + try: + cur2 = self.conn.cursor() + cur2.execute("select 'a => b'::hstore") + r = cur2.fetchone() + self.assert_(isinstance(r[0], dict)) + finally: + conn2.close() + + @skip_if_no_hstore + def test_roundtrip(self): + register_hstore(self.conn) + cur = self.conn.cursor() + + def ok(d): + cur.execute("select %s", (d,)) + d1 = cur.fetchone()[0] + self.assertEqual(len(d), len(d1)) + for k in d: + self.assert_(k in d1, k) + self.assertEqual(d[k], d1[k]) + + ok({}) + ok({'a': 'b', 'c': None}) + + ab = list(map(chr, range(32, 128))) + ok(dict(zip(ab, ab))) + ok({''.join(ab): ''.join(ab)}) + + self.conn.set_client_encoding('latin1') + ab = bytes(list(range(32, 127)) + list(range(160, 255))).decode('latin1') + + ok({''.join(ab): ''.join(ab)}) + ok(dict(zip(ab, ab))) + + @skip_if_no_hstore + @restore_types + def test_oid(self): + cur = self.conn.cursor() + cur.execute("select 'hstore'::regtype::oid") + oid = cur.fetchone()[0] + + # Note: None as conn_or_cursor is just for testing: not public + # interface and it may break in future. 
+ register_hstore(None, globally=True, oid=oid) + cur.execute("select null::hstore, ''::hstore, 'a => b'::hstore") + t = cur.fetchone() + self.assert_(t[0] is None) + self.assertEqual(t[1], {}) + self.assertEqual(t[2], {'a': 'b'}) + + @skip_if_no_hstore + @skip_before_postgres(8, 3) + def test_roundtrip_array(self): + register_hstore(self.conn) + + ds = [{}, {'a': 'b', 'c': None}] + + ab = list(map(chr, range(32, 128))) + ds.append(dict(zip(ab, ab))) + ds.append({''.join(ab): ''.join(ab)}) + + self.conn.set_client_encoding('latin1') + ab = bytes(list(range(32, 127)) + list(range(160, 255))).decode('latin1') + + ds.append({''.join(ab): ''.join(ab)}) + ds.append(dict(zip(ab, ab))) + + cur = self.conn.cursor() + cur.execute("select %s", (ds,)) + ds1 = cur.fetchone()[0] + self.assertEqual(ds, ds1) + + @skip_if_no_hstore + @skip_before_postgres(8, 3) + def test_array_cast(self): + register_hstore(self.conn) + cur = self.conn.cursor() + cur.execute("select array['a=>1'::hstore, 'b=>2'::hstore];") + a = cur.fetchone()[0] + self.assertEqual(a, [{'a': '1'}, {'b': '2'}]) + + @skip_if_no_hstore + @restore_types + def test_array_cast_oid(self): + cur = self.conn.cursor() + cur.execute("select 'hstore'::regtype::oid, 'hstore[]'::regtype::oid") + oid, aoid = cur.fetchone() + + register_hstore(None, globally=True, oid=oid, array_oid=aoid) + cur.execute(""" + select null::hstore, ''::hstore, + 'a => b'::hstore, '{a=>b}'::hstore[]""") + t = cur.fetchone() + self.assert_(t[0] is None) + self.assertEqual(t[1], {}) + self.assertEqual(t[2], {'a': 'b'}) + self.assertEqual(t[3], [{'a': 'b'}]) + + @skip_if_no_hstore + def test_non_dbapi_connection(self): + conn = self.connect(connection_factory=RealDictConnection) + try: + register_hstore(conn) + curs = conn.cursor() + curs.execute("select ''::hstore as x") + self.assertEqual(curs.fetchone()['x'], {}) + finally: + conn.close() + + conn = self.connect(connection_factory=RealDictConnection) + try: + curs = conn.cursor() + 
register_hstore(curs) + curs.execute("select ''::hstore as x") + self.assertEqual(curs.fetchone()['x'], {}) + finally: + conn.close() + + +def skip_if_no_composite(f): + @wraps(f) + @skip_if_crdb("composite") + def skip_if_no_composite_(self): + if self.conn.info.server_version < 80000: + return self.skipTest( + "server version %s doesn't support composite types" + % self.conn.info.server_version) + + return f(self) + + return skip_if_no_composite_ + + +class AdaptTypeTestCase(ConnectingTestCase): + @skip_if_no_composite + def test_none_in_record(self): + curs = self.conn.cursor() + s = curs.mogrify("SELECT %s;", [(42, None)]) + self.assertEqual(b"SELECT (42, NULL);", s) + curs.execute("SELECT %s;", [(42, None)]) + d = curs.fetchone()[0] + self.assertEqual("(42,)", d) + + def test_none_fast_path(self): + # the None adapter is not actually invoked in regular adaptation + + class WonkyAdapter: + def __init__(self, obj): + pass + + def getquoted(self): + return "NOPE!" + + curs = self.conn.cursor() + + orig_adapter = ext.adapters[type(None), ext.ISQLQuote] + try: + ext.register_adapter(type(None), WonkyAdapter) + self.assertEqual(ext.adapt(None).getquoted(), "NOPE!") + + s = curs.mogrify("SELECT %s;", (None,)) + self.assertEqual(b"SELECT NULL;", s) + + finally: + ext.register_adapter(type(None), orig_adapter) + + def test_tokenization(self): + def ok(s, v): + self.assertEqual(CompositeCaster.tokenize(s), v) + + ok("(,)", [None, None]) + ok('(,"")', [None, '']) + ok('(hello,,10.234,2010-11-11)', ['hello', None, '10.234', '2010-11-11']) + ok('(10,"""")', ['10', '"']) + ok('(10,",")', ['10', ',']) + ok(r'(10,"\\")', ['10', '\\']) + ok(r'''(10,"\\',""")''', ['10', '''\\',"''']) + ok('(10,"(20,""(30,40)"")")', ['10', '(20,"(30,40)")']) + ok('(10,"(20,""(30,""""(40,50)"""")"")")', ['10', '(20,"(30,""(40,50)"")")']) + ok('(,"(,""(a\nb\tc)"")")', [None, '(,"(a\nb\tc)")']) + ok('(\x01,\x02,\x03,\x04,\x05,\x06,\x07,\x08,"\t","\n","\x0b",' + 
'"\x0c","\r",\x0e,\x0f,\x10,\x11,\x12,\x13,\x14,\x15,\x16,' + '\x17,\x18,\x19,\x1a,\x1b,\x1c,\x1d,\x1e,\x1f," ",!,"""",#,' + '$,%,&,\',"(",")",*,+,",",-,.,/,0,1,2,3,4,5,6,7,8,9,:,;,<,=,>,?,' + '@,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,[,"\\\\",],' + '^,_,`,a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,{,|,},' + '~,\x7f)', + list(map(chr, range(1, 128)))) + ok('(,"\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' + '\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !' + '""#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]' + '^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f")', + [None, ''.join(map(chr, range(1, 128)))]) + + @skip_if_no_composite + def test_cast_composite(self): + oid = self._create_type("type_isd", + [('anint', 'integer'), ('astring', 'text'), ('adate', 'date')]) + + t = psycopg2.extras.register_composite("type_isd", self.conn) + self.assertEqual(t.name, 'type_isd') + self.assertEqual(t.schema, 'public') + self.assertEqual(t.oid, oid) + self.assert_(issubclass(t.type, tuple)) + self.assertEqual(t.attnames, ['anint', 'astring', 'adate']) + self.assertEqual(t.atttypes, [23, 25, 1082]) + + curs = self.conn.cursor() + r = (10, 'hello', date(2011, 1, 2)) + curs.execute("select %s::type_isd;", (r,)) + v = curs.fetchone()[0] + self.assert_(isinstance(v, t.type)) + self.assertEqual(v[0], 10) + self.assertEqual(v[1], "hello") + self.assertEqual(v[2], date(2011, 1, 2)) + self.assert_(t.type is not tuple) + self.assertEqual(v.anint, 10) + self.assertEqual(v.astring, "hello") + self.assertEqual(v.adate, date(2011, 1, 2)) + + @skip_if_no_composite + def test_empty_string(self): + # issue #141 + self._create_type("type_ss", [('s1', 'text'), ('s2', 'text')]) + curs = self.conn.cursor() + psycopg2.extras.register_composite("type_ss", curs) + + def ok(t): + curs.execute("select %s::type_ss", (t,)) + rv = curs.fetchone()[0] + self.assertEqual(t, rv) + + ok(('a', 'b')) + ok(('a', '')) + ok(('', 'b')) + ok(('a', None)) + 
ok((None, 'b')) + ok(('', '')) + ok((None, None)) + + @skip_if_no_composite + def test_cast_nested(self): + self._create_type("type_is", + [("anint", "integer"), ("astring", "text")]) + self._create_type("type_r_dt", + [("adate", "date"), ("apair", "type_is")]) + self._create_type("type_r_ft", + [("afloat", "float8"), ("anotherpair", "type_r_dt")]) + + psycopg2.extras.register_composite("type_is", self.conn) + psycopg2.extras.register_composite("type_r_dt", self.conn) + psycopg2.extras.register_composite("type_r_ft", self.conn) + + curs = self.conn.cursor() + r = (0.25, (date(2011, 1, 2), (42, "hello"))) + curs.execute("select %s::type_r_ft;", (r,)) + v = curs.fetchone()[0] + + self.assertEqual(r, v) + self.assertEqual(v.anotherpair.apair.astring, "hello") + + @skip_if_no_composite + def test_register_on_cursor(self): + self._create_type("type_ii", [("a", "integer"), ("b", "integer")]) + + curs1 = self.conn.cursor() + curs2 = self.conn.cursor() + psycopg2.extras.register_composite("type_ii", curs1) + curs1.execute("select (1,2)::type_ii") + self.assertEqual(curs1.fetchone()[0], (1, 2)) + curs2.execute("select (1,2)::type_ii") + self.assertEqual(curs2.fetchone()[0], "(1,2)") + + @skip_if_no_composite + def test_register_on_connection(self): + self._create_type("type_ii", [("a", "integer"), ("b", "integer")]) + + conn1 = self.connect() + conn2 = self.connect() + try: + psycopg2.extras.register_composite("type_ii", conn1) + curs1 = conn1.cursor() + curs2 = conn2.cursor() + curs1.execute("select (1,2)::type_ii") + self.assertEqual(curs1.fetchone()[0], (1, 2)) + curs2.execute("select (1,2)::type_ii") + self.assertEqual(curs2.fetchone()[0], "(1,2)") + finally: + conn1.close() + conn2.close() + + @skip_if_no_composite + @restore_types + def test_register_globally(self): + self._create_type("type_ii", [("a", "integer"), ("b", "integer")]) + + conn1 = self.connect() + conn2 = self.connect() + try: + psycopg2.extras.register_composite("type_ii", conn1, globally=True) + curs1 
= conn1.cursor() + curs2 = conn2.cursor() + curs1.execute("select (1,2)::type_ii") + self.assertEqual(curs1.fetchone()[0], (1, 2)) + curs2.execute("select (1,2)::type_ii") + self.assertEqual(curs2.fetchone()[0], (1, 2)) + + finally: + conn1.close() + conn2.close() + + @skip_if_no_composite + def test_composite_namespace(self): + curs = self.conn.cursor() + curs.execute(""" + select nspname from pg_namespace + where nspname = 'typens'; + """) + if not curs.fetchone(): + curs.execute("create schema typens;") + self.conn.commit() + + self._create_type("typens.typens_ii", + [("a", "integer"), ("b", "integer")]) + t = psycopg2.extras.register_composite( + "typens.typens_ii", self.conn) + self.assertEqual(t.schema, 'typens') + curs.execute("select (4,8)::typens.typens_ii") + self.assertEqual(curs.fetchone()[0], (4, 8)) + + @skip_if_no_composite + @skip_before_postgres(8, 4) + def test_composite_array(self): + self._create_type("type_isd", + [('anint', 'integer'), ('astring', 'text'), ('adate', 'date')]) + + t = psycopg2.extras.register_composite("type_isd", self.conn) + + curs = self.conn.cursor() + r1 = (10, 'hello', date(2011, 1, 2)) + r2 = (20, 'world', date(2011, 1, 3)) + curs.execute("select %s::type_isd[];", ([r1, r2],)) + v = curs.fetchone()[0] + self.assertEqual(len(v), 2) + self.assert_(isinstance(v[0], t.type)) + self.assertEqual(v[0][0], 10) + self.assertEqual(v[0][1], "hello") + self.assertEqual(v[0][2], date(2011, 1, 2)) + self.assert_(isinstance(v[1], t.type)) + self.assertEqual(v[1][0], 20) + self.assertEqual(v[1][1], "world") + self.assertEqual(v[1][2], date(2011, 1, 3)) + + @skip_if_no_composite + def test_wrong_schema(self): + oid = self._create_type("type_ii", [("a", "integer"), ("b", "integer")]) + c = CompositeCaster('type_ii', oid, [('a', 23), ('b', 23), ('c', 23)]) + curs = self.conn.cursor() + psycopg2.extensions.register_type(c.typecaster, curs) + curs.execute("select (1,2)::type_ii") + self.assertRaises(psycopg2.DataError, curs.fetchone) + + 
@slow + @skip_if_no_composite + @skip_before_postgres(8, 4) + def test_from_tables(self): + curs = self.conn.cursor() + curs.execute("""create table ctest1 ( + id integer primary key, + temp int, + label varchar + );""") + + curs.execute("""alter table ctest1 drop temp;""") + + curs.execute("""create table ctest2 ( + id serial primary key, + label varchar, + test_id integer references ctest1(id) + );""") + + curs.execute("""insert into ctest1 (id, label) values + (1, 'test1'), + (2, 'test2');""") + curs.execute("""insert into ctest2 (label, test_id) values + ('testa', 1), + ('testb', 1), + ('testc', 2), + ('testd', 2);""") + + psycopg2.extras.register_composite("ctest1", curs) + psycopg2.extras.register_composite("ctest2", curs) + + curs.execute(""" + select ctest1, array_agg(ctest2) as test2s + from ( + select ctest1, ctest2 + from ctest1 inner join ctest2 on ctest1.id = ctest2.test_id + order by ctest1.id, ctest2.label + ) x group by ctest1;""") + + r = curs.fetchone() + self.assertEqual(r[0], (1, 'test1')) + self.assertEqual(r[1], [(1, 'testa', 1), (2, 'testb', 1)]) + r = curs.fetchone() + self.assertEqual(r[0], (2, 'test2')) + self.assertEqual(r[1], [(3, 'testc', 2), (4, 'testd', 2)]) + + @skip_if_no_composite + def test_non_dbapi_connection(self): + self._create_type("type_ii", [("a", "integer"), ("b", "integer")]) + + conn = self.connect(connection_factory=RealDictConnection) + try: + register_composite('type_ii', conn) + curs = conn.cursor() + curs.execute("select '(1,2)'::type_ii as x") + self.assertEqual(curs.fetchone()['x'], (1, 2)) + finally: + conn.close() + + conn = self.connect(connection_factory=RealDictConnection) + try: + curs = conn.cursor() + register_composite('type_ii', conn) + curs.execute("select '(1,2)'::type_ii as x") + self.assertEqual(curs.fetchone()['x'], (1, 2)) + finally: + conn.close() + + @skip_if_no_composite + def test_subclass(self): + oid = self._create_type("type_isd", + [('anint', 'integer'), ('astring', 'text'), ('adate', 
'date')]) + + class DictComposite(CompositeCaster): + def make(self, values): + return dict(zip(self.attnames, values)) + + t = register_composite('type_isd', self.conn, factory=DictComposite) + + self.assertEqual(t.name, 'type_isd') + self.assertEqual(t.oid, oid) + + curs = self.conn.cursor() + r = (10, 'hello', date(2011, 1, 2)) + curs.execute("select %s::type_isd;", (r,)) + v = curs.fetchone()[0] + self.assert_(isinstance(v, dict)) + self.assertEqual(v['anint'], 10) + self.assertEqual(v['astring'], "hello") + self.assertEqual(v['adate'], date(2011, 1, 2)) + + def _create_type(self, name, fields): + curs = self.conn.cursor() + try: + curs.execute(f"drop type {name} cascade;") + except psycopg2.ProgrammingError: + self.conn.rollback() + + curs.execute("create type {} as ({});".format(name, + ", ".join(["%s %s" % p for p in fields]))) + if '.' in name: + schema, name = name.split('.') + else: + schema = 'public' + + curs.execute("""\ + SELECT t.oid + FROM pg_type t JOIN pg_namespace ns ON typnamespace = ns.oid + WHERE typname = %s and nspname = %s; + """, (name, schema)) + oid = curs.fetchone()[0] + self.conn.commit() + return oid + + +def skip_if_no_json_type(f): + """Skip a test if PostgreSQL json type is not available""" + @wraps(f) + def skip_if_no_json_type_(self): + curs = self.conn.cursor() + curs.execute("select oid from pg_type where typname = 'json'") + if not curs.fetchone(): + return self.skipTest("json not available in test database") + + return f(self) + + return skip_if_no_json_type_ + + +@skip_if_crdb("json") +class JsonTestCase(ConnectingTestCase): + def test_adapt(self): + objs = [None, "te'xt", 123, 123.45, + '\xe0\u20ac', ['a', 100], {'a': 100}] + + curs = self.conn.cursor() + for obj in enumerate(objs): + self.assertQuotedEqual(curs.mogrify("%s", (Json(obj),)), + psycopg2.extensions.QuotedString(json.dumps(obj)).getquoted()) + + def test_adapt_dumps(self): + class DecimalEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, 
Decimal): + return float(obj) + return json.JSONEncoder.default(self, obj) + + curs = self.conn.cursor() + obj = Decimal('123.45') + + def dumps(obj): + return json.dumps(obj, cls=DecimalEncoder) + self.assertQuotedEqual(curs.mogrify("%s", (Json(obj, dumps=dumps),)), + b"'123.45'") + + def test_adapt_subclass(self): + class DecimalEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, Decimal): + return float(obj) + return json.JSONEncoder.default(self, obj) + + class MyJson(Json): + def dumps(self, obj): + return json.dumps(obj, cls=DecimalEncoder) + + curs = self.conn.cursor() + obj = Decimal('123.45') + self.assertQuotedEqual(curs.mogrify("%s", (MyJson(obj),)), b"'123.45'") + + @restore_types + def test_register_on_dict(self): + psycopg2.extensions.register_adapter(dict, Json) + + curs = self.conn.cursor() + obj = {'a': 123} + self.assertQuotedEqual( + curs.mogrify("%s", (obj,)), b"""'{"a": 123}'""") + + def test_type_not_available(self): + curs = self.conn.cursor() + curs.execute("select oid from pg_type where typname = 'json'") + if curs.fetchone(): + return self.skipTest("json available in test database") + + self.assertRaises(psycopg2.ProgrammingError, + psycopg2.extras.register_json, self.conn) + + @skip_before_postgres(9, 2) + def test_default_cast(self): + curs = self.conn.cursor() + + curs.execute("""select '{"a": 100.0, "b": null}'::json""") + self.assertEqual(curs.fetchone()[0], {'a': 100.0, 'b': None}) + + curs.execute("""select array['{"a": 100.0, "b": null}']::json[]""") + self.assertEqual(curs.fetchone()[0], [{'a': 100.0, 'b': None}]) + + @skip_if_no_json_type + def test_register_on_connection(self): + psycopg2.extras.register_json(self.conn) + curs = self.conn.cursor() + curs.execute("""select '{"a": 100.0, "b": null}'::json""") + self.assertEqual(curs.fetchone()[0], {'a': 100.0, 'b': None}) + + @skip_if_no_json_type + def test_register_on_cursor(self): + curs = self.conn.cursor() + psycopg2.extras.register_json(curs) + 
curs.execute("""select '{"a": 100.0, "b": null}'::json""") + self.assertEqual(curs.fetchone()[0], {'a': 100.0, 'b': None}) + + @skip_if_no_json_type + @restore_types + def test_register_globally(self): + new, newa = psycopg2.extras.register_json(self.conn, globally=True) + curs = self.conn.cursor() + curs.execute("""select '{"a": 100.0, "b": null}'::json""") + self.assertEqual(curs.fetchone()[0], {'a': 100.0, 'b': None}) + + @skip_if_no_json_type + def test_loads(self): + json = psycopg2.extras.json + + def loads(s): + return json.loads(s, parse_float=Decimal) + psycopg2.extras.register_json(self.conn, loads=loads) + curs = self.conn.cursor() + curs.execute("""select '{"a": 100.0, "b": null}'::json""") + data = curs.fetchone()[0] + self.assert_(isinstance(data['a'], Decimal)) + self.assertEqual(data['a'], Decimal('100.0')) + + @skip_if_no_json_type + @restore_types + def test_no_conn_curs(self): + oid, array_oid = _get_json_oids(self.conn) + + def loads(s): + return psycopg2.extras.json.loads(s, parse_float=Decimal) + + new, newa = psycopg2.extras.register_json( + loads=loads, oid=oid, array_oid=array_oid) + curs = self.conn.cursor() + curs.execute("""select '{"a": 100.0, "b": null}'::json""") + data = curs.fetchone()[0] + self.assert_(isinstance(data['a'], Decimal)) + self.assertEqual(data['a'], Decimal('100.0')) + + @skip_before_postgres(9, 2) + def test_register_default(self): + curs = self.conn.cursor() + + def loads(s): + return psycopg2.extras.json.loads(s, parse_float=Decimal) + psycopg2.extras.register_default_json(curs, loads=loads) + + curs.execute("""select '{"a": 100.0, "b": null}'::json""") + data = curs.fetchone()[0] + self.assert_(isinstance(data['a'], Decimal)) + self.assertEqual(data['a'], Decimal('100.0')) + + curs.execute("""select array['{"a": 100.0, "b": null}']::json[]""") + data = curs.fetchone()[0] + self.assert_(isinstance(data[0]['a'], Decimal)) + self.assertEqual(data[0]['a'], Decimal('100.0')) + + @skip_if_no_json_type + def 
test_null(self): + psycopg2.extras.register_json(self.conn) + curs = self.conn.cursor() + curs.execute("""select NULL::json""") + self.assertEqual(curs.fetchone()[0], None) + curs.execute("""select NULL::json[]""") + self.assertEqual(curs.fetchone()[0], None) + + def test_no_array_oid(self): + curs = self.conn.cursor() + t1, t2 = psycopg2.extras.register_json(curs, oid=25) + self.assertEqual(t1.values[0], 25) + self.assertEqual(t2, None) + + curs.execute("""select '{"a": 100.0, "b": null}'::text""") + data = curs.fetchone()[0] + self.assertEqual(data['a'], 100) + self.assertEqual(data['b'], None) + + def test_str(self): + snowman = "\u2603" + obj = {'a': [1, 2, snowman]} + j = psycopg2.extensions.adapt(psycopg2.extras.Json(obj)) + s = str(j) + self.assert_(isinstance(s, str)) + # no pesky b's + self.assert_(s.startswith("'")) + self.assert_(s.endswith("'")) + + @skip_before_postgres(8, 2) + def test_scs(self): + cnn_on = self.connect(options="-c standard_conforming_strings=on") + cur_on = cnn_on.cursor() + self.assertEqual( + cur_on.mogrify("%s", [psycopg2.extras.Json({'a': '"'})]), + b'\'{"a": "\\""}\'') + + cnn_off = self.connect(options="-c standard_conforming_strings=off") + cur_off = cnn_off.cursor() + self.assertEqual( + cur_off.mogrify("%s", [psycopg2.extras.Json({'a': '"'})]), + b'E\'{"a": "\\\\""}\'') + + self.assertEqual( + cur_on.mogrify("%s", [psycopg2.extras.Json({'a': '"'})]), + b'\'{"a": "\\""}\'') + + +def skip_if_no_jsonb_type(f): + return skip_before_postgres(9, 4)(f) + + +@skip_if_no_jsonb_type +class JsonbTestCase(ConnectingTestCase): + @staticmethod + def myloads(s): + rv = json.loads(s) + rv['test'] = 1 + return rv + + def test_default_cast(self): + curs = self.conn.cursor() + + curs.execute("""select '{"a": 100.0, "b": null}'::jsonb""") + self.assertEqual(curs.fetchone()[0], {'a': 100.0, 'b': None}) + + if crdb_version(self.conn) is None: + curs.execute("""select array['{"a": 100.0, "b": null}']::jsonb[]""") + 
self.assertEqual(curs.fetchone()[0], [{'a': 100.0, 'b': None}]) + + def test_register_on_connection(self): + psycopg2.extras.register_json(self.conn, loads=self.myloads, name='jsonb') + curs = self.conn.cursor() + curs.execute("""select '{"a": 100.0, "b": null}'::jsonb""") + self.assertEqual(curs.fetchone()[0], {'a': 100.0, 'b': None, 'test': 1}) + + def test_register_on_cursor(self): + curs = self.conn.cursor() + psycopg2.extras.register_json(curs, loads=self.myloads, name='jsonb') + curs.execute("""select '{"a": 100.0, "b": null}'::jsonb""") + self.assertEqual(curs.fetchone()[0], {'a': 100.0, 'b': None, 'test': 1}) + + @restore_types + def test_register_globally(self): + new, newa = psycopg2.extras.register_json(self.conn, + loads=self.myloads, globally=True, name='jsonb') + curs = self.conn.cursor() + curs.execute("""select '{"a": 100.0, "b": null}'::jsonb""") + self.assertEqual(curs.fetchone()[0], {'a': 100.0, 'b': None, 'test': 1}) + + def test_loads(self): + json = psycopg2.extras.json + + def loads(s): + return json.loads(s, parse_float=Decimal) + + psycopg2.extras.register_json(self.conn, loads=loads, name='jsonb') + curs = self.conn.cursor() + curs.execute("""select '{"a": 100.0, "b": null}'::jsonb""") + data = curs.fetchone()[0] + self.assert_(isinstance(data['a'], Decimal)) + self.assertEqual(data['a'], Decimal('100.0')) + # sure we are not mangling json too? 
+ if crdb_version(self.conn) is None: + curs.execute("""select '{"a": 100.0, "b": null}'::json""") + data = curs.fetchone()[0] + self.assert_(isinstance(data['a'], float)) + self.assertEqual(data['a'], 100.0) + + def test_register_default(self): + curs = self.conn.cursor() + + def loads(s): + return psycopg2.extras.json.loads(s, parse_float=Decimal) + + psycopg2.extras.register_default_jsonb(curs, loads=loads) + + curs.execute("""select '{"a": 100.0, "b": null}'::jsonb""") + data = curs.fetchone()[0] + self.assert_(isinstance(data['a'], Decimal)) + self.assertEqual(data['a'], Decimal('100.0')) + + if crdb_version(self.conn) is None: + curs.execute("""select array['{"a": 100.0, "b": null}']::jsonb[]""") + data = curs.fetchone()[0] + self.assert_(isinstance(data[0]['a'], Decimal)) + self.assertEqual(data[0]['a'], Decimal('100.0')) + + def test_null(self): + curs = self.conn.cursor() + curs.execute("""select NULL::jsonb""") + self.assertEqual(curs.fetchone()[0], None) + if crdb_version(self.conn) is None: + curs.execute("""select NULL::jsonb[]""") + self.assertEqual(curs.fetchone()[0], None) + + +class RangeTestCase(unittest.TestCase): + def test_noparam(self): + r = Range() + + self.assert_(not r.isempty) + self.assertEqual(r.lower, None) + self.assertEqual(r.upper, None) + self.assert_(r.lower_inf) + self.assert_(r.upper_inf) + self.assert_(not r.lower_inc) + self.assert_(not r.upper_inc) + + def test_empty(self): + r = Range(empty=True) + + self.assert_(r.isempty) + self.assertEqual(r.lower, None) + self.assertEqual(r.upper, None) + self.assert_(not r.lower_inf) + self.assert_(not r.upper_inf) + self.assert_(not r.lower_inc) + self.assert_(not r.upper_inc) + + def test_nobounds(self): + r = Range(10, 20) + self.assertEqual(r.lower, 10) + self.assertEqual(r.upper, 20) + self.assert_(not r.isempty) + self.assert_(not r.lower_inf) + self.assert_(not r.upper_inf) + self.assert_(r.lower_inc) + self.assert_(not r.upper_inc) + + def test_bounds(self): + for bounds, 
lower_inc, upper_inc in [ + ('[)', True, False), + ('(]', False, True), + ('()', False, False), + ('[]', True, True)]: + r = Range(10, 20, bounds) + self.assertEqual(r.lower, 10) + self.assertEqual(r.upper, 20) + self.assert_(not r.isempty) + self.assert_(not r.lower_inf) + self.assert_(not r.upper_inf) + self.assertEqual(r.lower_inc, lower_inc) + self.assertEqual(r.upper_inc, upper_inc) + + def test_keywords(self): + r = Range(upper=20) + self.assertEqual(r.lower, None) + self.assertEqual(r.upper, 20) + self.assert_(not r.isempty) + self.assert_(r.lower_inf) + self.assert_(not r.upper_inf) + self.assert_(not r.lower_inc) + self.assert_(not r.upper_inc) + + r = Range(lower=10, bounds='(]') + self.assertEqual(r.lower, 10) + self.assertEqual(r.upper, None) + self.assert_(not r.isempty) + self.assert_(not r.lower_inf) + self.assert_(r.upper_inf) + self.assert_(not r.lower_inc) + self.assert_(not r.upper_inc) + + def test_bad_bounds(self): + self.assertRaises(ValueError, Range, bounds='(') + self.assertRaises(ValueError, Range, bounds='[}') + + def test_in(self): + r = Range(empty=True) + self.assert_(10 not in r) + + r = Range() + self.assert_(10 in r) + + r = Range(lower=10, bounds='[)') + self.assert_(9 not in r) + self.assert_(10 in r) + self.assert_(11 in r) + + r = Range(lower=10, bounds='()') + self.assert_(9 not in r) + self.assert_(10 not in r) + self.assert_(11 in r) + + r = Range(upper=20, bounds='()') + self.assert_(19 in r) + self.assert_(20 not in r) + self.assert_(21 not in r) + + r = Range(upper=20, bounds='(]') + self.assert_(19 in r) + self.assert_(20 in r) + self.assert_(21 not in r) + + r = Range(10, 20) + self.assert_(9 not in r) + self.assert_(10 in r) + self.assert_(11 in r) + self.assert_(19 in r) + self.assert_(20 not in r) + self.assert_(21 not in r) + + r = Range(10, 20, '(]') + self.assert_(9 not in r) + self.assert_(10 not in r) + self.assert_(11 in r) + self.assert_(19 in r) + self.assert_(20 in r) + self.assert_(21 not in r) + + r = 
Range(20, 10)
        # A reversed range (lower > upper) contains no point at all.
        self.assert_(9 not in r)
        self.assert_(10 not in r)
        self.assert_(11 not in r)
        self.assert_(19 not in r)
        self.assert_(20 not in r)
        self.assert_(21 not in r)

    def test_nonzero(self):
        # Only the explicitly empty range is falsy.
        self.assert_(Range())
        self.assert_(Range(10, 20))
        self.assert_(not Range(empty=True))

    def test_eq_hash(self):
        # Equal ranges must hash equal (required to use them as dict keys).
        def assert_equal(r1, r2):
            self.assert_(r1 == r2)
            self.assert_(hash(r1) == hash(r2))

        assert_equal(Range(empty=True), Range(empty=True))
        assert_equal(Range(), Range())
        assert_equal(Range(10, None), Range(10, None))
        assert_equal(Range(10, 20), Range(10, 20))
        # '[)' is the default bounds spec, so it compares equal to no spec.
        assert_equal(Range(10, 20), Range(10, 20, '[)'))
        assert_equal(Range(10, 20, '[]'), Range(10, 20, '[]'))

        # NOTE(review): hash(r1) != hash(r2) is stricter than Python
        # guarantees (unequal objects may legally collide); it holds for
        # the current Range.__hash__ but is an implementation detail.
        def assert_not_equal(r1, r2):
            self.assert_(r1 != r2)
            self.assert_(hash(r1) != hash(r2))

        assert_not_equal(Range(10, 20), Range(10, 21))
        assert_not_equal(Range(10, 20), Range(11, 20))
        assert_not_equal(Range(10, 20, '[)'), Range(10, 20, '[]'))

    def test_eq_wrong_type(self):
        # Comparison with a foreign type must not raise, just be unequal.
        self.assertNotEqual(Range(10, 20), ())

    def test_eq_subclass(self):
        # Equality holds across the Range subclass hierarchy.
        class IntRange(NumericRange):
            pass

        class PositiveIntRange(IntRange):
            pass

        self.assertEqual(Range(10, 20), IntRange(10, 20))
        self.assertEqual(PositiveIntRange(10, 20), IntRange(10, 20))

    # as the postgres docs describe for the server-side stuff,
    # ordering is rather arbitrary, but will remain stable
    # and consistent.
+ + def test_lt_ordering(self): + self.assert_(Range(empty=True) < Range(0, 4)) + self.assert_(not Range(1, 2) < Range(0, 4)) + self.assert_(Range(0, 4) < Range(1, 2)) + self.assert_(not Range(1, 2) < Range()) + self.assert_(Range() < Range(1, 2)) + self.assert_(not Range(1) < Range(upper=1)) + self.assert_(not Range() < Range()) + self.assert_(not Range(empty=True) < Range(empty=True)) + self.assert_(not Range(1, 2) < Range(1, 2)) + with raises_typeerror(): + self.assert_(1 < Range(1, 2)) + with raises_typeerror(): + self.assert_(not Range(1, 2) < 1) + + def test_gt_ordering(self): + self.assert_(not Range(empty=True) > Range(0, 4)) + self.assert_(Range(1, 2) > Range(0, 4)) + self.assert_(not Range(0, 4) > Range(1, 2)) + self.assert_(Range(1, 2) > Range()) + self.assert_(not Range() > Range(1, 2)) + self.assert_(Range(1) > Range(upper=1)) + self.assert_(not Range() > Range()) + self.assert_(not Range(empty=True) > Range(empty=True)) + self.assert_(not Range(1, 2) > Range(1, 2)) + with raises_typeerror(): + self.assert_(not 1 > Range(1, 2)) + with raises_typeerror(): + self.assert_(Range(1, 2) > 1) + + def test_le_ordering(self): + self.assert_(Range(empty=True) <= Range(0, 4)) + self.assert_(not Range(1, 2) <= Range(0, 4)) + self.assert_(Range(0, 4) <= Range(1, 2)) + self.assert_(not Range(1, 2) <= Range()) + self.assert_(Range() <= Range(1, 2)) + self.assert_(not Range(1) <= Range(upper=1)) + self.assert_(Range() <= Range()) + self.assert_(Range(empty=True) <= Range(empty=True)) + self.assert_(Range(1, 2) <= Range(1, 2)) + with raises_typeerror(): + self.assert_(1 <= Range(1, 2)) + with raises_typeerror(): + self.assert_(not Range(1, 2) <= 1) + + def test_ge_ordering(self): + self.assert_(not Range(empty=True) >= Range(0, 4)) + self.assert_(Range(1, 2) >= Range(0, 4)) + self.assert_(not Range(0, 4) >= Range(1, 2)) + self.assert_(Range(1, 2) >= Range()) + self.assert_(not Range() >= Range(1, 2)) + self.assert_(Range(1) >= Range(upper=1)) + self.assert_(Range() >= 
Range()) + self.assert_(Range(empty=True) >= Range(empty=True)) + self.assert_(Range(1, 2) >= Range(1, 2)) + with raises_typeerror(): + self.assert_(not 1 >= Range(1, 2)) + with raises_typeerror(): + self.assert_(Range(1, 2) >= 1) + + def test_pickling(self): + r = Range(0, 4) + self.assertEqual(loads(dumps(r)), r) + + def test_str(self): + ''' + Range types should have a short and readable ``str`` implementation. + + Using ``repr`` for all string conversions can be very unreadable for + longer types like ``DateTimeTZRange``. + ''' + + # Using the "u" prefix to make sure we have the proper return types in + # Python2 + expected = [ + '(0, 4)', + '[0, 4]', + '(0, 4]', + '[0, 4)', + 'empty', + ] + results = [] + + for bounds in ('()', '[]', '(]', '[)'): + r = Range(0, 4, bounds=bounds) + results.append(str(r)) + + r = Range(empty=True) + results.append(str(r)) + self.assertEqual(results, expected) + + def test_str_datetime(self): + ''' + Date-Time ranges should return a human-readable string as well on + string conversion. 
+ ''' + tz = timezone(timedelta(minutes=-5 * 60), "EST") + r = DateTimeTZRange(datetime(2010, 1, 1, tzinfo=tz), + datetime(2011, 1, 1, tzinfo=tz)) + expected = '[2010-01-01 00:00:00-05:00, 2011-01-01 00:00:00-05:00)' + result = str(r) + self.assertEqual(result, expected) + + +@skip_if_crdb("range") +@skip_before_postgres(9, 2, "range not supported before postgres 9.2") +class RangeCasterTestCase(ConnectingTestCase): + + builtin_ranges = ('int4range', 'int8range', 'numrange', + 'daterange', 'tsrange', 'tstzrange') + + def test_cast_null(self): + cur = self.conn.cursor() + for type in self.builtin_ranges: + cur.execute(f"select NULL::{type}") + r = cur.fetchone()[0] + self.assertEqual(r, None) + + def test_cast_empty(self): + cur = self.conn.cursor() + for type in self.builtin_ranges: + cur.execute(f"select 'empty'::{type}") + r = cur.fetchone()[0] + self.assert_(isinstance(r, Range), type) + self.assert_(r.isempty) + + def test_cast_inf(self): + cur = self.conn.cursor() + for type in self.builtin_ranges: + cur.execute(f"select '(,)'::{type}") + r = cur.fetchone()[0] + self.assert_(isinstance(r, Range), type) + self.assert_(not r.isempty) + self.assert_(r.lower_inf) + self.assert_(r.upper_inf) + + def test_cast_numbers(self): + cur = self.conn.cursor() + for type in ('int4range', 'int8range'): + cur.execute(f"select '(10,20)'::{type}") + r = cur.fetchone()[0] + self.assert_(isinstance(r, NumericRange)) + self.assert_(not r.isempty) + self.assertEqual(r.lower, 11) + self.assertEqual(r.upper, 20) + self.assert_(not r.lower_inf) + self.assert_(not r.upper_inf) + self.assert_(r.lower_inc) + self.assert_(not r.upper_inc) + + cur.execute("select '(10.2,20.6)'::numrange") + r = cur.fetchone()[0] + self.assert_(isinstance(r, NumericRange)) + self.assert_(not r.isempty) + self.assertEqual(r.lower, Decimal('10.2')) + self.assertEqual(r.upper, Decimal('20.6')) + self.assert_(not r.lower_inf) + self.assert_(not r.upper_inf) + self.assert_(not r.lower_inc) + self.assert_(not 
r.upper_inc) + + def test_cast_date(self): + cur = self.conn.cursor() + cur.execute("select '(2000-01-01,2012-12-31)'::daterange") + r = cur.fetchone()[0] + self.assert_(isinstance(r, DateRange)) + self.assert_(not r.isempty) + self.assertEqual(r.lower, date(2000, 1, 2)) + self.assertEqual(r.upper, date(2012, 12, 31)) + self.assert_(not r.lower_inf) + self.assert_(not r.upper_inf) + self.assert_(r.lower_inc) + self.assert_(not r.upper_inc) + + def test_cast_timestamp(self): + cur = self.conn.cursor() + ts1 = datetime(2000, 1, 1) + ts2 = datetime(2000, 12, 31, 23, 59, 59, 999) + cur.execute("select tsrange(%s, %s, '()')", (ts1, ts2)) + r = cur.fetchone()[0] + self.assert_(isinstance(r, DateTimeRange)) + self.assert_(not r.isempty) + self.assertEqual(r.lower, ts1) + self.assertEqual(r.upper, ts2) + self.assert_(not r.lower_inf) + self.assert_(not r.upper_inf) + self.assert_(not r.lower_inc) + self.assert_(not r.upper_inc) + + def test_cast_timestamptz(self): + cur = self.conn.cursor() + ts1 = datetime(2000, 1, 1, tzinfo=timezone(timedelta(minutes=600))) + ts2 = datetime(2000, 12, 31, 23, 59, 59, 999, + tzinfo=timezone(timedelta(minutes=600))) + cur.execute("select tstzrange(%s, %s, '[]')", (ts1, ts2)) + r = cur.fetchone()[0] + self.assert_(isinstance(r, DateTimeTZRange)) + self.assert_(not r.isempty) + self.assertEqual(r.lower, ts1) + self.assertEqual(r.upper, ts2) + self.assert_(not r.lower_inf) + self.assert_(not r.upper_inf) + self.assert_(r.lower_inc) + self.assert_(r.upper_inc) + + def test_adapt_number_range(self): + cur = self.conn.cursor() + + r = NumericRange(empty=True) + cur.execute("select %s::int4range", (r,)) + r1 = cur.fetchone()[0] + self.assert_(isinstance(r1, NumericRange)) + self.assert_(r1.isempty) + + r = NumericRange(10, 20) + cur.execute("select %s::int8range", (r,)) + r1 = cur.fetchone()[0] + self.assert_(isinstance(r1, NumericRange)) + self.assertEqual(r1.lower, 10) + self.assertEqual(r1.upper, 20) + self.assert_(r1.lower_inc) + 
self.assert_(not r1.upper_inc) + + r = NumericRange(Decimal('10.2'), Decimal('20.5'), '(]') + cur.execute("select %s::numrange", (r,)) + r1 = cur.fetchone()[0] + self.assert_(isinstance(r1, NumericRange)) + self.assertEqual(r1.lower, Decimal('10.2')) + self.assertEqual(r1.upper, Decimal('20.5')) + self.assert_(not r1.lower_inc) + self.assert_(r1.upper_inc) + + def test_adapt_numeric_range(self): + cur = self.conn.cursor() + + r = NumericRange(empty=True) + cur.execute("select %s::int4range", (r,)) + r1 = cur.fetchone()[0] + self.assert_(isinstance(r1, NumericRange), r1) + self.assert_(r1.isempty) + + r = NumericRange(10, 20) + cur.execute("select %s::int8range", (r,)) + r1 = cur.fetchone()[0] + self.assert_(isinstance(r1, NumericRange)) + self.assertEqual(r1.lower, 10) + self.assertEqual(r1.upper, 20) + self.assert_(r1.lower_inc) + self.assert_(not r1.upper_inc) + + r = NumericRange(Decimal('10.2'), Decimal('20.5'), '(]') + cur.execute("select %s::numrange", (r,)) + r1 = cur.fetchone()[0] + self.assert_(isinstance(r1, NumericRange)) + self.assertEqual(r1.lower, Decimal('10.2')) + self.assertEqual(r1.upper, Decimal('20.5')) + self.assert_(not r1.lower_inc) + self.assert_(r1.upper_inc) + + def test_adapt_date_range(self): + cur = self.conn.cursor() + + d1 = date(2012, 1, 1) + d2 = date(2012, 12, 31) + r = DateRange(d1, d2) + cur.execute("select %s", (r,)) + r1 = cur.fetchone()[0] + self.assert_(isinstance(r1, DateRange)) + self.assertEqual(r1.lower, d1) + self.assertEqual(r1.upper, d2) + self.assert_(r1.lower_inc) + self.assert_(not r1.upper_inc) + + r = DateTimeRange(empty=True) + cur.execute("select %s", (r,)) + r1 = cur.fetchone()[0] + self.assert_(isinstance(r1, DateTimeRange)) + self.assert_(r1.isempty) + + ts1 = datetime(2000, 1, 1, tzinfo=timezone(timedelta(minutes=600))) + ts2 = datetime(2000, 12, 31, 23, 59, 59, 999, + tzinfo=timezone(timedelta(minutes=600))) + r = DateTimeTZRange(ts1, ts2, '(]') + cur.execute("select %s", (r,)) + r1 = cur.fetchone()[0] + 
self.assert_(isinstance(r1, DateTimeTZRange)) + self.assertEqual(r1.lower, ts1) + self.assertEqual(r1.upper, ts2) + self.assert_(not r1.lower_inc) + self.assert_(r1.upper_inc) + + @restore_types + def test_register_range_adapter(self): + cur = self.conn.cursor() + cur.execute("create type textrange as range (subtype=text)") + rc = register_range('textrange', 'TextRange', cur) + + TextRange = rc.range + self.assert_(issubclass(TextRange, Range)) + self.assertEqual(TextRange.__name__, 'TextRange') + + r = TextRange('a', 'b', '(]') + cur.execute("select %s", (r,)) + r1 = cur.fetchone()[0] + self.assertEqual(r1.lower, 'a') + self.assertEqual(r1.upper, 'b') + self.assert_(not r1.lower_inc) + self.assert_(r1.upper_inc) + + cur.execute("select %s", ([r, r, r],)) + rs = cur.fetchone()[0] + self.assertEqual(len(rs), 3) + for r1 in rs: + self.assertEqual(r1.lower, 'a') + self.assertEqual(r1.upper, 'b') + self.assert_(not r1.lower_inc) + self.assert_(r1.upper_inc) + + def test_range_escaping(self): + cur = self.conn.cursor() + cur.execute("create type textrange as range (subtype=text)") + rc = register_range('textrange', 'TextRange', cur) + + TextRange = rc.range + cur.execute(""" + create table rangetest ( + id integer primary key, + range textrange)""") + + bounds = ['[)', '(]', '()', '[]'] + ranges = [TextRange(low, up, bounds[i % 4]) + for i, (low, up) in enumerate(zip( + [None] + list(map(chr, range(1, 128))), + list(map(chr, range(1, 128))) + [None], + ))] + ranges.append(TextRange()) + ranges.append(TextRange(empty=True)) + + errs = 0 + for i, r in enumerate(ranges): + # not all the ranges make sense: + # fun fact: select ascii('#') < ascii('$'), '#' < '$' + # yelds... t, f! At least in en_GB.UTF-8 collation. + # which seems suggesting a supremacy of the pound on the dollar. + # So some of these ranges will fail to insert. Be prepared but... 
+ try: + cur.execute(""" + savepoint x; + insert into rangetest (id, range) values (%s, %s); + """, (i, r)) + except psycopg2.DataError: + errs += 1 + cur.execute("rollback to savepoint x;") + + # ...not too many errors! in the above collate there are 17 errors: + # assume in other collates we won't find more than 30 + self.assert_(errs < 30, + "too many collate errors. Is the test working?") + + cur.execute("select id, range from rangetest order by id") + for i, r in cur: + self.assertEqual(ranges[i].lower, r.lower) + self.assertEqual(ranges[i].upper, r.upper) + self.assertEqual(ranges[i].lower_inc, r.lower_inc) + self.assertEqual(ranges[i].upper_inc, r.upper_inc) + self.assertEqual(ranges[i].lower_inf, r.lower_inf) + self.assertEqual(ranges[i].upper_inf, r.upper_inf) + + # clear the adapters to allow precise count by scripts/refcounter.py + del ext.adapters[TextRange, ext.ISQLQuote] + + def test_range_not_found(self): + cur = self.conn.cursor() + self.assertRaises(psycopg2.ProgrammingError, + register_range, 'nosuchrange', 'FailRange', cur) + + @restore_types + def test_schema_range(self): + cur = self.conn.cursor() + cur.execute("create schema rs") + cur.execute("create type r1 as range (subtype=text)") + cur.execute("create type r2 as range (subtype=text)") + cur.execute("create type rs.r2 as range (subtype=text)") + cur.execute("create type rs.r3 as range (subtype=text)") + cur.execute("savepoint x") + + register_range('r1', 'r1', cur) + ra2 = register_range('r2', 'r2', cur) + rars2 = register_range('rs.r2', 'r2', cur) + register_range('rs.r3', 'r3', cur) + + self.assertNotEqual( + ra2.typecaster.values[0], + rars2.typecaster.values[0]) + + self.assertRaises(psycopg2.ProgrammingError, + register_range, 'r3', 'FailRange', cur) + cur.execute("rollback to savepoint x;") + + self.assertRaises(psycopg2.ProgrammingError, + register_range, 'rs.r1', 'FailRange', cur) + cur.execute("rollback to savepoint x;") + + +def test_suite(): + return 
unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_with.py b/tests/test_with.py new file mode 100755 index 0000000000000000000000000000000000000000..f71989dce70343096c79764119378b61eb144acf --- /dev/null +++ b/tests/test_with.py @@ -0,0 +1,319 @@ +#!/usr/bin/env python + +# test_ctxman.py - unit test for connection and cursor used as context manager +# +# Copyright (C) 2012-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. 
+ +import psycopg2 +import psycopg2.extensions as ext + +import unittest +from .testutils import ConnectingTestCase, skip_before_postgres, skip_if_crdb + + +class WithTestCase(ConnectingTestCase): + def setUp(self): + ConnectingTestCase.setUp(self) + curs = self.conn.cursor() + try: + curs.execute("delete from test_with") + self.conn.commit() + except psycopg2.ProgrammingError: + # assume table doesn't exist + self.conn.rollback() + curs.execute("create table test_with (id integer primary key)") + self.conn.commit() + + +class WithConnectionTestCase(WithTestCase): + def test_with_ok(self): + with self.conn as conn: + self.assert_(self.conn is conn) + self.assertEqual(conn.status, ext.STATUS_READY) + curs = conn.cursor() + curs.execute("insert into test_with values (1)") + self.assertEqual(conn.status, ext.STATUS_BEGIN) + + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assert_(not self.conn.closed) + + curs = self.conn.cursor() + curs.execute("select * from test_with") + self.assertEqual(curs.fetchall(), [(1,)]) + + def test_with_connect_idiom(self): + with self.connect() as conn: + self.assertEqual(conn.status, ext.STATUS_READY) + curs = conn.cursor() + curs.execute("insert into test_with values (2)") + self.assertEqual(conn.status, ext.STATUS_BEGIN) + + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assert_(not self.conn.closed) + + curs = self.conn.cursor() + curs.execute("select * from test_with") + self.assertEqual(curs.fetchall(), [(2,)]) + + def test_with_error_db(self): + def f(): + with self.conn as conn: + curs = conn.cursor() + curs.execute("insert into test_with values ('a')") + + self.assertRaises(psycopg2.DataError, f) + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assert_(not self.conn.closed) + + curs = self.conn.cursor() + curs.execute("select * from test_with") + self.assertEqual(curs.fetchall(), []) + + def test_with_error_python(self): + def f(): + with self.conn as conn: + curs = conn.cursor() + 
curs.execute("insert into test_with values (3)") + 1 / 0 + + self.assertRaises(ZeroDivisionError, f) + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assert_(not self.conn.closed) + + curs = self.conn.cursor() + curs.execute("select * from test_with") + self.assertEqual(curs.fetchall(), []) + + def test_with_closed(self): + def f(): + with self.conn: + pass + + self.conn.close() + self.assertRaises(psycopg2.InterfaceError, f) + + def test_subclass_commit(self): + commits = [] + + class MyConn(ext.connection): + def commit(self): + commits.append(None) + super().commit() + + with self.connect(connection_factory=MyConn) as conn: + curs = conn.cursor() + curs.execute("insert into test_with values (10)") + + self.assertEqual(conn.status, ext.STATUS_READY) + self.assert_(commits) + + curs = self.conn.cursor() + curs.execute("select * from test_with") + self.assertEqual(curs.fetchall(), [(10,)]) + + def test_subclass_rollback(self): + rollbacks = [] + + class MyConn(ext.connection): + def rollback(self): + rollbacks.append(None) + super().rollback() + + try: + with self.connect(connection_factory=MyConn) as conn: + curs = conn.cursor() + curs.execute("insert into test_with values (11)") + 1 / 0 + except ZeroDivisionError: + pass + else: + self.assert_("exception not raised") + + self.assertEqual(conn.status, ext.STATUS_READY) + self.assert_(rollbacks) + + curs = conn.cursor() + curs.execute("select * from test_with") + self.assertEqual(curs.fetchall(), []) + + def test_cant_reenter(self): + raised_ok = False + with self.conn: + try: + with self.conn: + pass + except psycopg2.ProgrammingError: + raised_ok = True + + self.assert_(raised_ok) + + # Still good + with self.conn: + pass + + def test_with_autocommit(self): + self.conn.autocommit = True + self.assertEqual( + self.conn.info.transaction_status, ext.TRANSACTION_STATUS_IDLE + ) + with self.conn: + curs = self.conn.cursor() + curs.execute("insert into test_with values (1)") + self.assertEqual( + 
self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_INTRANS, + ) + + self.assertEqual( + self.conn.info.transaction_status, ext.TRANSACTION_STATUS_IDLE + ) + curs.execute("select count(*) from test_with") + self.assertEqual(curs.fetchone()[0], 1) + self.assertEqual( + self.conn.info.transaction_status, ext.TRANSACTION_STATUS_IDLE + ) + + def test_with_autocommit_pyerror(self): + self.conn.autocommit = True + raised_ok = False + try: + with self.conn: + curs = self.conn.cursor() + curs.execute("insert into test_with values (2)") + self.assertEqual( + self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_INTRANS, + ) + 1 / 0 + except ZeroDivisionError: + raised_ok = True + + self.assert_(raised_ok) + self.assertEqual( + self.conn.info.transaction_status, ext.TRANSACTION_STATUS_IDLE + ) + curs.execute("select count(*) from test_with") + self.assertEqual(curs.fetchone()[0], 0) + self.assertEqual( + self.conn.info.transaction_status, ext.TRANSACTION_STATUS_IDLE + ) + + def test_with_autocommit_pgerror(self): + self.conn.autocommit = True + raised_ok = False + try: + with self.conn: + curs = self.conn.cursor() + curs.execute("insert into test_with values (2)") + self.assertEqual( + self.conn.info.transaction_status, + ext.TRANSACTION_STATUS_INTRANS, + ) + curs.execute("insert into test_with values ('x')") + except psycopg2.errors.InvalidTextRepresentation: + raised_ok = True + + self.assert_(raised_ok) + self.assertEqual( + self.conn.info.transaction_status, ext.TRANSACTION_STATUS_IDLE + ) + curs.execute("select count(*) from test_with") + self.assertEqual(curs.fetchone()[0], 0) + self.assertEqual( + self.conn.info.transaction_status, ext.TRANSACTION_STATUS_IDLE + ) + + +class WithCursorTestCase(WithTestCase): + def test_with_ok(self): + with self.conn as conn: + with conn.cursor() as curs: + curs.execute("insert into test_with values (4)") + self.assert_(not curs.closed) + self.assertEqual(self.conn.status, ext.STATUS_BEGIN) + self.assert_(curs.closed) + + 
self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assert_(not self.conn.closed) + + curs = self.conn.cursor() + curs.execute("select * from test_with") + self.assertEqual(curs.fetchall(), [(4,)]) + + def test_with_error(self): + try: + with self.conn as conn: + with conn.cursor() as curs: + curs.execute("insert into test_with values (5)") + 1 / 0 + except ZeroDivisionError: + pass + + self.assertEqual(self.conn.status, ext.STATUS_READY) + self.assert_(not self.conn.closed) + self.assert_(curs.closed) + + curs = self.conn.cursor() + curs.execute("select * from test_with") + self.assertEqual(curs.fetchall(), []) + + def test_subclass(self): + closes = [] + + class MyCurs(ext.cursor): + def close(self): + closes.append(None) + super().close() + + with self.conn.cursor(cursor_factory=MyCurs) as curs: + self.assert_(isinstance(curs, MyCurs)) + + self.assert_(curs.closed) + self.assert_(closes) + + @skip_if_crdb("named cursor") + def test_exception_swallow(self): + # bug #262: __exit__ calls cur.close() that hides the exception + # with another error. + try: + with self.conn as conn: + with conn.cursor('named') as cur: + cur.execute("select 1/0") + cur.fetchone() + except psycopg2.DataError as e: + self.assertEqual(e.pgcode, '22012') + else: + self.fail("where is my exception?") + + @skip_if_crdb("named cursor") + @skip_before_postgres(8, 2) + def test_named_with_noop(self): + with self.conn.cursor('named'): + pass + + +def test_suite(): + return unittest.TestLoader().loadTestsFromName(__name__) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/testconfig.py b/tests/testconfig.py new file mode 100644 index 0000000000000000000000000000000000000000..ed6132a84cebf102085dded9c1261346cbdbb1dd --- /dev/null +++ b/tests/testconfig.py @@ -0,0 +1,42 @@ +# Configure the test suite from the env variables. 

import os

# Test database coordinates: the PSYCOPG2_* variables win, falling back to
# the standard libpq PG* environment variables, then to built-in defaults.
dbname = os.environ.get('PSYCOPG2_TESTDB', 'psycopg2_test')
dbhost = os.environ.get('PSYCOPG2_TESTDB_HOST', os.environ.get('PGHOST'))
dbport = os.environ.get('PSYCOPG2_TESTDB_PORT', os.environ.get('PGPORT'))
dbuser = os.environ.get('PSYCOPG2_TESTDB_USER', os.environ.get('PGUSER'))
dbpass = os.environ.get('PSYCOPG2_TESTDB_PASSWORD', os.environ.get('PGPASSWORD'))

# Check if we want to test psycopg's green path.
# '1' uses the stock wait_select callback; 'eventlet' uses the eventlet
# patcher (requires the eventlet package); any other non-empty value is an
# error. Unset/empty leaves the blocking (non-green) path in use.
green = os.environ.get('PSYCOPG2_TEST_GREEN', None)
if green:
    if green == '1':
        from psycopg2.extras import wait_select as wait_callback
    elif green == 'eventlet':
        from eventlet.support.psycopg2_patcher import eventlet_wait_callback \
            as wait_callback
    else:
        raise ValueError("please set 'PSYCOPG2_TEST_GREEN' to a valid value")

    import psycopg2.extensions
    psycopg2.extensions.set_wait_callback(wait_callback)

# Construct a DSN to connect to the test database:
# only the parameters actually configured are included.
dsn = f'dbname={dbname}'
if dbhost is not None:
    dsn += f' host={dbhost}'
if dbport is not None:
    dsn += f' port={dbport}'
if dbuser is not None:
    dsn += f' user={dbuser}'
if dbpass is not None:
    dsn += f' password={dbpass}'

# Don't run replication tests if REPL_DSN is not set, default to normal DSN if
# set to empty string.
repl_dsn = os.environ.get('PSYCOPG2_TEST_REPL_DSN', None)
if repl_dsn == '':
    repl_dsn = dsn

repl_slot = os.environ.get('PSYCOPG2_TEST_REPL_SLOT', 'psycopg2_test_slot')
diff --git a/tests/testutils.py b/tests/testutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1384e843e1426265bd8f9dc1e2da40d1b0bbf09a
--- /dev/null
+++ b/tests/testutils.py
@@ -0,0 +1,544 @@
+# testutils.py - utility module for psycopg2 testing.
+ +# +# Copyright (C) 2010-2019 Daniele Varrazzo +# Copyright (C) 2020-2021 The Psycopg Team +# +# psycopg2 is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# In addition, as a special exception, the copyright holders give +# permission to link this program with the OpenSSL library (or with +# modified versions of OpenSSL that use the same license as OpenSSL), +# and distribute linked combinations including the two. +# +# You must obey the GNU Lesser General Public License in all respects for +# all of the code used other than OpenSSL. +# +# psycopg2 is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + + +import re +import os +import sys +import types +import ctypes +import select +import operator +import platform +import unittest +from functools import wraps +from ctypes.util import find_library +from io import StringIO # noqa +from io import TextIOBase # noqa +from importlib import reload # noqa + +import psycopg2 +import psycopg2.errors +import psycopg2.extensions + +from .testconfig import green, dsn, repl_dsn + + +# Silence warnings caused by the stubbornness of the Python unittest +# maintainers +# https://bugs.python.org/issue9424 +if (not hasattr(unittest.TestCase, 'assert_') + or unittest.TestCase.assert_ is not unittest.TestCase.assertTrue): + # mavaff... 
+ unittest.TestCase.assert_ = unittest.TestCase.assertTrue + unittest.TestCase.failUnless = unittest.TestCase.assertTrue + unittest.TestCase.assertEquals = unittest.TestCase.assertEqual + unittest.TestCase.failUnlessEqual = unittest.TestCase.assertEqual + + +def assertDsnEqual(self, dsn1, dsn2, msg=None): + """Check that two conninfo string have the same content""" + self.assertEqual(set(dsn1.split()), set(dsn2.split()), msg) + + +unittest.TestCase.assertDsnEqual = assertDsnEqual + + +class ConnectingTestCase(unittest.TestCase): + """A test case providing connections for tests. + + A connection for the test is always available as `self.conn`. Others can be + created with `self.connect()`. All are closed on tearDown. + + Subclasses needing to customize setUp and tearDown should remember to call + the base class implementations. + """ + def setUp(self): + self._conns = [] + + def tearDown(self): + # close the connections used in the test + for conn in self._conns: + if not conn.closed: + conn.close() + + def assertQuotedEqual(self, first, second, msg=None): + """Compare two quoted strings disregarding eventual E'' quotes""" + def f(s): + if isinstance(s, str): + return re.sub(r"\bE'", "'", s) + elif isinstance(first, bytes): + return re.sub(br"\bE'", b"'", s) + else: + return s + + return self.assertEqual(f(first), f(second), msg) + + def connect(self, **kwargs): + try: + self._conns + except AttributeError as e: + raise AttributeError( + f"{e} (did you forget to call ConnectingTestCase.setUp()?)") + + if 'dsn' in kwargs: + conninfo = kwargs.pop('dsn') + else: + conninfo = dsn + conn = psycopg2.connect(conninfo, **kwargs) + self._conns.append(conn) + return conn + + def repl_connect(self, **kwargs): + """Return a connection set up for replication + + The connection is on "PSYCOPG2_TEST_REPL_DSN" unless overridden by + a *dsn* kwarg. + + Should raise a skip test if not available, but guard for None on + old Python versions. 
+ """ + if repl_dsn is None: + return self.skipTest("replication tests disabled by default") + + if 'dsn' not in kwargs: + kwargs['dsn'] = repl_dsn + try: + conn = self.connect(**kwargs) + if conn.async_ == 1: + self.wait(conn) + except psycopg2.OperationalError as e: + # If pgcode is not set it is a genuine connection error + # Otherwise we tried to run some bad operation in the connection + # (e.g. bug #482) and we'd rather know that. + if e.pgcode is None: + return self.skipTest(f"replication db not configured: {e}") + else: + raise + + return conn + + def _get_conn(self): + if not hasattr(self, '_the_conn'): + self._the_conn = self.connect() + + return self._the_conn + + def _set_conn(self, conn): + self._the_conn = conn + + conn = property(_get_conn, _set_conn) + + # for use with async connections only + def wait(self, cur_or_conn): + pollable = cur_or_conn + if not hasattr(pollable, 'poll'): + pollable = cur_or_conn.connection + while True: + state = pollable.poll() + if state == psycopg2.extensions.POLL_OK: + break + elif state == psycopg2.extensions.POLL_READ: + select.select([pollable], [], [], 1) + elif state == psycopg2.extensions.POLL_WRITE: + select.select([], [pollable], [], 1) + else: + raise Exception("Unexpected result from poll: %r", state) + + _libpq = None + + @property + def libpq(self): + """Return a ctypes wrapper for the libpq library""" + if ConnectingTestCase._libpq is not None: + return ConnectingTestCase._libpq + + libname = find_library('pq') + if libname is None and platform.system() == 'Windows': + raise self.skipTest("can't import libpq on windows") + + try: + rv = ConnectingTestCase._libpq = ctypes.pydll.LoadLibrary(libname) + except OSError as e: + raise self.skipTest("couldn't open libpq for testing: %s" % e) + return rv + + +def decorate_all_tests(obj, *decorators): + """ + Apply all the *decorators* to all the tests defined in the TestCase *obj*. 

    The decorator can also be applied to a decorator: if *obj* is a function,
    return a new decorator which can be applied either to a method or to a
    class, in which case it will decorate all the tests.
    """
    if isinstance(obj, types.FunctionType):
        # Used as @decorate_all_tests on a decorator function: wrap it so
        # that it works on both plain test functions and TestCase classes.
        def decorator(func_or_cls):
            if isinstance(func_or_cls, types.FunctionType):
                return obj(func_or_cls)
            else:
                # Recurse with the class as *obj*: decorates all its tests.
                decorate_all_tests(func_or_cls, obj)
                return func_or_cls

        return decorator

    # Direct use: wrap every attribute whose name starts with 'test'.
    for n in dir(obj):
        if n.startswith('test'):
            for d in decorators:
                setattr(obj, n, d(getattr(obj, n)))


@decorate_all_tests
def skip_if_no_uuid(f):
    """Decorator to skip a test if uuid is not supported by PG."""
    @wraps(f)
    def skip_if_no_uuid_(self):
        # Probe the server catalog; roll back in any case so the test
        # starts from a clean transaction.
        try:
            cur = self.conn.cursor()
            cur.execute("select typname from pg_type where typname = 'uuid'")
            has = cur.fetchone()
        finally:
            self.conn.rollback()

        if has:
            return f(self)
        else:
            return self.skipTest("uuid type not available on the server")

    return skip_if_no_uuid_


@decorate_all_tests
def skip_if_tpc_disabled(f):
    """Skip a test if the server has tpc support disabled."""
    @wraps(f)
    def skip_if_tpc_disabled_(self):
        cnn = self.connect()
        # skip_if_crdb is defined later in this module: CockroachDB has no
        # two-phase commit support.
        skip_if_crdb("2-phase commit", cnn)

        cur = cnn.cursor()
        try:
            cur.execute("SHOW max_prepared_transactions;")
        except psycopg2.ProgrammingError:
            # Parameter introduced with 2PC support: older servers error out.
            return self.skipTest(
                "server too old: two phase transactions not supported.")
        else:
            mtp = int(cur.fetchone()[0])
        cnn.close()

        if not mtp:
            return self.skipTest(
                "server not configured for two phase transactions. 
" + "set max_prepared_transactions to > 0 to run the test") + return f(self) + + return skip_if_tpc_disabled_ + + +def skip_before_postgres(*ver): + """Skip a test on PostgreSQL before a certain version.""" + reason = None + if isinstance(ver[-1], str): + ver, reason = ver[:-1], ver[-1] + + ver = ver + (0,) * (3 - len(ver)) + + @decorate_all_tests + def skip_before_postgres_(f): + @wraps(f) + def skip_before_postgres__(self): + if self.conn.info.server_version < int("%d%02d%02d" % ver): + return self.skipTest( + reason or "skipped because PostgreSQL %s" + % self.conn.info.server_version) + else: + return f(self) + + return skip_before_postgres__ + return skip_before_postgres_ + + +def skip_after_postgres(*ver): + """Skip a test on PostgreSQL after (including) a certain version.""" + ver = ver + (0,) * (3 - len(ver)) + + @decorate_all_tests + def skip_after_postgres_(f): + @wraps(f) + def skip_after_postgres__(self): + if self.conn.info.server_version >= int("%d%02d%02d" % ver): + return self.skipTest("skipped because PostgreSQL %s" + % self.conn.info.server_version) + else: + return f(self) + + return skip_after_postgres__ + return skip_after_postgres_ + + +def libpq_version(): + v = psycopg2.__libpq_version__ + if v >= 90100: + v = min(v, psycopg2.extensions.libpq_version()) + return v + + +def skip_before_libpq(*ver): + """Skip a test if libpq we're linked to is older than a certain version.""" + ver = ver + (0,) * (3 - len(ver)) + + def skip_before_libpq_(cls): + v = libpq_version() + decorator = unittest.skipIf( + v < int("%d%02d%02d" % ver), + f"skipped because libpq {v}", + ) + return decorator(cls) + return skip_before_libpq_ + + +def skip_after_libpq(*ver): + """Skip a test if libpq we're linked to is newer than a certain version.""" + ver = ver + (0,) * (3 - len(ver)) + + def skip_after_libpq_(cls): + v = libpq_version() + decorator = unittest.skipIf( + v >= int("%d%02d%02d" % ver), + f"skipped because libpq {v}", + ) + return decorator(cls) + return 
skip_after_libpq_ + + +def skip_before_python(*ver): + """Skip a test on Python before a certain version.""" + def skip_before_python_(cls): + decorator = unittest.skipIf( + sys.version_info[:len(ver)] < ver, + f"skipped because Python {'.'.join(map(str, sys.version_info[:len(ver)]))}", + ) + return decorator(cls) + return skip_before_python_ + + +def skip_from_python(*ver): + """Skip a test on Python after (including) a certain version.""" + def skip_from_python_(cls): + decorator = unittest.skipIf( + sys.version_info[:len(ver)] >= ver, + f"skipped because Python {'.'.join(map(str, sys.version_info[:len(ver)]))}", + ) + return decorator(cls) + return skip_from_python_ + + +@decorate_all_tests +def skip_if_no_superuser(f): + """Skip a test if the database user running the test is not a superuser""" + @wraps(f) + def skip_if_no_superuser_(self): + try: + return f(self) + except psycopg2.errors.InsufficientPrivilege: + self.skipTest("skipped because not superuser") + + return skip_if_no_superuser_ + + +def skip_if_green(reason): + def skip_if_green_(cls): + decorator = unittest.skipIf(green, reason) + return decorator(cls) + return skip_if_green_ + + +skip_copy_if_green = skip_if_green("copy in async mode currently not supported") + + +def skip_if_no_getrefcount(cls): + decorator = unittest.skipUnless( + hasattr(sys, 'getrefcount'), + 'no sys.getrefcount()', + ) + return decorator(cls) + + +def skip_if_windows(cls): + """Skip a test if run on windows""" + decorator = unittest.skipIf( + platform.system() == 'Windows', + "Not supported on Windows", + ) + return decorator(cls) + + +def crdb_version(conn, __crdb_version=[]): + """ + Return the CockroachDB version if that's the db being tested, else None. + + Return the number as an integer similar to PQserverVersion: return + v20.1.3 as 200103. + + Assume all the connections are on the same db: return a cached result on + following calls. 
+ + """ + if __crdb_version: + return __crdb_version[0] + + sver = conn.info.parameter_status("crdb_version") + if sver is None: + __crdb_version.append(None) + else: + m = re.search(r"\bv(\d+)\.(\d+)\.(\d+)", sver) + if not m: + raise ValueError( + f"can't parse CockroachDB version from {sver}") + + ver = int(m.group(1)) * 10000 + int(m.group(2)) * 100 + int(m.group(3)) + __crdb_version.append(ver) + + return __crdb_version[0] + + +def skip_if_crdb(reason, conn=None, version=None): + """Skip a test or test class if we are testing against CockroachDB. + + Can be used as a decorator for tests function or classes: + + @skip_if_crdb("my reason") + class SomeUnitTest(UnitTest): + # ... + + Or as a normal function if the *conn* argument is passed. + + If *version* is specified it should be a string such as ">= 20.1", "< 20", + "== 20.1.3": the test will be skipped only if the version matches. + + """ + if not isinstance(reason, str): + raise TypeError(f"reason should be a string, got {reason!r} instead") + + if conn is not None: + ver = crdb_version(conn) + if ver is not None and _crdb_match_version(ver, version): + if reason in crdb_reasons: + reason = ( + "%s (https://github.com/cockroachdb/cockroach/issues/%s)" + % (reason, crdb_reasons[reason])) + raise unittest.SkipTest( + f"not supported on CockroachDB {ver}: {reason}") + + @decorate_all_tests + def skip_if_crdb_(f): + @wraps(f) + def skip_if_crdb__(self, *args, **kwargs): + skip_if_crdb(reason, conn=self.connect(), version=version) + return f(self, *args, **kwargs) + + return skip_if_crdb__ + + return skip_if_crdb_ + + +# mapping from reason description to ticket number +crdb_reasons = { + "2-phase commit": 22329, + "backend pid": 35897, + "cancel": 41335, + "cast adds tz": 51692, + "cidr": 18846, + "composite": 27792, + "copy": 41608, + "deferrable": 48307, + "encoding": 35882, + "hstore": 41284, + "infinity date": 41564, + "interval style": 35807, + "large objects": 243, + "named cursor": 41412, + "nested 
array": 32552, + "notify": 41522, + "password_encryption": 42519, + "range": 41282, + "stored procedure": 1751, +} + + +def _crdb_match_version(version, pattern): + if pattern is None: + return True + + m = re.match(r'^(>|>=|<|<=|==|!=)\s*(\d+)(?:\.(\d+))?(?:\.(\d+))?$', pattern) + if m is None: + raise ValueError( + "bad crdb version pattern %r: should be 'OP MAJOR[.MINOR[.BUGFIX]]'" + % pattern) + + ops = {'>': 'gt', '>=': 'ge', '<': 'lt', '<=': 'le', '==': 'eq', '!=': 'ne'} + op = getattr(operator, ops[m.group(1)]) + ref = int(m.group(2)) * 10000 + int(m.group(3) or 0) * 100 + int(m.group(4) or 0) + return op(version, ref) + + +class raises_typeerror: + def __enter__(self): + pass + + def __exit__(self, type, exc, tb): + assert type is TypeError + return True + + +def slow(f): + """Decorator to mark slow tests we may want to skip + + Note: in order to find slow tests you can run: + + make check 2>&1 | ts -i "%.s" | sort -n + """ + @wraps(f) + def slow_(self): + if os.environ.get('PSYCOPG2_TEST_FAST', '0') != '0': + return self.skipTest("slow test") + return f(self) + return slow_ + + +def restore_types(f): + """Decorator to restore the adaptation system after running a test""" + @wraps(f) + def restore_types_(self): + types = psycopg2.extensions.string_types.copy() + adapters = psycopg2.extensions.adapters.copy() + try: + return f(self) + finally: + psycopg2.extensions.string_types.clear() + psycopg2.extensions.string_types.update(types) + psycopg2.extensions.adapters.clear() + psycopg2.extensions.adapters.update(adapters) + + return restore_types_ diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000000000000000000000000000000000..74faa1a545eebad9616cbd40b76e88c8cee1ae0c --- /dev/null +++ b/tox.ini @@ -0,0 +1,12 @@ +[tox] +envlist = {3.6,3.7,3.8,3.9} + +[testenv] +commands = make check +whitelist_externals = make +passenv = PG* PSYCOPG2_TEST* + +[flake8] +max-line-length = 85 +ignore = E128, W503, E741 +exclude = build, doc, tests/dbapi20.py