diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..ce3c9e6f --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +*.gcno +*.gcda +*.gcov +*.so +*.o diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..a54d21c5 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,3 @@ +[*] +indent_style = tab +indent_size = 4 diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..5ea3a003 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,4 @@ +pg_pathman*.sql linguist-vendored=true +*.h linguist-language=C +*.c linguist-language=C +*.spec linguist-vendored=true diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..b1e98a96 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,26 @@ + + + +### Problem description + +Explain your problem here (it's always better to provide reproduction steps) ... + + + +### Environment + + + + + + + + + + + diff --git a/.gitignore b/.gitignore index 7f9490f2..1bc422a5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,16 @@ .deps -isolation_output -results/pg_pathman.out +results/* regression.diffs regression.out *.o *.so *.pyc +*.gcda +*.gcno +*.gcov +*.log pg_pathman--*.sql +tags +cscope* +Dockerfile +testgres diff --git a/.travis.yml b/.travis.yml index 047a1c52..411c98aa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,20 +1,34 @@ -os: - - linux +os: linux -sudo: required -dist: trusty +dist: focal language: c -compiler: - - clang - - gcc +services: + - docker -before_install: - - sudo sh ./travis/apt.postgresql.org.sh +install: + - ./mk_dockerfile.sh + - docker-compose build -env: - - PGVERSION=9.5 CHECK_CODE=true - - PGVERSION=9.5 CHECK_CODE=false +script: + - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests + +notifications: + email: + on_success: change + on_failure: always -script: bash ./travis/pg-travis-test.sh +env: + - PG_VERSION=16 LEVEL=hardcore + - PG_VERSION=16 + - 
PG_VERSION=15 LEVEL=hardcore + - PG_VERSION=15 + - PG_VERSION=14 LEVEL=hardcore + - PG_VERSION=14 + - PG_VERSION=13 LEVEL=hardcore + - PG_VERSION=13 + - PG_VERSION=12 LEVEL=hardcore + - PG_VERSION=12 + - PG_VERSION=11 LEVEL=hardcore + - PG_VERSION=11 diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl new file mode 100644 index 00000000..4dd24ca5 --- /dev/null +++ b/Dockerfile.tmpl @@ -0,0 +1,40 @@ +FROM postgres:${PG_VERSION}-alpine + +# Install dependencies +RUN apk add --no-cache \ + openssl curl git patch \ + cmocka-dev \ + perl perl-ipc-run \ + python3 python3-dev py3-virtualenv \ + coreutils linux-headers \ + make musl-dev gcc bison flex \ + zlib-dev libedit-dev \ + pkgconf icu-dev clang clang15 clang-analyzer; + +# Install fresh valgrind +RUN apk add valgrind \ + --update-cache \ + --repository http://dl-3.alpinelinux.org/alpine/edge/main; + +# Environment +ENV LANG=C.UTF-8 PGDATA=/pg/data + +# Make directories +RUN mkdir -p ${PGDATA} && \ + mkdir -p /pg/testdir + +# Add data to test dir +ADD . /pg/testdir + +# Grant privileges +RUN chown -R postgres:postgres ${PGDATA} && \ + chown -R postgres:postgres /pg/testdir && \ + chmod a+rwx /usr/local/share/postgresql/extension && \ + find /usr/local/lib/postgresql -type d -print0 | xargs -0 chmod a+rwx + +COPY run_tests.sh /run.sh +RUN chmod 755 /run.sh + +USER postgres +WORKDIR /pg/testdir +ENTRYPOINT LEVEL=${LEVEL} /run.sh diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..263a54bd --- /dev/null +++ b/LICENSE @@ -0,0 +1,11 @@ +pg_pathman is released under the PostgreSQL License, a liberal Open Source license, similar to the BSD or MIT licenses. 
+ +Copyright (c) 2015-2017, Postgres Professional +Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +Portions Copyright (c) 1994, The Regents of the University of California + +Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. + +IN NO EVENT SHALL POSTGRES PROFESSIONAL BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF POSTGRES PROFESSIONAL HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +POSTGRES PROFESSIONAL SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND POSTGRES PROFESSIONAL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
diff --git a/META.json b/META.json new file mode 100644 index 00000000..c32d74ba --- /dev/null +++ b/META.json @@ -0,0 +1,46 @@ +{ + "name": "pg_pathman", + "abstract": "Fast partitioning tool for PostgreSQL", + "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", + "version": "1.5.12", + "maintainer": [ + "Arseny Sher " + ], + "license": "postgresql", + "resources": { + "bugtracker": { + "web": "https://github.com/postgrespro/pg_pathman/issues" + }, + "repository": { + "url": "git://github.com:postgrespro/pg_pathman.git", + "web": "https://github.com/postgrespro/pg_pathman", + "type": "git" + } + }, + "generated_by": "pgpro", + "provides": { + "pg_pathman": { + "file": "pg_pathman--1.5.sql", + "docfile": "README.md", + "version": "1.5.12", + "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" + } + }, + "meta-spec": { + "version": "1.0.0", + "url": "http://pgxn.org/meta/spec.txt" + }, + "tags": [ + "partitioning", + "partition", + "optimization", + "table", + "tables", + "custom node", + "runtime append", + "background worker", + "fdw", + "range", + "hash" + ] +} diff --git a/Makefile b/Makefile index 4224a99b..f6780044 100644 --- a/Makefile +++ b/Makefile @@ -1,22 +1,100 @@ # contrib/pg_pathman/Makefile MODULE_big = pg_pathman -OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o src/runtimeappend.o \ - src/runtime_merge_append.o src/pg_pathman.o src/dsm_array.o src/rangeset.o src/pl_funcs.o \ - src/pathman_workers.o src/hooks.o src/nodes_common.o src/xact_handling.o $(WIN32RES) + +OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ + src/runtime_append.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ + src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ + src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ + 
src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ + src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ + src/partition_overseer.o $(WIN32RES) + +ifdef USE_PGXS +override PG_CPPFLAGS += -I$(CURDIR)/src/include +else +override PG_CPPFLAGS += -I$(top_srcdir)/$(subdir)/src/include +endif EXTENSION = pg_pathman -EXTVERSION = 1.0 -DATA_built = $(EXTENSION)--$(EXTVERSION).sql -PGFILEDESC = "pg_pathman - partitioning tool" -REGRESS = pg_pathman -EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add -EXTRA_CLEAN = $(EXTENSION)--$(EXTVERSION).sql ./isolation_output +EXTVERSION = 1.5 + +DATA_built = pg_pathman--$(EXTVERSION).sql + +DATA = pg_pathman--1.0--1.1.sql \ + pg_pathman--1.1--1.2.sql \ + pg_pathman--1.2--1.3.sql \ + pg_pathman--1.3--1.4.sql \ + pg_pathman--1.4--1.5.sql + +PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" + +ifneq (pg_pathman,$(filter pg_pathman,$(PG_TEST_SKIP))) +REGRESS = pathman_array_qual \ + pathman_basic \ + pathman_bgw \ + pathman_cache_pranks \ + pathman_calamity \ + pathman_callbacks \ + pathman_column_type \ + pathman_cte \ + pathman_domains \ + pathman_dropped_cols \ + pathman_expressions \ + pathman_foreign_keys \ + pathman_gaps \ + pathman_inserts \ + pathman_interval \ + pathman_join_clause \ + pathman_lateral \ + pathman_hashjoin \ + pathman_mergejoin \ + pathman_only \ + pathman_param_upd_del \ + pathman_permissions \ + pathman_rebuild_deletes \ + pathman_rebuild_updates \ + pathman_rowmarks \ + pathman_runtime_nodes \ + pathman_subpartitions \ + pathman_update_node \ + pathman_update_triggers \ + pathman_upd_del \ + pathman_utility_stmt \ + pathman_views \ + pathman_CVE-2020-14350 +endif + +ISOLATION = insert_nodes for_update rollback_on_create_partitions + +REGRESS_OPTS = --temp-config $(top_srcdir)/$(subdir)/conf.add +ISOLATION_OPTS = --temp-config $(top_srcdir)/$(subdir)/conf.add + +CMOCKA_EXTRA_CLEAN = missing_basic.o missing_list.o missing_stringinfo.o 
missing_bitmapset.o rangeset_tests.o rangeset_tests +EXTRA_CLEAN = $(patsubst %,tests/cmocka/%, $(CMOCKA_EXTRA_CLEAN)) ifdef USE_PGXS -PG_CONFIG = pg_config +PG_CONFIG=pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) +VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') + +# check for declarative syntax +# this feature will not be ported to >=12 +ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) +REGRESS += pathman_declarative +OBJS += src/declarative.o +override PG_CPPFLAGS += -DENABLE_DECLARATIVE +endif + +# We cannot run isolation test for versions 12,13 in PGXS case +# because 'pg_isolation_regress' is not copied to install +# directory, see src/test/isolation/Makefile +ifeq ($(VNUM),$(filter 12% 13%,$(VNUM))) +undefine ISOLATION +undefine ISOLATION_OPTS +endif + include $(PGXS) else subdir = contrib/pg_pathman @@ -28,14 +106,14 @@ endif $(EXTENSION)--$(EXTVERSION).sql: init.sql hash.sql range.sql cat $^ > $@ -ISOLATIONCHECKS=insert_nodes for_update rollback_on_create_partitions +python_tests: + $(MAKE) -C tests/python partitioning_tests CASE=$(CASE) -submake-isolation: - $(MAKE) -C $(top_builddir)/src/test/isolation all +cmocka_tests: + $(MAKE) -C tests/cmocka check -isolationcheck: | submake-isolation - $(MKDIR_P) isolation_output - $(pg_isolation_regress_check) \ - --temp-config=$(top_srcdir)/$(subdir)/conf.add \ - --outputdir=./isolation_output \ - $(ISOLATIONCHECKS) +clean_gcov: + find . 
\ + -name "*.gcda" -delete -o \ + -name "*.gcno" -delete -o \ + -name "*.gcov" -delete diff --git a/README.md b/README.md index 0c5c36ce..1394bc6f 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,44 @@ -[![Build Status](https://travis-ci.org/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.org/postgrespro/pg_pathman) +[![Build Status](https://travis-ci.com/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.com/postgrespro/pg_pathman) +[![PGXN version](https://badge.fury.io/pg/pg_pathman.svg)](https://badge.fury.io/pg/pg_pathman) +[![codecov](https://codecov.io/gh/postgrespro/pg_pathman/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/pg_pathman) +[![GitHub license](https://img.shields.io/badge/license-PostgreSQL-blue.svg)](https://raw.githubusercontent.com/postgrespro/pg_pathman/master/LICENSE) + +### NOTE: this project is not under development anymore + +`pg_pathman` supports Postgres versions [11..15], but most probably it won't be ported to later releases. [Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here. # pg_pathman The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions. +The extension is compatible with: + + * PostgreSQL 12, 13; + * PostgreSQL with core-patch: 11, 14, 15; + * Postgres Pro Standard 11, 12, 13, 14, 15; + * Postgres Pro Enterprise; + +Take a look at our Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). 
+ ## Overview -**Partitioning** means splitting one large table into smaller pieces. Each row in such table is moved to a single partition according to the partitioning key. PostgreSQL supports partitioning via table inheritance: each partition must be created as a child table with CHECK CONSTRAINT. For example: +**Partitioning** means splitting one large table into smaller pieces. Each row in such table is moved to a single partition according to the partitioning key. PostgreSQL <= 10 supports partitioning via table inheritance: each partition must be created as a child table with CHECK CONSTRAINT: -``` +```plpgsql CREATE TABLE test (id SERIAL PRIMARY KEY, title TEXT); CREATE TABLE test_1 (CHECK ( id >= 100 AND id < 200 )) INHERITS (test); CREATE TABLE test_2 (CHECK ( id >= 200 AND id < 300 )) INHERITS (test); ``` +PostgreSQL 10 provides native partitioning: + +```plpgsql +CREATE TABLE test(id int4, value text) PARTITION BY RANGE(id); +CREATE TABLE test_1 PARTITION OF test FOR VALUES FROM (1) TO (10); +CREATE TABLE test_2 PARTITION OF test FOR VALUES FROM (10) TO (20); +``` + +It's not so different from the classic approach; there are implicit check constraints, and most of its limitations are still relevant. + Despite the flexibility, this approach forces the planner to perform an exhaustive search and to check constraints on each partition to determine whether it should be present in the plan or not. Large amount of partitions may result in significant planning overhead. The `pg_pathman` module features partition managing functions and optimized planning mechanism which utilizes knowledge of the partitions' structure. It stores partitioning configuration in the `pathman_config` table; each row contains a single entry for a partitioned table (relation name, partitioning column and its type). During the initialization stage the `pg_pathman` module caches some information about child partitions in the shared memory, which is used later for plan construction. 
Before a SELECT query is executed, `pg_pathman` traverses the condition tree in search of expressions like: @@ -22,7 +48,7 @@ VARIABLE OP CONST ``` where `VARIABLE` is a partitioning key, `OP` is a comparison operator (supported operators are =, <, <=, >, >=), `CONST` is a scalar value. For example: -``` +```plpgsql WHERE id = 150 ``` @@ -33,81 +59,135 @@ Based on the partitioning type and condition's operator, `pg_pathman` searches f More interesting features are yet to come. Stay tuned! -## Roadmap - - * Provide a way to create user-defined partition creation\destruction callbacks (issue [#22](https://github.com/postgrespro/pg_pathman/issues/22)) - * Implement LIST partitioning scheme; - * Optimize hash join (both tables are partitioned by join key). +## Feature highlights + + * HASH and RANGE partitioning schemes; + * Partitioning by expression and composite key; + * Both automatic and manual [partition management](#post-creation-partition-management); + * Support for integer, floating point, date and other types, including domains; + * Effective query planning for partitioned tables (JOINs, subselects etc); + * `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime; + * [`PartitionFilter`](#custom-plan-nodes): an efficient drop-in replacement for INSERT triggers; + * [`PartitionRouter`](#custom-plan-nodes) and [`PartitionOverseer`](#custom-plan-nodes) for cross-partition UPDATE queries (instead of triggers); + * Automatic partition creation for new INSERTed data (only for RANGE partitioning); + * Improved `COPY FROM` statement that is able to insert rows directly into partitions; + * [User-defined callbacks](#additional-parameters) for partition creation event handling; + * Non-blocking [concurrent table partitioning](#data-migration); + * FDW support (foreign partitions); + * Various [GUC](#disabling-pg_pathman) toggles and configurable settings. 
+ * Partial support of [`declarative partitioning`](#declarative-partitioning) (from PostgreSQL 10). ## Installation guide To install `pg_pathman`, execute this in the module's directory: -``` + +```shell make install USE_PGXS=1 ``` + +> **Important:** Don't forget to set the `PG_CONFIG` variable (`make PG_CONFIG=...`) in case you want to test `pg_pathman` on a non-default or custom build of PostgreSQL. Read more [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). + Modify the **`shared_preload_libraries`** parameter in `postgresql.conf` as following: + ``` shared_preload_libraries = 'pg_pathman' ``` + +> **Important:** `pg_pathman` may cause conflicts with some other extensions that use the same hook functions. For example, `pg_pathman` uses `ProcessUtility_hook` to handle COPY queries for partitioned tables, which means it may interfere with `pg_stat_statements` from time to time. In this case, try listing libraries in certain order: `shared_preload_libraries = 'pg_stat_statements, pg_pathman'`. + It is essential to restart the PostgreSQL instance. After that, execute the following query in psql: -``` -CREATE EXTENSION pg_pathman; +```plpgsql +CREATE SCHEMA pathman; +GRANT USAGE ON SCHEMA pathman TO PUBLIC; +CREATE EXTENSION pg_pathman WITH SCHEMA pathman; ``` Done! Now it's time to setup your partitioning schemes. -> **Important:** Don't forget to set the `PG_CONFIG` variable in case you want to test `pg_pathman` on a custom build of PostgreSQL. Read more [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). +> **Security notice**: pg_pathman is believed to be secure against +search-path-based attacks mentioned in Postgres +[documentation](https://www.postgresql.org/docs/current/sql-createextension.html). 
However, +if *your* calls of pathman's functions doesn't exactly match the signature, they +might be vulnerable to malicious overloading. If in doubt, install pathman to clean schema where nobody except superusers have CREATE object permission to avoid problems. + +> **Windows-specific**: pg_pathman imports several symbols (e.g. None_Receiver, InvalidObjectAddress) from PostgreSQL, which is fine by itself, but requires that those symbols are marked as `PGDLLIMPORT`. Unfortunately, some of them are not exported from vanilla PostgreSQL, which means that you have to either use Postgres Pro Standard/Enterprise (which includes all necessary patches), or patch and build your own distribution of PostgreSQL. + +## How to update +In order to update pg_pathman: + +1. Install the latest _stable_ release of pg_pathman. +2. Restart your PostgreSQL cluster. +3. Execute the following queries: + +```plpgsql +/* only required for major releases, e.g. 1.4 -> 1.5 */ +ALTER EXTENSION pg_pathman UPDATE; +SET pg_pathman.enable = t; +``` ## Available functions +### Module's version + +```plpgsql +pathman_version() +``` +Although it's possible to get major and minor version numbers using `\dx pg_pathman`, it doesn't show the actual [patch number](http://semver.org/). This function returns a complete version number of the loaded pg_pathman module in `MAJOR.MINOR.PATCH` format. + ### Partition creation ```plpgsql -create_hash_partitions(relation REGCLASS, - attribute TEXT, +create_hash_partitions(parent_relid REGCLASS, + expression TEXT, partitions_count INTEGER, - partition_name TEXT DEFAULT NULL) + partition_data BOOLEAN DEFAULT TRUE, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) ``` -Performs HASH partitioning for `relation` by integer key `attribute`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. 
If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may took a while to finish and the table will be locked until transaction commits. See `partition_table_concurrently()` for a lock-free way to migrate data. +Performs HASH partitioning for `relation` by partitioning expression `expr`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may took a while to finish and the table will be locked until transaction commits. See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). ```plpgsql -create_range_partitions(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - interval ANYELEMENT, - count INTEGER DEFAULT NULL - partition_data BOOLEAN DEFAULT true) - -create_range_partitions(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - interval INTERVAL, - count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT true) -``` -Performs RANGE partitioning for `relation` by partitioning key `attribute`. `start_value` argument specifies initial value, `interval` sets the range of values in a single partition, `count` is the number of premade partitions (if not set then pathman tries to determine it based on attribute values). 
+create_range_partitions(parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL + partition_data BOOLEAN DEFAULT TRUE) + +create_range_partitions(parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) + +create_range_partitions(parent_relid REGCLASS, + expression TEXT, + bounds ANYARRAY, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +``` +Performs RANGE partitioning for `relation` by partitioning expression `expr`, `start_value` argument specifies initial value, `p_interval` sets the default range for auto created partitions or partitions created with `append_range_partition()` or `prepend_range_partition()` (if `NULL` then auto partition creation feature won't work), `p_count` is the number of premade partitions (if not set then `pg_pathman` tries to determine it based on expression's values). The `bounds` array can be built using `generate_range_bounds()`. Partition creation callback is invoked for each partition if set beforehand. ```plpgsql -create_partitions_from_range(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - interval ANYELEMENT, - partition_data BOOLEAN DEFAULT true) +generate_range_bounds(p_start ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER) -create_partitions_from_range(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - interval INTERVAL, - partition_data BOOLEAN DEFAULT true) +generate_range_bounds(p_start ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER) ``` -Performs RANGE-partitioning from specified range for `relation` by partitioning key `attribute`. +Builds `bounds` array for `create_range_partitions()`. 
+ ### Data migration ```plpgsql -partition_table_concurrently(relation REGCLASS) +partition_table_concurrently(relation REGCLASS, + batch_size INTEGER DEFAULT 1000, + sleep_time FLOAT8 DEFAULT 1.0) ``` -Starts a background worker to move data from parent table to partitions. The worker utilizes short transactions to copy small batches of data (up to 10K rows per transaction) and thus doesn't significantly interfere with user's activity. +Starts a background worker to move data from parent table to partitions. The worker utilizes short transactions to copy small batches of data (up to 10K rows per transaction) and thus doesn't significantly interfere with user's activity. If the worker is unable to lock rows of a batch, it sleeps for `sleep_time` seconds before the next attempt and tries again up to 60 times, and quits if it's still unable to lock the batch. ```plpgsql stop_concurrent_part_task(relation REGCLASS) @@ -115,102 +195,268 @@ stop_concurrent_part_task(relation REGCLASS) Stops a background worker performing a concurrent partitioning task. Note: worker will exit after it finishes relocating a current batch. ### Triggers + +Triggers are no longer required nor for INSERTs, neither for cross-partition UPDATEs. However, user-supplied triggers *are supported*: + +* Each **inserted row** results in execution of `BEFORE/AFTER INSERT` trigger functions of a *corresponding partition*. +* Each **updated row** results in execution of `BEFORE/AFTER UPDATE` trigger functions of a *corresponding partition*. +* Each **moved row** (cross-partition update) results in execution of `BEFORE UPDATE` + `BEFORE/AFTER DELETE` + `BEFORE/AFTER INSERT` trigger functions of *corresponding partitions*. + +### Post-creation partition management ```plpgsql -create_hash_update_trigger(parent REGCLASS) -``` -Creates the trigger on UPDATE for HASH partitions. The UPDATE trigger isn't created by default because of the overhead. It's useful in cases when the key attribute might change. 
-```plpgsql -create_range_update_trigger(parent REGCLASS) +replace_hash_partition(old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOLEAN DEFAULT TRUE) ``` -Same as above, but for a RANGE-partitioned table. +Replaces specified partition of HASH-partitioned table with another table. The `lock_parent` parameter will prevent any INSERT/UPDATE/ALTER TABLE queries to parent table. + -### Post-creation partition management ```plpgsql -split_range_partition(partition REGCLASS, - value ANYELEMENT, - partition_name TEXT DEFAULT NULL,) +split_range_partition(partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) ``` -Split RANGE `partition` in two by `value`. +Split RANGE `partition` in two by `split_value`. Partition creation callback is invoked for a new partition if available. ```plpgsql -merge_range_partitions(partition1 REGCLASS, partition2 REGCLASS) +merge_range_partitions(variadic partitions REGCLASS[]) ``` -Merge two adjacent RANGE partitions. First, data from `partition2` is copied to `partition1`, then `partition2` is removed. +Merge several adjacent RANGE partitions. Partitions are automatically ordered by increasing bounds; all the data will be accumulated in the first partition. ```plpgsql -append_range_partition(p_relation REGCLASS, - partition_name TEXT DEFAULT NULL) +append_range_partition(parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) ``` -Append new RANGE partition. +Append new RANGE partition with `pathman_config.range_interval` as interval. ```plpgsql -prepend_range_partition(p_relation REGCLASS, - partition_name TEXT DEFAULT NULL) +prepend_range_partition(parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) ``` -Prepend new RANGE partition. +Prepend new RANGE partition with `pathman_config.range_interval` as interval. 
```plpgsql -add_range_partition(relation REGCLASS, +add_range_partition(parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL) + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) ``` -Create new RANGE partition for `relation` with specified range bounds. +Create new RANGE partition for `relation` with specified range bounds. If `start_value` or `end_value` are NULL then corresponding range bound will be infinite. ```plpgsql -drop_range_partition(partition TEXT) +drop_range_partition(partition TEXT, delete_data BOOLEAN DEFAULT TRUE) ``` -Drop RANGE partition and all its data. +Drop RANGE partition and all of its data if `delete_data` is true. ```plpgsql -attach_range_partition(relation REGCLASS, - partition REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT) +attach_range_partition(parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) ``` -Attach partition to the existing RANGE-partitioned relation. The attached table must have exactly the same structure as the parent table, including the dropped columns. +Attach partition to the existing RANGE-partitioned relation. The attached table must have exactly the same structure as the parent table, including the dropped columns. Partition creation callback is invoked if set (see `pathman_config_params`). ```plpgsql -detach_range_partition(partition REGCLASS) +detach_range_partition(partition_relid REGCLASS) ``` Detach partition from the existing RANGE-partitioned relation. ```plpgsql -disable_pathman_for(relation TEXT) +disable_pathman_for(parent_relid REGCLASS) ``` Permanently disable `pg_pathman` partitioning mechanism for the specified parent table and remove the insert trigger if it exists. All partitions and data remain unchanged. 
```plpgsql -drop_partitions(parent REGCLASS, - delete_data BOOLEAN DEFAULT FALSE) +drop_partitions(parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +``` +Drop partitions of the `parent` table (both foreign and local relations). If `delete_data` is `false`, the data is copied to the parent table first. Default is `false`. + +To remove partitioned table along with all partitions fully, use conventional +`DROP TABLE relation CASCADE`. However, care should be taken in somewhat rare +case when you are running logical replication and `DROP` was executed by +replication apply worker, e.g. via trigger on replicated table. `pg_pathman` +uses `pathman_ddl_trigger` event trigger to remove the record about dropped +table from `pathman_config`, and this trigger by default won't fire on replica, +leading to inconsistent state when `pg_pathman` thinks that the table still +exists, but in fact it doesn't. If this is the case, configure this trigger to +fire on replica too: + +```plpgsql +ALTER EVENT TRIGGER pathman_ddl_trigger ENABLE ALWAYS; ``` -Drop partitions of the `parent` table. If `delete_data` is `false` then the data is copied to the parent table first. Default is `false`. + +Physical replication doesn't have this problem since DDL as well as +`pathman_config` table is replicated too; master and slave PostgreSQL instances +are basically identical, and it is only harmful to keep this trigger in `ALWAYS` +mode. ### Additional parameters + +```plpgsql +set_interval(relation REGCLASS, value ANYELEMENT) +``` +Update RANGE partitioned table interval. Note that interval must not be negative and it must not be trivial, i.e. its value should be greater than zero for numeric types, at least 1 microsecond for `TIMESTAMP` and at least 1 day for `DATE`. + ```plpgsql -enable_parent(relation REGCLASS) -disable_parent(relation REGCLASS) +set_enable_parent(relation REGCLASS, value BOOLEAN) ``` -Include/exclude parent table into/from query plan. 
In original PostgreSQL planner parent table is always included into query plan even if it's empty which can lead to additional overhead. You can use `disable_parent()` if you are never going to use parent table as a storage. Default value depends on the `partition_data` parameter that was specified during initial partitioning in `create_range_partitions()` or `create_partitions_from_range()` functions. If the `partition_data` parameter was `true` then all data have already been migrated to partitions and parent table disabled. Otherwise it is enabled. +Include/exclude parent table into/from query plan. In original PostgreSQL planner parent table is always included into query plan even if it's empty which can lead to additional overhead. You can use `disable_parent()` if you are never going to use parent table as a storage. Default value depends on the `partition_data` parameter that was specified during initial partitioning in `create_range_partitions()` function. If the `partition_data` parameter was `true` then all data have already been migrated to partitions and parent table disabled. Otherwise it is enabled. ```plpgsql -enable_auto(relation REGCLASS) -disable_auto(relation REGCLASS) +set_auto(relation REGCLASS, value BOOLEAN) ``` Enable/disable auto partition propagation (only for RANGE partitioning). It is enabled by default. +```plpgsql +set_init_callback(relation REGCLASS, callback REGPROC DEFAULT 0) +``` +Set partition creation callback to be invoked for each attached or created partition (both HASH and RANGE). If callback is marked with SECURITY INVOKER, it's executed with the privileges of the user that produced a statement which has led to creation of a new partition (e.g. `INSERT INTO partitioned_table VALUES (-5)`). The callback must have the following signature: `part_init_callback(args JSONB) RETURNS VOID`. 
Parameter `args` consists of several fields whose presence depends on partitioning type: +```json +/* RANGE-partitioned table abc (child abc_4) */ +{ + "parent": "abc", + "parent_schema": "public", + "parttype": "2", + "partition": "abc_4", + "partition_schema": "public", + "range_max": "401", + "range_min": "301" +} + +/* HASH-partitioned table abc (child abc_0) */ +{ + "parent": "abc", + "parent_schema": "public", + "parttype": "1", + "partition": "abc_0", + "partition_schema": "public" +} +``` + +```plpgsql +set_spawn_using_bgw(relation REGCLASS, value BOOLEAN) +``` +When INSERTing new data beyond the partitioning range, use SpawnPartitionsWorker to create new partitions in a separate transaction. + +## Views and tables + +#### `pathman_config` --- main config storage +```plpgsql +CREATE TABLE IF NOT EXISTS pathman_config ( + partrel REGCLASS NOT NULL PRIMARY KEY, + expr TEXT NOT NULL, + parttype INTEGER NOT NULL, + range_interval TEXT, + cooked_expr TEXT); +``` +This table stores a list of partitioned tables. + +#### `pathman_config_params` --- optional parameters +```plpgsql +CREATE TABLE IF NOT EXISTS pathman_config_params ( + partrel REGCLASS NOT NULL PRIMARY KEY, + enable_parent BOOLEAN NOT NULL DEFAULT TRUE, + auto BOOLEAN NOT NULL DEFAULT TRUE, + init_callback TEXT DEFAULT NULL, + spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE); +``` +This table stores optional parameters which override standard behavior. + +#### `pathman_concurrent_part_tasks` --- currently running partitioning workers +```plpgsql +-- helper SRF function +CREATE OR REPLACE FUNCTION show_concurrent_part_tasks() +RETURNS TABLE ( + userid REGROLE, + pid INT, + dbid OID, + relid REGCLASS, + processed INT, + status TEXT) +AS 'pg_pathman', 'show_concurrent_part_tasks_internal' +LANGUAGE C STRICT; + +CREATE OR REPLACE VIEW pathman_concurrent_part_tasks +AS SELECT * FROM show_concurrent_part_tasks(); +``` +This view lists all currently running concurrent partitioning tasks. 
+ +#### `pathman_partition_list` --- list of all existing partitions +```plpgsql +-- helper SRF function +CREATE OR REPLACE FUNCTION show_partition_list() +RETURNS TABLE ( + parent REGCLASS, + partition REGCLASS, + parttype INT4, + expr TEXT, + range_min TEXT, + range_max TEXT) +AS 'pg_pathman', 'show_partition_list_internal' +LANGUAGE C STRICT; + +CREATE OR REPLACE VIEW pathman_partition_list +AS SELECT * FROM show_partition_list(); +``` +This view lists all existing partitions, as well as their parents and range boundaries (NULL for HASH partitions). + +#### `pathman_cache_stats` --- per-backend memory consumption +```plpgsql +-- helper SRF function +CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() +RETURNS TABLE ( + context TEXT, + size INT8, + used INT8, + entries INT8) +AS 'pg_pathman', 'show_cache_stats_internal' +LANGUAGE C STRICT; + +CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats +AS SELECT * FROM @extschema@.show_cache_stats(); +``` +Shows memory consumption of various caches. + +## Declarative partitioning + +From PostgreSQL 10 `ATTACH PARTITION`, `DETACH PARTITION` +and `CREATE TABLE .. 
PARTITION OF` commands could be used with tables +partitioned by `pg_pathman`: + +```plpgsql +CREATE TABLE child1 (LIKE partitioned_table); + +--- attach new partition +ALTER TABLE partitioned_table ATTACH PARTITION child1 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); + +--- detach the partition +ALTER TABLE partitioned_table DETACH PARTITION child1; + +-- create a partition +CREATE TABLE child2 PARTITION OF partitioned_table + FOR VALUES IN ('2015-05-01', '2015-06-01'); +``` + ## Custom plan nodes `pg_pathman` provides a couple of [custom plan nodes](https://wiki.postgresql.org/wiki/CustomScanAPI) which aim to reduce execution time, namely: - `RuntimeAppend` (overrides `Append` plan node) - `RuntimeMergeAppend` (overrides `MergeAppend` plan node) - `PartitionFilter` (drop-in replacement for INSERT triggers) +- `PartitionOverseer` (implements cross-partition UPDATEs) +- `PartitionRouter` (implements cross-partition UPDATEs) `PartitionFilter` acts as a *proxy node* for INSERT's child scan, which means it can redirect output tuples to the corresponding partition: -``` +```plpgsql EXPLAIN (COSTS OFF) INSERT INTO partitioned_table SELECT generate_series(1, 10), random(); @@ -223,6 +469,29 @@ SELECT generate_series(1, 10), random(); (4 rows) ``` +`PartitionOverseer` and `PartitionRouter` are another *proxy nodes* used +in conjunction with `PartitionFilter` to enable cross-partition UPDATEs +(i.e. when update of partitioning key requires that we move row to another +partition). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; +cross-partition `UPDATE` is transformed into `DELETE + INSERT`), +it is disabled by default. +To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. 
+ +```plpgsql +EXPLAIN (COSTS OFF) +UPDATE partitioned_table +SET value = value + 1 WHERE value = 2; + QUERY PLAN +--------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on partitioned_table_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) + -> Seq Scan on partitioned_table_2 + Filter: (value = 2) +(6 rows) +``` + `RuntimeAppend` and `RuntimeMergeAppend` have much in common: they come in handy in a case when WHERE condition takes form of: ``` VARIABLE OP PARAM @@ -233,7 +502,7 @@ This kind of expressions can no longer be optimized at planning time since the p There are at least several cases that demonstrate usefulness of these nodes: -``` +```plpgsql /* create table we're going to partition */ CREATE TABLE partitioned_table(id INT NOT NULL, payload REAL); @@ -250,7 +519,7 @@ CREATE TABLE some_table AS SELECT generate_series(1, 100) AS VAL; - **`id = (select ... limit 1)`** -``` +```plpgsql EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table WHERE id = (SELECT * FROM some_table LIMIT 1); QUERY PLAN @@ -290,7 +559,7 @@ WHERE id = (SELECT * FROM some_table LIMIT 1); ``` - **`id = ANY (select ...)`** -``` +```plpgsql EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table WHERE id = any (SELECT * FROM some_table limit 4); QUERY PLAN @@ -342,24 +611,42 @@ In case you're interested, you can read more about custom nodes at Alexander Kor ### Common tips - You can easily add **_partition_** column containing the names of the underlying partitions using the system attribute called **_tableoid_**: -``` +```plpgsql SELECT tableoid::regclass AS partition, * FROM partitioned_table; ``` -- Though indices on a parent table aren't particularly useful (since it's empty), they act as prototypes for indices on partitions. For each index on the parent table, `pg_pathman` will create a similar index on every partition. 
+- Though indices on a parent table aren't particularly useful (since it's supposed to be empty), they act as prototypes for indices on partitions. For each index on the parent table, `pg_pathman` will create a similar index on every partition. - All running concurrent partitioning tasks can be listed using the `pathman_concurrent_part_tasks` view: ```plpgsql SELECT * FROM pathman_concurrent_part_tasks; - userid | pid | dbid | relid | processed | status + userid | pid | dbid | relid | processed | status --------+------+-------+-------+-----------+--------- dmitry | 7367 | 16384 | test | 472000 | working (1 row) ``` +- `pathman_partition_list` in conjunction with `drop_range_partition()` can be used to drop RANGE partitions in a more flexible way compared to good old `DROP TABLE`: +```plpgsql +SELECT drop_range_partition(partition, false) /* move data to parent */ +FROM pathman_partition_list +WHERE parent = 'part_test'::regclass AND range_min::int < 500; +NOTICE: 1 rows copied from part_test_11 +NOTICE: 100 rows copied from part_test_1 +NOTICE: 100 rows copied from part_test_2 + drop_range_partition +---------------------- + dummy_test_11 + dummy_test_1 + dummy_test_2 +(3 rows) +``` + +- You can turn foreign tables into partitions using the `attach_range_partition()` function. Rows that were meant to be inserted into parent will be redirected to foreign partitions (as usual, PartitionFilter will be involved), though by default it is prohibited to insert rows into partitions provided not by `postgres_fdw`. Only superuser is allowed to set `pg_pathman.insert_into_fdw` [GUC](#disabling-pg_pathman) variable. + ### HASH partitioning Consider an example of HASH partitioning. 
First create a table with some integer column: -``` +```plpgsql CREATE TABLE items ( id SERIAL PRIMARY KEY, name TEXT, @@ -370,13 +657,13 @@ SELECT g, md5(g::text), random() * 100000 FROM generate_series(1, 100000) as g; ``` Now run the `create_hash_partitions()` function with appropriate arguments: -``` +```plpgsql SELECT create_hash_partitions('items', 'id', 100); ``` This will create new partitions and move the data from parent to partitions. Here's an example of the query performing filtering by partitioning key: -``` +```plpgsql SELECT * FROM items WHERE id = 1234; id | name | code ------+----------------------------------+------ @@ -396,7 +683,7 @@ Notice that the `Append` node contains only one child scan which corresponds to > **Important:** pay attention to the fact that `pg_pathman` excludes the parent table from the query plan. To access parent table use ONLY modifier: -``` +```plpgsql EXPLAIN SELECT * FROM ONLY items; QUERY PLAN ------------------------------------------------------ @@ -404,13 +691,12 @@ EXPLAIN SELECT * FROM ONLY items; ``` ### RANGE partitioning Consider an example of RANGE partitioning. Let's create a table containing some dummy logs: -``` +```plpgsql CREATE TABLE journal ( id SERIAL, dt TIMESTAMP NOT NULL, level INTEGER, - msg TEXT -); + msg TEXT); -- similar index will also be created for each partition CREATE INDEX ON journal(dt); @@ -421,47 +707,47 @@ SELECT g, random() * 6, md5(g::text) FROM generate_series('2015-01-01'::date, '2015-12-31'::date, '1 minute') as g; ``` Run the `create_range_partitions()` function to create partitions so that each partition would contain the data for one day: -``` +```plpgsql SELECT create_range_partitions('journal', 'dt', '2015-01-01'::date, '1 day'::interval); ``` It will create 365 partitions and move the data from parent to partitions. 
New partitions are appended automatically by insert trigger, but it can be done manually with the following functions: -``` --- append new partition with specified range +```plpgsql +-- add new partition with specified range SELECT add_range_partition('journal', '2016-01-01'::date, '2016-01-07'::date); -- append new partition with default range SELECT append_range_partition('journal'); ``` The first one creates a partition with specified range. The second one creates a partition with default interval and appends it to the partition list. It is also possible to attach an existing table as partition. For example, we may want to attach an archive table (or even foreign table from another server) for some outdated data: -``` +```plpgsql CREATE FOREIGN TABLE journal_archive ( id INTEGER NOT NULL, dt TIMESTAMP NOT NULL, level INTEGER, - msg TEXT -) SERVER archive_server; + msg TEXT) +SERVER archive_server; SELECT attach_range_partition('journal', 'journal_archive', '2014-01-01'::date, '2015-01-01'::date); ``` > **Important:** the definition of the attached table must match the one of the existing partitioned table, including the dropped columns. 
To merge two adjacent partitions, use the `merge_range_partitions()` function: -``` +```plpgsql SELECT merge_range_partitions('journal_archive', 'journal_1'); ``` To split partition by value, use the `split_range_partition()` function: -``` +```plpgsql SELECT split_range_partition('journal_366', '2016-01-03'::date); ``` To detach partition, use the `detach_range_partition()` function: -``` +```plpgsql SELECT detach_range_partition('journal_archive'); ``` Here's an example of the query performing filtering by partitioning key: -``` +```plpgsql SELECT * FROM journal WHERE dt >= '2015-06-01' AND dt < '2015-06-03'; id | dt | level | msg --------+---------------------+-------+---------------------------------- @@ -483,21 +769,28 @@ EXPLAIN SELECT * FROM journal WHERE dt >= '2015-06-01' AND dt < '2015-06-03'; ### Disabling `pg_pathman` There are several user-accessible [GUC](https://www.postgresql.org/docs/9.5/static/config-setting.html) variables designed to toggle the whole module or specific custom nodes on and off: - - `pg_pathman.enable` --- disable (or enable) `pg_pathman` completely + - `pg_pathman.enable` --- disable (or enable) `pg_pathman` **completely** - `pg_pathman.enable_runtimeappend` --- toggle `RuntimeAppend` custom node on\off - `pg_pathman.enable_runtimemergeappend` --- toggle `RuntimeMergeAppend` custom node on\off - - `pg_pathman.enable_partitionfilter` --- toggle `PartitionFilter` custom node on\off - -To **permanently** disable `pg_pathman` for some previously partitioned table, use the `disable_partitioning()` function: -``` + - `pg_pathman.enable_partitionfilter` --- toggle `PartitionFilter` custom node on\off (for INSERTs) + - `pg_pathman.enable_partitionrouter` --- toggle `PartitionRouter` custom node on\off (for cross-partition UPDATEs) + - `pg_pathman.enable_auto_partition` --- toggle automatic partition creation on\off (per session) + - `pg_pathman.enable_bounds_cache` --- toggle bounds cache on\off (faster updates of 
partitioning scheme) + - `pg_pathman.insert_into_fdw` --- allow INSERTs into various FDWs `(disabled | postgres | any_fdw)` + - `pg_pathman.override_copy` --- toggle COPY statement hooking on\off + +To **permanently** disable `pg_pathman` for some previously partitioned table, use the `disable_pathman_for()` function: +```plpgsql SELECT disable_pathman_for('range_rel'); ``` All sections and data will remain unchanged and will be handled by the standard PostgreSQL inheritance mechanism. -##Feedback +## Feedback Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_pathman/issues) page. ## Authors -Ildar Musin Postgres Professional Ltd., Russia -Alexander Korotkov Postgres Professional Ltd., Russia -Dmitry Ivanov Postgres Professional Ltd., Russia +[Ildar Musin](https://github.com/zilder) +[Alexander Korotkov](https://github.com/akorotkov) +[Dmitry Ivanov](https://github.com/funbringer) +[Maksim Milyutin](https://github.com/maksm90) +[Ildus Kurbangaliev](https://github.com/ildus) diff --git a/README.rus.md b/README.rus.md deleted file mode 100644 index a06f25ce..00000000 --- a/README.rus.md +++ /dev/null @@ -1,490 +0,0 @@ -[![Build Status](https://travis-ci.org/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.org/postgrespro/pg_pathman) - -# pg_pathman - -Модуль `pg_pathman` предоставляет оптимизированный механизм секционирования, а также функции для создания и управления секциями. - -## Концепция pg_pathman - -**Секционирование** -- это способ разбиения одной большой таблицы на множество меньших по размеру. Для каждой записи можно однозначно определить секцию, в которой она должна храниться посредством вычисления ключа. -Секционирование в postgres основано на механизме наследования. 
Каждому наследнику задается условие CHECK CONSTRAINT. Например: - -``` -CREATE TABLE test (id SERIAL PRIMARY KEY, title TEXT); -CREATE TABLE test_1 (CHECK ( id >= 100 AND id < 200 )) INHERITS (test); -CREATE TABLE test_2 (CHECK ( id >= 200 AND id < 300 )) INHERITS (test); -``` - -Несмотря на гибкость, этот механизм обладает недостатками. Так при фильтрации данных оптимизатор вынужден перебирать все дочерние секции и сравнивать условие запроса с CHECK CONSTRAINT-ами секции, чтобы определить из каких секций ему следует загружать данные. При большом количестве секций это создает дополнительные накладные расходы, которые могут свести на нет выигрыш в производительности от применения секционирования. - -Модуль `pg_pathman` предоставляет функции для создания и управления секциями, а также механизм секционирования, оптимизированный с учетом знания о структуре дочерних таблиц. Конфигурация сохраняется таблице `pathman_config`, каждая строка которой содержит запись для одной секционированной таблицы (название таблицы, атрибут и тип разбиения). В процессе инициализации `pg_pathman` кеширует конфигурацию дочерних таблиц в формате, удобном для быстрого поиска. Получив запрос типа `SELECT` к секционированной таблице, `pg_pathman` анализирует дерево условий запроса и выделяет из него условия вида: - -``` -ПЕРЕМЕННАЯ ОПЕРАТОР КОНСТАНТА -``` -где `ПЕРЕМЕННАЯ` -- это атрибут, по которому было выполнено разбиение, `ОПЕРАТОР` -- оператор сравнения (поддерживаются =, <, <=, >, >=), `КОНСТАНТА` -- скалярное значение. Например: - -``` -WHERE id = 150 -``` -Затем основываясь на стратегии секционирования и условиях запроса `pg_pathman` находит в кеше соответствующие секции и строит план. - -В текущей версии `pg_pathman` поддерживает следующие типы секционирования: - -* **RANGE** - разбивает таблицу на секции по диапазонам ключевого аттрибута; для оптимизации построения плана используется метод бинарного поиска. 
-* **HASH** - данные равномерно распределяются по секциям в соответствии со значениями hash-функции, вычисленными по заданному целочисленному атрибуту. - -More interesting features are yet to come. Stay tuned! - -## Roadmap - - * Предоставить возможность установки пользовательских колбеков на создание\уничтожение партиции (issue [#22](https://github.com/postgrespro/pg_pathman/issues/22)) - * LIST-секционирование; - * Оптимизация hash join для случая, когда обе таблицы секционированы по ключу join’а. - -## Установка - -Для установки pg_pathman выполните в директории модуля команду: -``` -make install USE_PGXS=1 -``` -Модифицируйте параметр shared_preload_libraries в конфигурационном файле postgres.conf: -``` -shared_preload_libraries = 'pg_pathman' -``` -Для вступления изменений в силу потребуется перезагрузка сервера PostgreSQL. Затем выполните в psql: -``` -CREATE EXTENSION pg_pathman; -``` - -> **Важно:** Если вы хотите собрать `pg_pathman` для работы с кастомной сборкой PostgreSQL, не забудьте установить переменную окружения `PG_CONFIG` равной пути к исполняемому файлу pg_config. Узнать больше о сборке расширений для PostgreSQL можно по ссылке: [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). - -## Функции `pg_pathman` - -### Создание секций -```plpgsql -create_hash_partitions(relation REGCLASS, - attribute TEXT, - partitions_count INTEGER, - partition_name TEXT DEFAULT NULL) -``` -Выполняет HASH-секционирование таблицы `relation` по целочисленному полю `attribute`. Параметр `partitions_count` определяет, сколько секций будет создано. Если `partition_data` установлен в значение `true`, то данные из родительской таблицы будут автоматически распределены по секциям. Стоит иметь в виду, что миграция данных может занять некоторое время, а данные заблокированы. Для конкурентной миграции данных см. функцию `partition_table_concurrently()`. 
- -```plpgsql -create_range_partitions(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - interval ANYELEMENT, - count INTEGER DEFAULT NULL - partition_data BOOLEAN DEFAULT true) - -create_range_partitions(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - interval INTERVAL, - count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT true) -``` -Выполняет RANGE-секционирование таблицы `relation` по полю `attribute`. Аргумент `start_value` задает начальное значение, `interval` -- диапазон значений внутри одной секции, `count` -- количество создаваемых секций (если не задано, то pathman попытается определить количество секций на основе значений аттрибута). - -```plpgsql -create_partitions_from_range(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - interval ANYELEMENT, - partition_data BOOLEAN DEFAULT true) - -create_partitions_from_range(relation REGCLASS, - attribute TEXT, - start_value ANYELEMENT, - end_value ANYELEMENT, - interval INTERVAL, - partition_data BOOLEAN DEFAULT true) -``` -Выполняет RANGE-секционирование для заданного диапазона таблицы `relation` по полю `attribute`. - -### Миграция данных - -```plpgsql -partition_table_concurrently(relation REGCLASS) -``` -Запускает новый процесс (background worker) для конкурентного перемещения данных из родительской таблицы в дочерние секции. Рабочий процесс использует короткие транзакции для перемещения небольших объемов данных (порядка 10 тысяч записей) и, таким образом, не оказывает существенного влияния на работу пользователей. - -```plpgsql -stop_concurrent_part_task(relation REGCLASS) -``` -Останавливает процесс конкурентного партиционирования. Обратите внимание, что процесс завершается не мгновенно, а только по завершении текущей транзакции. - -### Утилиты -```plpgsql -create_hash_update_trigger(parent REGCLASS) -``` -Создает триггер на UPDATE для HASH секций. По-умолчанию триггер на обновление данных не создается, т.к. 
это создает дополнительные накладные расходы. Триггер полезен только в том случае, когда меняется значение ключевого аттрибута. -```plpgsql -create_range_update_trigger(parent REGCLASS) -``` -Аналогично предыдущей, но для RANGE секций. - -### Управление секциями -```plpgsql -split_range_partition(partition REGCLASS, - value ANYELEMENT, - partition_name TEXT DEFAULT NULL,) -``` -Разбивает RANGE секцию `partition` на две секции по значению `value`. - -```plpgsql -merge_range_partitions(partition1 REGCLASS, partition2 REGCLASS) -``` -Объединяет две смежные RANGE секции. Данные из `partition2` копируются в `partition1`, после чего секция `partition2` удаляется. - -```plpgsql -append_range_partition(p_relation REGCLASS, - partition_name TEXT DEFAULT NULL) -``` -Добавляет новую RANGE секцию в конец списка секций. - -```plpgsql -prepend_range_partition(p_relation REGCLASS, - partition_name TEXT DEFAULT NULL) -``` -Добавляет новую RANGE секцию в начало списка секций. - -```plpgsql -add_range_partition(relation REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL) -``` -Добавляет новую RANGE секцию с заданным диапазоном к секционированной таблице `relation`. - -```plpgsql -drop_range_partition(partition TEXT) -``` -Удаляет RANGE секцию вместе с содержащимися в ней данными. - -```plpgsql -attach_range_partition(relation REGCLASS, - partition REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT) -``` -Присоединяет существующую таблицу `partition` в качестве секции к ранее секционированной таблице `relation`. Структура присоединяемой таблицы должна в точности повторять структуру родительской. - -```plpgsql -detach_range_partition(partition REGCLASS) -``` -Отсоединяет секцию `partition`, после чего она становится независимой таблицей. - -```plpgsql -disable_pathman_for(relation REGCLASS) -``` -Отключает механизм секционирования `pg_pathman` для заданной таблицы. При этом созданные ранее секции остаются без изменений. 
- -```plpgsql -drop_partitions(parent REGCLASS, - delete_data BOOLEAN DEFAULT FALSE) -``` -Удаляет все секции таблицы `parent`. Если параметр `delete_data` задан как `false` (по-умолчанию `false`), то данные из секций копируются в родительскую таблицу. - -### Дополнительные параметры - -```plpgsql -enable_parent(relation REGCLASS) -disable_parent(relation REGCLASS) -``` -Включает/исключает родительскую таблицу в план запроса. В оригинальном планировщике PostgreSQL родительская таблица всегда включается в план запроса, даже если она пуста. Это создает дополнительные накладные расходы. Выполните `disable_parent()`, если вы не собираетесь хранить какие-либо данные в родительской таблице. Значение по-умолчанию зависит от того, был ли установлен параметр `partition_data` при первоначальном разбиении таблицы (см. функции `create_range_partitions()` и `create_partitions_from_range()`). Если он был установлен в значение `true`, то все данные были перемещены в секции, а родительская таблица отключена. В противном случае родительская таблица по-умолчанию влючена. - -```plpgsql -enable_auto(relation REGCLASS) -disable_auto(relation REGCLASS) -``` -Включает/выключает автоматическое создание секций (только для RANGE секционирования). По-умолчанию включено. - -## Custom plan nodes -`pg_pathman` вводит три новых узла плана (см. 
[custom plan nodes](https://wiki.postgresql.org/wiki/CustomScanAPI)), предназначенных для оптимизации времени выполнения: - -- `RuntimeAppend` (замещает узел типа `Append`) -- `RuntimeMergeAppend` (замещает узел типа `MergeAppend`) -- `PartitionFilter` (выполняет работу INSERT-триггера) - -`PartitionFilter` работает как прокси-узел для INSERT-запросов, распределяя новые записи по соответствующим секциям: - -``` -EXPLAIN (COSTS OFF) -INSERT INTO partitioned_table -SELECT generate_series(1, 10), random(); - QUERY PLAN ------------------------------------------ - Insert on partitioned_table - -> Custom Scan (PartitionFilter) - -> Subquery Scan on "*SELECT*" - -> Result -(4 rows) -``` - -Узлы `RuntimeAppend` и `RuntimeMergeAppend` имеют между собой много общего: они нужны в случает, когда условие WHERE принимает форму: -``` -ПЕРЕМЕННАЯ ОПЕРАТОР ПАРАМЕТР -``` -Подобные выражения не могут быть оптимизированы во время планирования, т.к. значение параметра неизвестно до стадии выполнения. Проблема может быть решена путем встраивания дополнительной процедуры анализа в код `Append` узла, таким образом позволяя ему выбирать лишь необходимые субпланы из всего списка дочерних планов. - ----------- - -Есть по меньшей мере несколько ситуаций, которые демонстрируют полезность таких узлов: - -``` -/* создаем таблицу, которую хотим секционировать */ -CREATE TABLE partitioned_table(id INT NOT NULL, payload REAL); - -/* заполняем данными */ -INSERT INTO partitioned_table -SELECT generate_series(1, 1000), random(); - -/* выполняем секционирование */ -SELECT create_hash_partitions('partitioned_table', 'id', 100); - -/* создаем обычную таблицу */ -CREATE TABLE some_table AS SELECT generate_series(1, 100) AS VAL; -``` - - - - **`id = (select ... 
limit 1)`** -``` -EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table -WHERE id = (SELECT * FROM some_table LIMIT 1); - QUERY PLAN ----------------------------------------------------------------------------------------------------- - Custom Scan (RuntimeAppend) (actual time=0.030..0.033 rows=1 loops=1) - InitPlan 1 (returns $0) - -> Limit (actual time=0.011..0.011 rows=1 loops=1) - -> Seq Scan on some_table (actual time=0.010..0.010 rows=1 loops=1) - -> Seq Scan on partitioned_table_70 partitioned_table (actual time=0.004..0.006 rows=1 loops=1) - Filter: (id = $0) - Rows Removed by Filter: 9 - Planning time: 1.131 ms - Execution time: 0.075 ms -(9 rows) - -/* выключаем узел RuntimeAppend */ -SET pg_pathman.enable_runtimeappend = f; - -EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table -WHERE id = (SELECT * FROM some_table LIMIT 1); - QUERY PLAN ----------------------------------------------------------------------------------- - Append (actual time=0.196..0.274 rows=1 loops=1) - InitPlan 1 (returns $0) - -> Limit (actual time=0.005..0.005 rows=1 loops=1) - -> Seq Scan on some_table (actual time=0.003..0.003 rows=1 loops=1) - -> Seq Scan on partitioned_table_0 (actual time=0.014..0.014 rows=0 loops=1) - Filter: (id = $0) - Rows Removed by Filter: 6 - -> Seq Scan on partitioned_table_1 (actual time=0.003..0.003 rows=0 loops=1) - Filter: (id = $0) - Rows Removed by Filter: 5 - ... 
/* more plans follow */ - Planning time: 1.140 ms - Execution time: 0.855 ms -(306 rows) -``` - - - **`id = ANY (select ...)`** -``` -EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table -WHERE id = any (SELECT * FROM some_table limit 4); - QUERY PLAN ------------------------------------------------------------------------------------------------------------ - Nested Loop (actual time=0.025..0.060 rows=4 loops=1) - -> Limit (actual time=0.009..0.011 rows=4 loops=1) - -> Seq Scan on some_table (actual time=0.008..0.010 rows=4 loops=1) - -> Custom Scan (RuntimeAppend) (actual time=0.002..0.004 rows=1 loops=4) - -> Seq Scan on partitioned_table_70 partitioned_table (actual time=0.001..0.001 rows=10 loops=1) - -> Seq Scan on partitioned_table_26 partitioned_table (actual time=0.002..0.003 rows=9 loops=1) - -> Seq Scan on partitioned_table_27 partitioned_table (actual time=0.001..0.002 rows=20 loops=1) - -> Seq Scan on partitioned_table_63 partitioned_table (actual time=0.001..0.002 rows=9 loops=1) - Planning time: 0.771 ms - Execution time: 0.101 ms -(10 rows) - -/* выключаем узел RuntimeAppend */ -SET pg_pathman.enable_runtimeappend = f; - -EXPLAIN (COSTS OFF, ANALYZE) SELECT * FROM partitioned_table -WHERE id = any (SELECT * FROM some_table limit 4); - QUERY PLAN ------------------------------------------------------------------------------------------ - Nested Loop Semi Join (actual time=0.531..1.526 rows=4 loops=1) - Join Filter: (partitioned_table.id = some_table.val) - Rows Removed by Join Filter: 3990 - -> Append (actual time=0.190..0.470 rows=1000 loops=1) - -> Seq Scan on partitioned_table (actual time=0.187..0.187 rows=0 loops=1) - -> Seq Scan on partitioned_table_0 (actual time=0.002..0.004 rows=6 loops=1) - -> Seq Scan on partitioned_table_1 (actual time=0.001..0.001 rows=5 loops=1) - -> Seq Scan on partitioned_table_2 (actual time=0.002..0.004 rows=14 loops=1) -... 
/* 96 scans follow */ - -> Materialize (actual time=0.000..0.000 rows=4 loops=1000) - -> Limit (actual time=0.005..0.006 rows=4 loops=1) - -> Seq Scan on some_table (actual time=0.003..0.004 rows=4 loops=1) - Planning time: 2.169 ms - Execution time: 2.059 ms -(110 rows) -``` - - - **`NestLoop` involving a partitioned table**, which is omitted since it's occasionally shown above. - ----------- - -Узнать больше о работе RuntimeAppend можно в [блоге](http://akorotkov.github.io/blog/2016/06/15/pg_pathman-runtime-append/) Александра Короткова. - -## Примеры - -### Common tips -- You can easily add **_partition_** column containing the names of the underlying partitions using the system attribute called **_tableoid_**: -``` -SELECT tableoid::regclass AS partition, * FROM partitioned_table; -``` -- Несмотря на то, что индексы на родительской таблице не очень полезны (т.к. таблица пуста), они тем не менее выполняют роль прототипов для создания индексов в дочерних таблицах: `pg_pathman` автоматически создает аналогичные индексы для каждой новой секции. - -- Получить все текущие процессы конкурентного секционирования можно из представления `pathman_concurrent_part_tasks`: -```plpgsql -SELECT * FROM pathman_concurrent_part_tasks; - userid | pid | dbid | relid | processed | status ---------+------+-------+-------+-----------+--------- - dmitry | 7367 | 16384 | test | 472000 | working -(1 row) -``` - -### HASH секционирование -Рассмотрим пример секционирования таблицы, используя HASH-стратегию на примере таблицы товаров. -``` -CREATE TABLE items ( - id SERIAL PRIMARY KEY, - name TEXT, - code BIGINT); - -INSERT INTO items (id, name, code) -SELECT g, md5(g::text), random() * 100000 -FROM generate_series(1, 100000) as g; -``` -Если дочерние секции подразумевают наличие индексов, то стоит их создать в родительской таблице до разбиения. Тогда при разбиении pg_pathman автоматически создаст соответствующие индексы в дочерних.таблицах. 
Разобьем таблицу `hash_rel` на 100 секций по полю `value`: -``` -SELECT create_hash_partitions('items', 'id', 100); -``` -Пример построения плана для запроса с фильтрацией по ключевому полю: -``` -SELECT * FROM items WHERE id = 1234; - id | name | code -------+----------------------------------+------ - 1234 | 81dc9bdb52d04dc20036dbd8313ed055 | 1855 -(1 row) - -EXPLAIN SELECT * FROM items WHERE id = 1234; - QUERY PLAN ------------------------------------------------------------------------------------- - Append (cost=0.28..8.29 rows=0 width=0) - -> Index Scan using items_34_pkey on items_34 (cost=0.28..8.29 rows=0 width=0) - Index Cond: (id = 1234) -``` -Стоит отметить, что pg_pathman исключает из плана запроса родительскую таблицу, и чтобы получить данные из нее, следует использовать модификатор ONLY: -``` -EXPLAIN SELECT * FROM ONLY items; - QUERY PLAN ------------------------------------------------------- - Seq Scan on items (cost=0.00..0.00 rows=1 width=45) -``` - -### RANGE секционирование -Рассмотрим пример разбиения таблицы по диапазону дат. Пусть у нас имеется таблица логов: -``` -CREATE TABLE journal ( - id SERIAL, - dt TIMESTAMP NOT NULL, - level INTEGER, - msg TEXT -); -CREATE INDEX ON journal(dt); - -INSERT INTO journal (dt, level, msg) -SELECT g, random()*6, md5(g::text) -FROM generate_series('2015-01-01'::date, '2015-12-31'::date, '1 minute') as g; -``` -Разобьем таблицу на 365 секций так, чтобы каждая секция содержала данные за один день: -``` -SELECT create_range_partitions('journal', 'dt', '2015-01-01'::date, '1 day'::interval); -``` -Новые секции добавляются автоматически при вставке новых записей в непокрытую область. Однако есть возможность добавлять секции вручную. Для этого можно воспользоваться следующими функциями: -``` -SELECT add_range_partition('journal', '2016-01-01'::date, '2016-01-07'::date); -SELECT append_range_partition('journal'); -``` -Первая создает новую секцию с заданным диапазоном. 
Вторая создает новую секцию с интервалом, заданным при первоначальном разбиении, и добавляет ее в конец списка секций. Также можно присоединить существующую таблицу в качестве секции. Например, это может быть таблица с архивными данными, расположенная на другом сервере и подключенная с помощью fdw: - -``` -CREATE FOREIGN TABLE journal_archive ( - id INTEGER NOT NULL, - dt TIMESTAMP NOT NULL, - level INTEGER, - msg TEXT -) SERVER archive_server; -``` -> Важно: структура подключаемой таблицы должна полностью совпадать с родительской. - -Подключим ее к имеющемуся разбиению: -``` -SELECT attach_range_partition('journal', 'journal_archive', '2014-01-01'::date, '2015-01-01'::date); -``` -Устаревшие секции можно сливать с архивной: -``` -SELECT merge_range_partitions('journal_archive', 'journal_1'); -``` -Разделить ранее созданную секцию на две можно с помощью следующей функции, указав точку деления: -``` -SELECT split_range_partition('journal_366', '2016-01-03'::date); -``` -Чтобы отсоединить ранее созданную секцию, воспользуйтесь функцией: -``` -SELECT detach_range_partition('journal_archive'); -``` - -Пример построения плана для запроса с фильтрацией по ключевому полю: -``` -SELECT * FROM journal WHERE dt >= '2015-06-01' AND dt < '2015-06-03'; - id | dt | level | msg ---------+---------------------+-------+---------------------------------- - 217441 | 2015-06-01 00:00:00 | 2 | 15053892d993ce19f580a128f87e3dbf - 217442 | 2015-06-01 00:01:00 | 1 | 3a7c46f18a952d62ce5418ac2056010c - 217443 | 2015-06-01 00:02:00 | 0 | 92c8de8f82faf0b139a3d99f2792311d - ... 
-(2880 rows) - -EXPLAIN SELECT * FROM journal WHERE dt >= '2015-06-01' AND dt < '2015-06-03'; - QUERY PLAN ------------------------------------------------------------------- - Append (cost=0.00..58.80 rows=0 width=0) - -> Seq Scan on journal_152 (cost=0.00..29.40 rows=0 width=0) - -> Seq Scan on journal_153 (cost=0.00..29.40 rows=0 width=0) -(3 rows) -``` - -### Деактивация pg_pathman -Для включения и отключения модуля `pg_pathman` и отдельных его компонентов существует ряд [GUC](https://www.postgresql.org/docs/9.5/static/config-setting.html) переменных: - - - `pg_pathman.enable` --- полное отключение (или включение) модуля `pg_pathman` - - `pg_pathman.enable_runtimeappend` --- включение/отключение функционала `RuntimeAppend` - - `pg_pathman.enable_runtimemergeappend` --- включение/отключение функционала `RuntimeMergeAppend` - - `pg_pathman.enable_partitionfilter` --- включение/отключение функционала `PartitionFilter` - -Чтобы **безвозвратно** отключить механизм `pg_pathman` для отдельной таблицы, используйте функцию `disable_pathman_for()`. В результате этой операции структура таблиц останется прежней, но для планирования и выполнения запросов будет использоваться стандартный механизм PostgreSQL. -``` -SELECT disable_pathman_for('range_rel'); -``` - -## Обратная связь -Если у вас есть вопросы или предложения, а также если вы обнаружили ошибки, напишите нам в разделе [issues](https://github.com/postgrespro/pg_pathman/issues). - -## Авторы -Ильдар Мусин Postgres Professional, Россия -Александр Коротков Postgres Professional, Россия -Дмитрий Иванов Postgres Professional, Россия diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..0544d859 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,3 @@ +services: + tests: + build: . 
diff --git a/expected/for_update.out b/expected/for_update.out index 3e41031e..ffd425e4 100644 --- a/expected/for_update.out +++ b/expected/for_update.out @@ -2,37 +2,49 @@ Parsed test spec with 2 sessions starting permutation: s1_b s1_update s2_select s1_r create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select: select * from test_tbl where id = 1; -id val +id|val +--+--- + 1| 1 +(1 row) -1 1 step s1_r: rollback; starting permutation: s1_b s1_update s2_select_locked s1_r create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select_locked: select * from test_tbl where id = 1 for share; step s1_r: rollback; step s2_select_locked: <... completed> -id val +id|val +--+--- + 1| 1 +(1 row) -1 1 starting permutation: s1_b s1_update s2_select_locked s1_c create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select_locked: select * from test_tbl where id = 1 for share; step s1_c: commit; step s2_select_locked: <... 
completed> -id val +id|val +--+--- +(0 rows) diff --git a/expected/insert_nodes.out b/expected/insert_nodes.out index a6791621..8f725216 100644 --- a/expected/insert_nodes.out +++ b/expected/insert_nodes.out @@ -1,84 +1,127 @@ Parsed test spec with 2 sessions starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions -create_range_partitions +set_spawn_using_bgw +------------------- + +(1 row) -1 step s1b: BEGIN; -step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); +step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; -consrc +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' + ORDER BY c.oid; +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +(2 rows) -((id >= 1) AND (id < 101)) -((id >= 101) AND (id < 201)) step s2b: BEGIN; -step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); +step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; -consrc +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' + ORDER BY c.oid; +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 
201))) +(2 rows) -((id >= 1) AND (id < 101)) -((id >= 101) AND (id < 201)) starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_300 s2c s2_show_partitions -create_range_partitions +set_spawn_using_bgw +------------------- + +(1 row) -1 step s1b: BEGIN; -step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); +step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; -consrc +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' + ORDER BY c.oid; +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +(2 rows) -((id >= 1) AND (id < 101)) -((id >= 101) AND (id < 201)) step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; -consrc +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' + ORDER BY c.oid; +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +CHECK (((id >= 201) AND (id < 301))) +(3 rows) -((id >= 1) AND (id < 101)) -((id >= 101) AND (id < 201)) -((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_300 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions 
-create_range_partitions +set_spawn_using_bgw +------------------- + +(1 row) -1 step s1b: BEGIN; step s1_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; -consrc +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' + ORDER BY c.oid; +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +CHECK (((id >= 201) AND (id < 301))) +(3 rows) -((id >= 1) AND (id < 101)) -((id >= 101) AND (id < 201)) -((id >= 201) AND (id < 301)) step s2b: BEGIN; -step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); +step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; -consrc +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' + ORDER BY c.oid; +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +CHECK (((id >= 201) AND (id < 301))) +(3 rows) -((id >= 1) AND (id < 101)) -((id >= 101) AND (id < 201)) -((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_150 s2b s2_insert_300 s1r s2r s2_show_partitions -create_range_partitions +set_spawn_using_bgw +------------------- + +(1 row) -1 step s1b: BEGIN; -step s1_insert_150: INSERT INTO range_rel 
SELECT generate_series(1, 150); +step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; step s2r: ROLLBACK; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; -consrc +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' + ORDER BY c.oid; +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +CHECK (((id >= 201) AND (id < 301))) +(3 rows) -((id >= 1) AND (id < 101)) -((id >= 101) AND (id < 201)) -((id >= 201) AND (id < 301)) diff --git a/expected/pathman_CVE-2020-14350.out b/expected/pathman_CVE-2020-14350.out new file mode 100644 index 00000000..a48e182f --- /dev/null +++ b/expected/pathman_CVE-2020-14350.out @@ -0,0 +1,116 @@ +/* + * Check fix for CVE-2020-14350. + * See also 7eeb1d986 postgresql commit. 
+ */ +SET client_min_messages = 'warning'; +DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); +DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE IF EXISTS test1 CASCADE; +DROP TABLE IF EXISTS test2 CASCADE; +DROP ROLE IF EXISTS pathman_regress_hacker; +SET client_min_messages = 'notice'; +GRANT CREATE ON SCHEMA public TO PUBLIC; +CREATE EXTENSION pg_pathman; +CREATE ROLE pathman_regress_hacker LOGIN; +-- Test 1 +RESET ROLE; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) +RETURNS bigint +AS $$ +BEGIN + ALTER ROLE pathman_regress_hacker SUPERUSER; + SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; +END +$$ LANGUAGE plpgsql; +CREATE TABLE test1(i INT4 NOT NULL); +INSERT INTO test1 SELECT generate_series(1, 500); +SELECT create_hash_partitions('test1', 'i', 5, false); + create_hash_partitions +------------------------ + 5 +(1 row) + +RESET ROLE; +SELECT partition_table_concurrently('test1', 10, 1); +NOTICE: worker started, you can stop it with the following command: select public.stop_concurrent_part_task('test1'); + partition_table_concurrently +------------------------------ + +(1 row) + +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +-- Test result (must be 'off') +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +-- Test 2 +RESET ROLE; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) +RETURNS REGCLASS +AS $$ +BEGIN + ALTER ROLE pathman_regress_hacker SUPERUSER; + RETURN 
create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); +END +$$ LANGUAGE plpgsql; +RESET ROLE; +CREATE TABLE test2(i INT4 NOT NULL); +INSERT INTO test2 VALUES(0); +SELECT create_range_partitions('test2', 'i', 0, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test2 values(1); +-- Test result (must be 'off') +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +-- Cleanup +RESET ROLE; +DROP FUNCTION _partition_data_concurrent(oid,integer); +DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE test1 CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table test1_0 +drop cascades to table test1_1 +drop cascades to table test1_2 +drop cascades to table test1_3 +drop cascades to table test1_4 +DROP TABLE test2 CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to sequence test2_seq +drop cascades to table test2_1 +drop cascades to table test2_2 +DROP ROLE pathman_regress_hacker; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out new file mode 100644 index 00000000..0587a1c8 --- /dev/null +++ b/expected/pathman_array_qual.out @@ -0,0 +1,2408 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) 
+ */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: (val < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text = ANY 
('{a,b}'::text[])) +(5 rows) + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{-100,100}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + 
Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) - pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY 
('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> 
Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq 
Scan on test_10 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) +(21 rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,100}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + 
Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{100,100}'::integer[])) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 
rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < 100) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < 550) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < 700) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + 
+EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < 100) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN 
+-------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < 99) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + Filter: (a < 500) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < 100) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 99) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > 500) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + QUERY PLAN 
+----------------------------------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > ANY ('{NULL,700}'::integer[])) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + 
QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_2 + Filter: (a > 101) + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 550) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > 700) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled 
+SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan 
on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN 
+--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> 
Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE 
q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + 
Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 
$1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN 
(COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> 
Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on 
test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + 
Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN 
+--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom 
Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq 
Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + 
-> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 
898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); 
+EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual_1.out b/expected/pathman_array_qual_1.out new file mode 100644 index 00000000..dd7d2485 --- /dev/null +++ b/expected/pathman_array_qual_1.out @@ -0,0 +1,2398 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) 
+ */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +-------------------- + Seq Scan on test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: (val < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text = ANY ('{a,b}'::text[])) +(5 
rows) + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{-100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN 
(COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) - pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY 
('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) +(21 
rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) +(21 
rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + 
-> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ALL ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +-------------------- + Seq Scan on test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < 550) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < 700) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and 
some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN +-------------------- + Seq Scan on test_1 + Filter: (a < 99) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> 
Seq Scan on test_5 + Filter: (a < 500) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 99) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > 500) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + QUERY PLAN 
+----------------------------------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > ANY ('{NULL,700}'::integer[])) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + 
QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_2 + Filter: (a > 101) + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 550) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > 700) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled 
+SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan 
on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN 
+--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> 
Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE 
q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + 
Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 
$1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN 
(COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> 
Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on 
test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + 
Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN 
+--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom 
Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq 
Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + 
-> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 
898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); 
+EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual_2.out b/expected/pathman_array_qual_2.out new file mode 100644 index 00000000..ab504858 --- /dev/null +++ b/expected/pathman_array_qual_2.out @@ -0,0 +1,2398 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) 
+ */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +------------------------- + Seq Scan on test_1 test +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: (val < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text = ANY 
('{a,b}'::text[])) +(5 rows) + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{-100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + Result + One-Time Filter: 
false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) - pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 
+ Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY 
('{-100,-200,-300}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY 
('{NULL,NULL,NULL,NULL}'::integer[])) +(21 rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY 
('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ALL ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + 
+EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < 550) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < 700) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT 
* FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN +------------------------- + Seq Scan on test_1 
test + Filter: (a < 99) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + Filter: (a < 500) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 99) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > 500) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test 
WHERE a > ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_7 test_1 + Filter: (a > ANY ('{NULL,700}'::integer[])) + -> Seq Scan on test_8 test_2 + -> Seq Scan on test_9 test_3 + -> Seq Scan on test_10 test_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_2 test_1 + Filter: (a > 101) + -> Seq Scan on test_3 test_2 + -> Seq Scan on test_4 test_3 + -> Seq Scan on test_5 test_4 + -> Seq Scan on test_6 test_5 + -> Seq Scan on test_7 test_6 + -> Seq Scan on test_8 test_7 + -> Seq Scan on test_9 test_8 + -> Seq Scan on test_10 test_9 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on 
test_6 test_1 + Filter: (a > 550) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_7 test_1 + Filter: (a > 700) + -> Seq Scan on test_8 test_2 + -> Seq Scan on test_9 test_3 + -> Seq Scan on test_10 test_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan 
(RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> 
Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on 
test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 
+ Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY 
(ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 
$1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + 
Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- 
+ Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on 
test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + 
Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on 
test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + 
QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) 
EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 
test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE 
q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + 
DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN 
(COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; +DROP EXTENSION pg_pathman; diff --git a/expected/pg_pathman.out b/expected/pathman_basic.out similarity index 50% rename from expected/pg_pathman.out rename to expected/pathman_basic.out index 4fa5aa56..3afde299 
100644 --- a/expected/pg_pathman.out +++ b/expected/pathman_basic.out @@ -1,4 +1,13 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ \set VERBOSITY terse +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; @@ -8,8 +17,17 @@ CREATE TABLE test.hash_rel ( INSERT INTO test.hash_rel VALUES (1, 1); INSERT INTO test.hash_rel VALUES (2, 2); INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); -ERROR: Partitioning key 'value' must be NOT NULL +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +\set VERBOSITY terse ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); create_hash_partitions @@ -35,9 +53,9 @@ SELECT * FROM test.hash_rel; 3 | 3 (3 rows) -SELECT pathman.disable_parent('test.hash_rel'); - disable_parent ----------------- +SELECT pathman.set_enable_parent('test.hash_rel', false); + set_enable_parent +------------------- (1 row) @@ -55,9 +73,9 @@ SELECT * FROM test.hash_rel; ----+------- (0 rows) -SELECT pathman.enable_parent('test.hash_rel'); - enable_parent ---------------- +SELECT 
pathman.set_enable_parent('test.hash_rel', true); + set_enable_parent +------------------- (1 row) @@ -80,7 +98,6 @@ SELECT * FROM test.hash_rel; (3 rows) SELECT pathman.drop_partitions('test.hash_rel'); -NOTICE: function test.hash_rel_upd_trig_func() does not exist, skipping NOTICE: 0 rows copied from test.hash_rel_0 NOTICE: 0 rows copied from test.hash_rel_1 NOTICE: 0 rows copied from test.hash_rel_2 @@ -129,13 +146,21 @@ CREATE TABLE test.range_rel ( CREATE INDEX ON test.range_rel (dt); INSERT INTO test.range_rel (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); -ERROR: Partitioning key 'dt' must be NOT NULL +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column "dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +\set VERBOSITY terse ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); -ERROR: Not enough partitions to fit all values of 'dt' +ERROR: not enough partitions to fit all values of "dt" SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); -NOTICE: sequence "range_rel_seq" does not exist, skipping create_range_partitions ------------------------- 4 @@ -157,7 +182,6 @@ CREATE TABLE test.num_range_rel ( id SERIAL 
PRIMARY KEY, txt TEXT); SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); -NOTICE: sequence "num_range_rel_seq" does not exist, skipping create_range_partitions ------------------------- 4 @@ -189,54 +213,208 @@ SELECT COUNT(*) FROM ONLY test.num_range_rel; 0 (1 row) -SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel; -ERROR: It is prohibited to query partitioned tables both with and without ONLY modifier -SET pg_pathman.enable_runtimeappend = OFF; -SET pg_pathman.enable_runtimemergeappend = OFF; -VACUUM; -/* update triggers test */ -SELECT pathman.create_hash_update_trigger('test.hash_rel'); - create_hash_update_trigger ------------------------------ - test.hash_rel_upd_trig_func +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); + create_range_partitions +------------------------- + 10 (1 row) -UPDATE test.hash_rel SET value = 7 WHERE value = 6; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 7; - QUERY PLAN ------------------------------- +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------------- Append - -> Seq Scan on hash_rel_1 - Filter: (value = 7) -(3 rows) + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) -SELECT * FROM test.hash_rel WHERE value = 7; - id | value -----+------- - 6 | 7 +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + (1 row) -SELECT 
pathman.create_range_update_trigger('test.num_range_rel'); - create_range_update_trigger ----------------------------------- - test.num_range_rel_upd_trig_func +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + (1 row) -UPDATE test.num_range_rel SET id = 3001 WHERE id = 1; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = 3001; - QUERY PLAN ------------------------------------ +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------------- Append - -> Seq Scan on num_range_rel_4 - Filter: (id = 3001) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) (3 rows) -SELECT * FROM test.num_range_rel WHERE id = 3001; - id | txt -------+---------------------------------- - 3001 | c4ca4238a0b923820dcc509a6f75849b +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 12 other 
objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 
'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... */ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 (1 row) +DROP 
TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 6 other objects +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; SET enable_indexscan = OFF; SET enable_bitmapscan = OFF; SET enable_seqscan = ON; @@ -249,6 +427,20 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -> Seq Scan on hash_rel_2 (4 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; QUERY PLAN ------------------------------ @@ -257,6 +449,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; Filter: (value = 2) (3 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; QUERY PLAN ------------------------------ @@ -267,16 +467,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; Filter: (value = 1) (5 rows) --- Temporarily commented out --- EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value BETWEEN 1 AND 2; --- QUERY PLAN --- ------------------------------------------------- --- Append --- -> Seq Scan on hash_rel_1 --- Filter: ((value >= 1) AND (value <= 2)) --- -> Seq Scan on hash_rel_2 --- Filter: ((value >= 1) AND (value <= 2)) --- (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ----------------------------------- @@ -327,6 +534,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; -> Seq Scan on range_rel_4 (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; QUERY PLAN ------------------------------- @@ -369,6 +586,20 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -> Seq Scan on hash_rel_2 (4 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; QUERY PLAN ------------------------------ @@ -377,6 +608,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; Filter: (value = 2) (3 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; QUERY PLAN ------------------------------ @@ -387,6 +626,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM 
test.hash_rel WHERE value = 2 OR value = 1; Filter: (value = 1) (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ---------------------------------------------------------------- @@ -457,6 +713,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; -> Seq Scan on range_rel_4 (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; QUERY PLAN ------------------------------- @@ -553,381 +819,63 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test. 
(4 rows) /* - * Join + * Test inlined SQL functions */ -SET enable_hashjoin = OFF; -set enable_nestloop = OFF; -SET enable_mergejoin = ON; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: j2.dt - -> Merge Join - Merge Cond: (j3.id = j2.id) - -> Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 - -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 - -> Materialize - -> Merge Join - Merge Cond: (j2.id = j1.id) - -> Merge Append - Sort Key: j2.id - -> Index Scan using range_rel_2_pkey on range_rel_2 j2 - -> Index Scan using range_rel_3_pkey on range_rel_3 j2_1 - -> Index Scan using range_rel_4_pkey on range_rel_4 j2_2 - -> Materialize - -> Merge Append - Sort Key: j1.id - -> Index Scan using range_rel_1_pkey on range_rel_1 j1 - -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 -(22 rows) - -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: j2.dt - -> Hash Join - Hash Cond: (j3.id = j2.id) - -> Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 - -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 - -> Hash - -> Hash 
Join - Hash Cond: (j2.id = j1.id) - -> Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 - -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 - -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 j1 - -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 -(20 rows) +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) -/* - * Test CTE query - */ -EXPLAIN (COSTS OFF) - WITH ttt AS (SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') -SELECT * FROM ttt; - QUERY PLAN --------------------------------------------------------------------------------------------- - CTE Scan on ttt - CTE ttt - -> Append - -> Seq Scan on range_rel_2 - -> Index Scan using range_rel_3_dt_idx on range_rel_3 - Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) -(6 rows) +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +-------------------------------------- + Limit + -> Append + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(4 rows) -EXPLAIN (COSTS OFF) - WITH ttt AS (SELECT * FROM test.hash_rel WHERE value = 2) -SELECT * FROM ttt; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); QUERY PLAN -------------------------------------- - CTE Scan on ttt - CTE ttt - -> Append - -> Seq Scan on hash_rel_1 - Filter: (value = 2) -(5 rows) + Limit + -> Append + -> Seq Scan on sql_inline_2 + Filter: (id = 1) +(4 rows) +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects /* - * Test RuntimeAppend + * Test by @baiyinqiqi (issue #60) */ -create or replace function 
test.pathman_assert(smt bool, error_msg text) returns text as $$ -begin - if not smt then - raise exception '%', error_msg; - end if; - - return 'ok'; -end; -$$ language plpgsql; -create or replace function test.pathman_equal(a text, b text, error_msg text) returns text as $$ -begin - if a != b then - raise exception '''%'' is not equal to ''%'', %', a, b, error_msg; - end if; - - return 'equal'; -end; -$$ language plpgsql; -create or replace function test.pathman_test(query text) returns jsonb as $$ -declare - plan jsonb; -begin - execute 'explain (analyze, format json)' || query into plan; - - return plan; -end; -$$ language plpgsql; -create or replace function test.pathman_test_1() returns text as $$ -declare - plan jsonb; - num int; -begin - plan = test.pathman_test('select * from test.runtime_test_1 where id = (select * from test.run_values limit 1)'); - - perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, - '"Custom Scan"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Custom Plan Provider')::text, - '"RuntimeAppend"', - 'wrong plan provider'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Relation Name')::text, - format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(1), 6)), - 'wrong partition'); - - select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans') into num; - perform test.pathman_equal(num::text, '2', 'expected 2 child plans for custom scan'); - - return 'ok'; -end; -$$ language plpgsql; -create or replace function test.pathman_test_2() returns text as $$ -declare - plan jsonb; - num int; -begin - plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from test.run_values limit 4)'); - - perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, - '"Nested Loop"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, - '"Custom Scan"', - 'wrong plan type'); - - perform 
test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, - '"RuntimeAppend"', - 'wrong plan provider'); - - select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; - perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); - - for i in 0..3 loop - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->i->'Relation Name')::text, - format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), - 'wrong partition'); - - num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; - perform test.pathman_equal(num::text, '1', 'expected 1 loop'); - end loop; - - return 'ok'; -end; -$$ language plpgsql; -create or replace function test.pathman_test_3() returns text as $$ -declare - plan jsonb; - num int; -begin - plan = test.pathman_test('select * from test.runtime_test_1 a join test.run_values b on a.id = b.val'); - - perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, - '"Nested Loop"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, - '"Custom Scan"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, - '"RuntimeAppend"', - 'wrong plan provider'); - - select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; - perform test.pathman_equal(num::text, '6', 'expected 6 child plans for custom scan'); - - for i in 0..5 loop - num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; - perform test.pathman_assert(num > 0 and num <= 1718, 'expected no more than 1718 loops'); - end loop; - - return 'ok'; -end; -$$ language plpgsql; -create or replace function test.pathman_test_4() returns text as $$ -declare - plan jsonb; - num int; -begin - plan = test.pathman_test('select * from test.category c, lateral' || - '(select * from test.runtime_test_2 g where g.category_id = c.id order by rating 
limit 4) as tg'); - - perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, - '"Nested Loop"', - 'wrong plan type'); - - /* Limit -> Custom Scan */ - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Node Type')::text, - '"Custom Scan"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Custom Plan Provider')::text, - '"RuntimeMergeAppend"', - 'wrong plan provider'); - - select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans') into num; - perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); - - for i in 0..3 loop - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Relation Name')::text, - format('"runtime_test_2_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), - 'wrong partition'); - - num = plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Actual Loops'; - perform test.pathman_assert(num = 1, 'expected no more than 1 loops'); - end loop; - - return 'ok'; -end; -$$ language plpgsql; -create or replace function test.pathman_test_5() returns text as $$ -declare - res record; -begin - select - from test.runtime_test_3 - where id = (select * from test.vals order by val limit 1) - limit 1 - into res; /* test empty tlist */ - - - select id, generate_series(1, 2) gen, val - from test.runtime_test_3 - where id = any (select * from test.vals order by val limit 5) - order by id, gen, val - offset 1 limit 1 - into res; /* without IndexOnlyScan */ - - perform test.pathman_equal(res.id::text, '1', 'id is incorrect (t2)'); - perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t2)'); - perform test.pathman_equal(res.val::text, 'k = 1', 'val is incorrect (t2)'); - - - select id - from test.runtime_test_3 - where id = any (select * from test.vals order by val limit 5) - order by id - offset 3 limit 1 - into res; /* with IndexOnlyScan */ - - perform test.pathman_equal(res.id::text, '4', 'id is 
incorrect (t3)'); - - - select v.val v1, generate_series(2, 2) gen, t.val v2 - from test.runtime_test_3 t join test.vals v on id = v.val - order by v1, gen, v2 - limit 1 - into res; - - perform test.pathman_equal(res.v1::text, '1', 'v1 is incorrect (t4)'); - perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t4)'); - perform test.pathman_equal(res.v2::text, 'k = 1', 'v2 is incorrect (t4)'); - - return 'ok'; -end; -$$ language plpgsql -set pg_pathman.enable = true -set enable_hashjoin = off -set enable_mergejoin = off; -NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes have been enabled -create table test.run_values as select generate_series(1, 10000) val; -create table test.runtime_test_1(id serial primary key, val real); -insert into test.runtime_test_1 select generate_series(1, 10000), random(); -select pathman.create_hash_partitions('test.runtime_test_1', 'id', 6); - create_hash_partitions ------------------------- - 6 -(1 row) - -create table test.category as (select id, 'cat' || id::text as name from generate_series(1, 4) id); -create table test.runtime_test_2 (id serial, category_id int not null, name text, rating real); -insert into test.runtime_test_2 (select id, (id % 6) + 1 as category_id, 'good' || id::text as name, random() as rating from generate_series(1, 100000) id); -create index on test.runtime_test_2 (category_id, rating); -select pathman.create_hash_partitions('test.runtime_test_2', 'category_id', 6); - create_hash_partitions ------------------------- - 6 -(1 row) - -create table test.vals as (select generate_series(1, 10000) as val); -create table test.runtime_test_3(val text, id serial not null); -insert into test.runtime_test_3(id, val) select * from generate_series(1, 10000) k, format('k = %s', k); -select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT 
pathman.create_hash_partitions('test.hash_varchar', 'val', 4); create_hash_partitions ------------------------ 4 (1 row) -create index on test.runtime_test_3 (id); -create index on test.runtime_test_3_0 (id); -analyze test.run_values; -analyze test.runtime_test_1; -analyze test.runtime_test_2; -analyze test.runtime_test_3; -analyze test.runtime_test_3_0; -set enable_mergejoin = off; -set enable_hashjoin = off; -set pg_pathman.enable_runtimeappend = on; -set pg_pathman.enable_runtimemergeappend = on; -select test.pathman_test_1(); /* RuntimeAppend (select ... where id = (subquery)) */ - pathman_test_1 ----------------- - ok -(1 row) - -select test.pathman_test_2(); /* RuntimeAppend (select ... where id = any(subquery)) */ - pathman_test_2 ----------------- - ok -(1 row) - -select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ - pathman_test_3 ----------------- - ok -(1 row) - -select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ - pathman_test_4 ----------------- - ok -(1 row) +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- +(0 rows) -select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ - pathman_test_5 ----------------- - ok +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 (1 row) -set pg_pathman.enable_runtimeappend = off; -set pg_pathman.enable_runtimemergeappend = off; -set enable_mergejoin = on; -set enable_hashjoin = on; -drop table test.run_values, test.runtime_test_1, test.runtime_test_2, test.runtime_test_3, test.vals cascade; -NOTICE: drop cascades to 16 other objects +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects /* * Test split and merge */ @@ -935,7 +883,7 @@ NOTICE: drop cascades to 16 other objects SELECT pathman.split_range_partition('test.num_range_rel_1', 500); split_range_partition ----------------------- - {0,1000} + test.num_range_rel_5 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 
AND 700; @@ -948,17 +896,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 70 Index Cond: (id <= 700) (5 rows) +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); - split_range_partition -------------------------- - {01-01-2015,02-01-2015} + split_range_partition +----------------------- + test.range_rel_5 (1 row) /* Merge two partitions into one */ SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); merge_range_partitions ------------------------ - + test.num_range_rel_1 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; @@ -972,7 +928,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 70 SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); merge_range_partitions ------------------------ - + test.range_rel_1 (1 row) /* Append and prepend partitions */ @@ -1008,6 +964,35 @@ SELECT pathman.drop_range_partition('test.num_range_rel_7'); test.num_range_rel_7 (1 row) +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | 
test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + SELECT pathman.append_range_partition('test.range_rel'); append_range_partition ------------------------ @@ -1045,7 +1030,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' A (3 rows) SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); -ERROR: Specified range overlaps with existing partitions +ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); add_range_partition --------------------- @@ -1064,7 +1049,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' A CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); -ERROR: Specified range overlaps with existing partitions +ERROR: specified range [01-01-2014, 01-01-2015) overlaps with existing partitions SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); attach_range_partition ------------------------ @@ -1103,12 +1088,68 @@ CREATE TABLE test.range_rel_test1 ( txt TEXT, abc INTEGER); SELECT 
pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); -ERROR: Partition must have the exact same structure as parent +ERROR: partition must have a compatible tuple format CREATE TABLE test.range_rel_test2 ( id SERIAL PRIMARY KEY, dt TIMESTAMP); SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); -ERROR: Partition must have the exact same structure as parent +ERROR: column "dt" in child table must be marked NOT NULL +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); + add_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | 
test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity + -> Seq Scan on range_rel_8 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on range_rel_6 + -> Seq Scan on range_rel_plus_infinity +(3 rows) + /* * Zero partitions count and adding partitions with specified name */ @@ -1117,16 +1158,15 @@ CREATE TABLE test.zero( value INT NOT NULL); INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); -NOTICE: sequence "zero_seq" does not exist, skipping create_range_partitions ------------------------- 0 (1 row) SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); -ERROR: Cannot append to empty partitions set +ERROR: relation "zero" has no partitions SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); -ERROR: Cannot prepend to empty partitions set +ERROR: relation "zero" has no partitions SELECT pathman.add_range_partition('test.zero', 50, 
70, 'test.zero_50'); add_range_partition --------------------- @@ -1148,11 +1188,11 @@ SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); split_range_partition ----------------------- - {50,70} + test."test.zero_60" (1 row) DROP TABLE test.zero CASCADE; -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 5 other objects /* * Check that altering table columns doesn't break trigger */ @@ -1164,14 +1204,79 @@ SELECT * FROM test.hash_rel WHERE id = 123; 123 | 456 | 789 (1 row) +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers +----------------------+---------------------------------------------------------------------------------------+---------- + 
test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: column "value" in child table must be marked NOT NULL +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + /* * Clean up */ SELECT pathman.drop_partitions('test.hash_rel'); -NOTICE: drop cascades to 3 other objects -NOTICE: 2 rows copied from test.hash_rel_2 NOTICE: 3 rows copied from test.hash_rel_1 -NOTICE: 2 rows copied from test.hash_rel_0 +NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from test.hash_rel_extern drop_partitions ----------------- 3 @@ -1190,7 +1295,6 @@ SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); (1 row) SELECT pathman.drop_partitions('test.hash_rel', TRUE); -NOTICE: function test.hash_rel_upd_trig_func() does not exist, skipping drop_partitions ----------------- 3 @@ -1204,24 +1308,50 @@ SELECT COUNT(*) FROM ONLY test.hash_rel; DROP TABLE test.hash_rel CASCADE; SELECT pathman.drop_partitions('test.num_range_rel'); -NOTICE: drop cascades to 4 other objects -NOTICE: 0 rows copied from test.num_range_rel_6 -NOTICE: 2 rows copied from test.num_range_rel_4 -NOTICE: 1000 rows copied from test.num_range_rel_3 +NOTICE: 999 rows copied from 
test.num_range_rel_1 NOTICE: 1000 rows copied from test.num_range_rel_2 -NOTICE: 998 rows copied from test.num_range_rel_1 +NOTICE: 1000 rows copied from test.num_range_rel_3 drop_partitions ----------------- - 5 + 3 (1 row) DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; -NOTICE: drop cascades to 7 other objects +NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, - dt TIMESTAMP NOT NULL); + dt TIMESTAMP NOT NULL, + data TEXT); SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); create_range_partitions ------------------------- @@ -1241,9 +1371,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; (3 rows) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; - id | dt ------+-------------------------- - 137 | Mon Dec 15 00:00:00 2014 + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; @@ 
-1255,31 +1385,42 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; (3 rows) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; - id | dt -----+-------------------------- - 74 | Sun Mar 15 00:00:00 2015 + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | (1 row) -SELECT pathman.disable_auto('test.range_rel'); - disable_auto --------------- +SELECT pathman.set_auto('test.range_rel', false); + set_auto +---------- (1 row) INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); -ERROR: There is no suitable partition for key 'Mon Jun 01 00:00:00 2015' -SELECT pathman.enable_auto('test.range_rel'); - enable_auto -------------- +ERROR: no suitable partition for key 'Mon Jun 01 00:00:00 2015' +SELECT pathman.set_auto('test.range_rel', true); + set_auto +---------- (1 row) INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days +(1 row) + DROP TABLE test.range_rel CASCADE; -NOTICE: drop cascades to 20 other objects +NOTICE: drop cascades to 21 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval ----------+---------+----------+---------------- + partrel | expr | parttype | range_interval +---------+------+----------+---------------- (0 rows) /* Check overlaps */ @@ -1292,48 +1433,28 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4 4 (1 row) -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 4001, 5000); - check_overlap ---------------- - t -(1 row) - -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 4000, 5000); - check_overlap ---------------- - t 
-(1 row) - -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 3999, 5000); - check_overlap ---------------- - t -(1 row) - -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 3000, 3500); - check_overlap ---------------- - t -(1 row) - -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 0, 999); - check_overlap ---------------- - f -(1 row) - -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 0, 1000); - check_overlap ---------------- - f +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +ERROR: specified range [4001, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +ERROR: specified range [4000, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +ERROR: specified range [3999, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +ERROR: specified range [3000, 3500) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); + check_range_available +----------------------- + (1 row) -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 0, 1001); - check_overlap ---------------- - t +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); + check_range_available +----------------------- + (1 row) +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); +ERROR: specified range [0, 1001) overlaps with existing partitions /* CaMeL cAsE table names and attributes */ CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); @@ -1355,55 +1476,8 @@ SELECT * FROM test."TeSt"; 1 | 1 (3 rows) -SELECT pathman.create_hash_update_trigger('test."TeSt"'); - create_hash_update_trigger ----------------------------- - 
test."TeSt_upd_trig_func" -(1 row) - -UPDATE test."TeSt" SET a = 1; -SELECT * FROM test."TeSt"; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - -SELECT * FROM test."TeSt" WHERE a = 1; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; - QUERY PLAN ----------------------------- - Append - -> Seq Scan on "TeSt_2" - Filter: (a = 1) -(3 rows) - -SELECT pathman.drop_partitions('test."TeSt"'); +DROP TABLE test."TeSt" CASCADE; NOTICE: drop cascades to 3 other objects -NOTICE: 3 rows copied from test."TeSt_2" -NOTICE: 0 rows copied from test."TeSt_1" -NOTICE: 0 rows copied from test."TeSt_0" - drop_partitions ------------------ - 3 -(1 row) - -SELECT * FROM test."TeSt"; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - CREATE TABLE test."RangeRel" ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, @@ -1411,7 +1485,6 @@ CREATE TABLE test."RangeRel" ( INSERT INTO test."RangeRel" (dt, txt) SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); -NOTICE: sequence "RangeRel_seq" does not exist, skipping create_range_partitions ------------------------- 3 @@ -1432,39 +1505,21 @@ SELECT pathman.prepend_range_partition('test."RangeRel"'); SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); merge_range_partitions ------------------------ - + test."RangeRel_1" (1 row) SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); - split_range_partition -------------------------- - {12-31-2014,01-02-2015} -(1 row) - -SELECT pathman.drop_partitions('test."RangeRel"'); -NOTICE: function test.RangeRel_upd_trig_func() does not exist, skipping -NOTICE: 1 rows copied from test."RangeRel_6" -NOTICE: 0 rows copied from test."RangeRel_4" -NOTICE: 1 rows copied from test."RangeRel_3" -NOTICE: 1 rows 
copied from test."RangeRel_2" -NOTICE: 0 rows copied from test."RangeRel_1" - drop_partitions ------------------ - 5 -(1 row) - -SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01'::DATE, '2015-01-05'::DATE, '1 day'::INTERVAL); - create_partitions_from_range ------------------------------- - 5 + split_range_partition +----------------------- + test."RangeRel_6" (1 row) DROP TABLE test."RangeRel" CASCADE; -NOTICE: drop cascades to 5 other objects +NOTICE: drop cascades to 6 other objects SELECT * FROM pathman.pathman_config; - partrel | attname | parttype | range_interval ---------------------+---------+----------+---------------- - test.num_range_rel | id | 2 | 1000 + partrel | expr | parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 (1 row) CREATE TABLE test."RangeRel" ( @@ -1477,39 +1532,23 @@ SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); 3 (1 row) -SELECT pathman.drop_partitions('test."RangeRel"'); -NOTICE: function test.RangeRel_upd_trig_func() does not exist, skipping -NOTICE: 0 rows copied from test."RangeRel_3" -NOTICE: 0 rows copied from test."RangeRel_2" -NOTICE: 0 rows copied from test."RangeRel_1" - drop_partitions ------------------ - 3 -(1 row) - -SELECT pathman.create_partitions_from_range('test."RangeRel"', 'id', 1, 300, 100); - create_partitions_from_range ------------------------------- - 3 -(1 row) - DROP TABLE test."RangeRel" CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 4 other objects DROP EXTENSION pg_pathman; -/* Test that everithing works fine without schemas */ +/* Test that everything works fine without schemas */ CREATE EXTENSION pg_pathman; /* Hash */ -CREATE TABLE hash_rel ( +CREATE TABLE test.hash_rel ( id SERIAL PRIMARY KEY, value INTEGER NOT NULL); -INSERT INTO hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; -SELECT create_hash_partitions('hash_rel', 'value', 
3); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); create_hash_partitions ------------------------ 3 (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM hash_rel WHERE id = 1234; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; QUERY PLAN ------------------------------------------------------ Append @@ -1522,43 +1561,42 @@ EXPLAIN (COSTS OFF) SELECT * FROM hash_rel WHERE id = 1234; (7 rows) /* Range */ -CREATE TABLE range_rel ( +CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, value INTEGER); -INSERT INTO range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; -SELECT create_range_partitions('range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); -NOTICE: sequence "range_rel_seq" does not exist, skipping +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); create_range_partitions ------------------------- 12 (1 row) -SELECT merge_range_partitions('range_rel_1', 'range_rel_2'); +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); merge_range_partitions ------------------------ - + test.range_rel_1 (1 row) -SELECT split_range_partition('range_rel_1', '2010-02-15'::date); - split_range_partition -------------------------- - {01-01-2010,03-01-2010} +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); + split_range_partition +----------------------- + test.range_rel_13 (1 row) -SELECT append_range_partition('range_rel'); +SELECT append_range_partition('test.range_rel'); append_range_partition ------------------------ - public.range_rel_14 + test.range_rel_14 (1 row) -SELECT prepend_range_partition('range_rel'); +SELECT 
prepend_range_partition('test.range_rel'); prepend_range_partition ------------------------- - public.range_rel_15 + test.range_rel_15 (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt < '2010-03-01'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; QUERY PLAN -------------------------------- Append @@ -1567,7 +1605,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt < '2010-03-01'; -> Seq Scan on range_rel_13 (4 rows) -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt > '2010-12-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; QUERY PLAN -------------------------------------------------------------------------------- Append @@ -1576,134 +1614,238 @@ EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt > '2010-12-15'; -> Seq Scan on range_rel_14 (4 rows) -/* Temporary table for JOINs */ -CREATE TABLE tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); -INSERT INTO tmp VALUES (1, 1), (2, 2); -/* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE range_rel SET value = 111 WHERE dt = '2010-06-15'; - QUERY PLAN --------------------------------------------------------------------------------- - Update on range_rel_6 - -> Seq Scan on range_rel_6 - Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) -(3 rows) +/* Create range partitions from whole range */ +SELECT drop_partitions('test.range_rel'); +NOTICE: 45 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 30 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from test.range_rel_13 +NOTICE: 0 rows copied from 
test.range_rel_14 +NOTICE: 0 rows copied from test.range_rel_15 + drop_partitions +----------------- + 14 +(1 row) + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) -UPDATE range_rel SET value = 111 WHERE dt = '2010-06-15'; -SELECT * FROM range_rel WHERE dt = '2010-06-15'; - id | dt | value ------+--------------------------+------- - 166 | Tue Jun 15 00:00:00 2010 | 111 +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM bool_test; + count +------- + 100 (1 row) -EXPLAIN (COSTS OFF) DELETE FROM range_rel WHERE dt = '2010-06-15'; - QUERY PLAN --------------------------------------------------------------------------------- - Delete on range_rel_6 - -> Seq Scan on range_rel_6 - Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) -(3 rows) +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) -DELETE FROM range_rel WHERE dt = '2010-06-15'; -SELECT * FROM range_rel WHERE dt = '2010-06-15'; - id | dt | value -----+----+------- -(0 rows) +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) -EXPLAIN (COSTS OFF) UPDATE range_rel r SET value = t.value FROM tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- - Update on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(7 rows) +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ 
+CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); + create_range_partitions +------------------------- + 4 +(1 row) -UPDATE range_rel r SET value = t.value FROM tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM range_rel r USING tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- - Delete on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using 
special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) (7 rows) -DELETE FROM range_rel r USING tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -/* Create range partitions from whole range */ -SELECT drop_partitions('range_rel'); -NOTICE: function public.range_rel_upd_trig_func() does not exist, skipping -NOTICE: 0 rows copied from range_rel_15 -NOTICE: 0 rows copied from range_rel_14 -NOTICE: 14 rows copied from range_rel_13 -NOTICE: 31 rows copied from range_rel_12 -NOTICE: 30 rows copied from range_rel_11 -NOTICE: 31 rows copied from range_rel_10 -NOTICE: 30 rows copied from range_rel_9 -NOTICE: 31 rows copied from range_rel_8 -NOTICE: 31 rows copied from range_rel_7 -NOTICE: 29 rows copied from range_rel_6 -NOTICE: 31 rows copied from range_rel_5 -NOTICE: 30 rows copied from range_rel_4 -NOTICE: 31 rows copied from range_rel_3 -NOTICE: 44 rows copied from range_rel_1 - drop_partitions ------------------ - 14 +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); + set_enable_parent +------------------- + (1 row) -SELECT create_partitions_from_range('range_rel', 'id', 1, 1000, 100); - create_partitions_from_range ------------------------------- - 10 +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT 
create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); + create_range_partitions +------------------------- + 0 (1 row) -SELECT drop_partitions('range_rel', TRUE); -NOTICE: function public.range_rel_upd_trig_func() does not exist, skipping - drop_partitions ------------------ - 10 +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); + add_range_partition +--------------------------- + test.index_on_childs_1_1k (1 row) -SELECT create_partitions_from_range('range_rel', 'dt', '2015-01-01'::date, '2015-12-01'::date, '1 month'::interval); - create_partitions_from_range ------------------------------- - 12 +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); + append_range_partition +---------------------------- + test.index_on_childs_1k_2k (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt = '2015-12-15'; - QUERY PLAN --------------------------------------------------------------------------------- +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); + append_range_partition +---------------------------- + test.index_on_childs_2k_3k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); + append_range_partition +---------------------------- + test.index_on_childs_3k_4k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); + append_range_partition +---------------------------- + test.index_on_childs_4k_5k +(1 row) + +SELECT set_enable_parent('test.index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------ Append - -> Seq Scan on range_rel_12 - Filter: (dt = 'Tue Dec 15 00:00:00 2015'::timestamp without time 
zone) -(3 rows) + -> Index Scan using index_on_childs_c2_idx on index_on_childs + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k + Index Cond: (c2 = 500) + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) -CREATE TABLE messages(id SERIAL PRIMARY KEY, msg TEXT); -CREATE TABLE replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES messages(id), msg TEXT); -INSERT INTO messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; -INSERT INTO replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; -SELECT create_range_partitions('messages', 'id', 1, 100, 2); -WARNING: Foreign key 'replies_message_id_fkey' references to the relation 'messages' -ERROR: Relation "messages" is referenced from other relations -ALTER TABLE replies DROP CONSTRAINT replies_message_id_fkey; -SELECT create_range_partitions('messages', 'id', 1, 100, 2); -NOTICE: sequence "messages_seq" does not exist, skipping +DROP TABLE test.provided_part_names CASCADE; +NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE 
test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); create_range_partitions ------------------------- - 2 + 1 (1 row) -EXPLAIN (COSTS OFF) SELECT * FROM messages; - QUERY PLAN ------------------------------- - Append - -> Seq Scan on messages_1 - -> Seq Scan on messages_2 -(3 rows) +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out new file mode 100644 index 00000000..92a86727 --- /dev/null +++ b/expected/pathman_basic_1.out @@ -0,0 +1,1834 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER); +INSERT INTO test.hash_rel VALUES (1, 1); +INSERT INTO test.hash_rel VALUES (2, 2); +INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- +(0 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', true); + set_enable_parent +------------------- + +(1 row) + 
+EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 0 rows copied from test.hash_rel_0 +NOTICE: 0 rows copied from test.hash_rel_1 +NOTICE: 0 rows copied from test.hash_rel_2 + drop_partitions +----------------- + 3 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.hash_rel VALUES (4, 4); +INSERT INTO test.hash_rel VALUES (5, 5); +INSERT INTO test.hash_rel VALUES (6, 6); +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 6 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column "dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM 
+\set VERBOSITY terse +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.range_rel; + count +------- + 120 +(1 row) + +SELECT COUNT(*) FROM ONLY test.range_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 3000 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + 
+SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + +(1 row) + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------- + Seq Scan on improved_dummy_11 + Filter: (id = 101) +(2 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 12 other objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER 
BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test pathman_rel_pathlist_hook() with 
INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... */ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 +(1 row) + +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 6 other objects +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan 
on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN 
+----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + Filter: (id >= 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_1 + Filter: (id >= 500) + -> Seq Scan on num_range_rel_2 + Filter: (id < 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN 
+--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_1 + Filter: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_2 + Filter: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id = 2500) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN 
+---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id >= 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 500) + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id < 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 +(5 rows) + 
+EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id <= 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN 
+------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt DESC; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan Backward using range_rel_4_dt_idx on range_rel_4 + -> Index Scan Backward using range_rel_3_dt_idx on range_rel_3 + -> Index Scan Backward using range_rel_2_dt_idx on range_rel_2 + -> Index Scan Backward using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +/* + * Sorting + */ +SET enable_indexscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> 
Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Merge Append + Sort Key: range_rel_1.dt + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(4 rows) + +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +-------------------------------- + Limit + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + QUERY PLAN +-------------------------------- + Limit + -> Seq Scan on sql_inline_2 + Filter: (id = 1) +(3 rows) + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- 
+(0 rows) + +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 +(1 row) + +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects +/* + * Test split and merge + */ +/* Split first partition in half */ +SELECT pathman.split_range_partition('test.num_range_rel_1', 500); + split_range_partition +----------------------- + test.num_range_rel_5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 100) + -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 + Index Cond: (id <= 700) +(5 rows) + +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + +SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); + split_range_partition +----------------------- + test.range_rel_5 +(1 row) + +/* Merge two partitions into one */ +SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); + merge_range_partitions +------------------------ + test.num_range_rel_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: ((id >= 100) AND (id <= 700)) +(2 rows) + +SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +/* Append and prepend partitions */ +SELECT pathman.append_range_partition('test.num_range_rel'); + append_range_partition 
+------------------------ + test.num_range_rel_6 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.num_range_rel'); + prepend_range_partition +------------------------- + test.num_range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition('test.num_range_rel_7'); + drop_range_partition +---------------------- + test.num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + +SELECT 
pathman.append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_7_dt_idx on range_rel_7 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +SELECT pathman.drop_range_partition('test.range_rel_7'); + drop_range_partition +---------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + add_range_partition +--------------------- + test.range_rel_8 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_8_dt_idx on range_rel_8 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 
'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +ERROR: specified range [01-01-2014, 01-01-2015) overlaps with existing partitions +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); + attach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive + Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp without time zone) + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +SELECT pathman.detach_range_partition('test.range_rel_archive'); + detach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +CREATE TABLE test.range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: partition must have a compatible tuple format +CREATE TABLE test.range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT 
pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: column "dt" in child table must be marked NOT NULL +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); + add_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + 
test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity + -> Seq Scan on range_rel_8 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on range_rel_6 + -> Seq Scan on range_rel_plus_infinity +(3 rows) + +/* + * Zero partitions count and adding partitions with specified name + */ +CREATE TABLE test.zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); +ERROR: relation "zero" has no partitions +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); +ERROR: relation "zero" has no partitions +SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); + add_range_partition +--------------------- + test.zero_50 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); + append_range_partition +------------------------ + test.zero_appended +(1 row) + +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); + prepend_range_partition +------------------------- + test.zero_prepended +(1 row) + +SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); + 
split_range_partition +----------------------- + test."test.zero_60" +(1 row) + +DROP TABLE test.zero CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Check that altering table columns doesn't break trigger + */ +ALTER TABLE test.hash_rel ADD COLUMN abc int; +INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); +SELECT * FROM test.hash_rel WHERE id = 123; + id | value | abc +-----+-------+----- + 123 | 456 | 789 +(1 row) + +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers +----------------------+---------------------------------------------------------------------------------------+---------- + test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE 
INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: column "value" in child table must be marked NOT NULL +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +/* + * Clean up + */ +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 3 rows copied from test.hash_rel_1 +NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from test.hash_rel_extern + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 7 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT pathman.drop_partitions('test.hash_rel', TRUE); + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +DROP TABLE test.hash_rel CASCADE; +SELECT pathman.drop_partitions('test.num_range_rel'); +NOTICE: 999 rows copied from test.num_range_rel_1 +NOTICE: 1000 rows copied from test.num_range_rel_2 +NOTICE: 1000 rows copied from test.num_range_rel_3 + drop_partitions +----------------- + 3 +(1 row) + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE 
NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test automatic partition creation */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.range_rel (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); +INSERT INTO test.range_rel (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_14 + Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_8 + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time 
zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | +(1 row) + +SELECT pathman.set_auto('test.range_rel', false); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +ERROR: no suitable partition for key 'Mon Jun 01 00:00:00 2015' +SELECT pathman.set_auto('test.range_rel', true); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 21 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +---------+------+----------+---------------- +(0 rows) + +/* Check overlaps */ +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +ERROR: specified range [4001, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +ERROR: specified range [4000, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +ERROR: specified range [3999, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +ERROR: specified range [3000, 3500) overlaps with existing partitions +SELECT 
pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); +ERROR: specified range [0, 1001) overlaps with existing partitions +/* CaMeL cAsE table names and attributes */ +CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); +SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); +ERROR: relation "test.test" does not exist at character 39 +SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO test."TeSt" VALUES (1, 1); +INSERT INTO test."TeSt" VALUES (2, 2); +INSERT INTO test."TeSt" VALUES (3, 3); +SELECT * FROM test."TeSt"; + a | b +---+--- + 3 | 3 + 2 | 2 + 1 | 1 +(3 rows) + +DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test."RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT pathman.append_range_partition('test."RangeRel"'); + append_range_partition +------------------------ + test."RangeRel_4" +(1 row) + +SELECT pathman.prepend_range_partition('test."RangeRel"'); + prepend_range_partition +------------------------- + test."RangeRel_5" +(1 row) + +SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); + merge_range_partitions +------------------------ + test."RangeRel_1" +(1 row) + +SELECT pathman.split_range_partition('test."RangeRel_1"', 
'2015-01-01'::DATE); + split_range_partition +----------------------- + test."RangeRel_6" +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 6 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 +(1 row) + +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; +/* Test that everything works fine without schemas */ +CREATE EXTENSION pg_pathman; +/* Hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; + QUERY PLAN +------------------------------------------------------ + Append + -> Index Scan using hash_rel_0_pkey on hash_rel_0 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_1_pkey on hash_rel_1 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_2_pkey on hash_rel_2 + Index Cond: (id = 1234) +(7 rows) + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); + 
merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); + split_range_partition +----------------------- + test.range_rel_13 +(1 row) + +SELECT append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_14 +(1 row) + +SELECT prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_15 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on range_rel_15 + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_13 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_12 + Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on range_rel_14 +(4 rows) + +/* Create range partitions from whole range */ +SELECT drop_partitions('test.range_rel'); +NOTICE: 45 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 30 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from test.range_rel_13 +NOTICE: 0 rows copied from test.range_rel_14 +NOTICE: 0 rows copied from test.range_rel_15 + drop_partitions +----------------- + 14 +(1 row) + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions 
+------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq 
Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(7 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); + add_range_partition +--------------------------- + test.index_on_childs_1_1k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); + append_range_partition +---------------------------- + test.index_on_childs_1k_2k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); + append_range_partition +---------------------------- + test.index_on_childs_2k_3k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); + append_range_partition +---------------------------- + test.index_on_childs_3k_4k +(1 row) + +SELECT 
append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); + append_range_partition +---------------------------- + test.index_on_childs_4k_5k +(1 row) + +SELECT set_enable_parent('test.index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------ + Append + -> Index Scan using index_on_childs_c2_idx on index_on_childs + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k + Index Cond: (c2 = 500) + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) + +DROP TABLE test.provided_part_names CASCADE; +NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO 
test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_basic_2.out b/expected/pathman_basic_2.out new file mode 100644 index 00000000..ec180fdb --- /dev/null +++ b/expected/pathman_basic_2.out @@ -0,0 +1,1834 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER); +INSERT INTO test.hash_rel VALUES (1, 1); +INSERT INTO test.hash_rel VALUES (2, 2); +INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel hash_rel_1 + -> Seq Scan on hash_rel_0 hash_rel_2 + -> Seq Scan on hash_rel_1 hash_rel_3 + -> Seq Scan on hash_rel_2 hash_rel_4 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- +(0 rows) + +SELECT 
pathman.set_enable_parent('test.hash_rel', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel hash_rel_1 + -> Seq Scan on hash_rel_0 hash_rel_2 + -> Seq Scan on hash_rel_1 hash_rel_3 + -> Seq Scan on hash_rel_2 hash_rel_4 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 0 rows copied from test.hash_rel_0 +NOTICE: 0 rows copied from test.hash_rel_1 +NOTICE: 0 rows copied from test.hash_rel_2 + drop_partitions +----------------- + 3 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.hash_rel VALUES (4, 4); +INSERT INTO test.hash_rel VALUES (5, 5); +INSERT INTO test.hash_rel VALUES (6, 6); +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 6 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column "dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, 
+ expression, + partition_data)" +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.range_rel; + count +------- + 120 +(1 row) + +SELECT COUNT(*) FROM ONLY test.range_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 3000 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------------------------------ + 
Append + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 improved_dummy_2 + Filter: (id = 101) +(5 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy improved_dummy_1 + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 improved_dummy_2 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 improved_dummy_3 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + +(1 row) + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------- + Seq Scan on improved_dummy_11 improved_dummy + Filter: (id = 101) +(2 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy improved_dummy_1 + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 improved_dummy_2 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 12 other objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT 
NULL); +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max 
+---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... */ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +------------------------------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select insert_into_select_1 + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 insert_into_select_2 + -> Seq Scan on insert_into_select_2 insert_into_select_3 + -> Seq Scan on insert_into_select_3 insert_into_select_4 + -> Seq Scan on insert_into_select_4 insert_into_select_5 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 +(1 
row) + +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 6 other objects +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_3 num_range_rel + Filter: (2500 = id) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_3 num_range_rel_1 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 
num_range_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_3 num_range_rel_1 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 num_range_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_2 num_range_rel_1 + -> Seq Scan on num_range_rel_3 num_range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_2 num_range_rel_1 + Filter: (id >= 1500) + -> Seq Scan on num_range_rel_3 num_range_rel_2 + Filter: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_1 + Filter: (id >= 500) + -> Seq Scan on num_range_rel_2 + Filter: (id < 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 
range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +----------------------------------- + Seq Scan on range_rel_2 range_rel +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 range_rel_2 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_1 + Filter: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_2 + Filter: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN 
+--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------ + Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel + Index Cond: (id = 2500) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_1 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 num_range_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_1 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 num_range_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_2 num_range_rel_1 + -> Seq Scan on num_range_rel_3 num_range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +-------------------------------------------------------------------------------- + 
Append + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 num_range_rel_1 + Index Cond: (id >= 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_2 + Index Cond: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 500) + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id < 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id <= 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +----------------------------------- + Seq Scan on range_rel_2 range_rel +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 + Index Cond: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 range_rel_2 + Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index 
Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt DESC; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan Backward using range_rel_4_dt_idx on range_rel_4 + -> Index Scan Backward using range_rel_3_dt_idx on range_rel_3 + -> Index Scan Backward using range_rel_2_dt_idx on range_rel_2 + -> Index Scan Backward using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +/* + * Sorting + */ +SET enable_indexscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Merge Append + Sort Key: range_rel_1.dt + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on 
range_rel_2 +(4 rows) + +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +------------------------------------------- + Limit + -> Seq Scan on sql_inline_0 sql_inline + Filter: (id = 5) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + QUERY PLAN +------------------------------------------- + Limit + -> Seq Scan on sql_inline_2 sql_inline + Filter: (id = 1) +(3 rows) + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- +(0 rows) + +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 +(1 row) + +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects +/* + * Test split and merge + */ +/* Split first partition in half */ +SELECT pathman.split_range_partition('test.num_range_rel_1', 500); + split_range_partition +----------------------- + test.num_range_rel_5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 100) + -> Index Scan using 
num_range_rel_5_pkey on num_range_rel_5 num_range_rel_2 + Index Cond: (id <= 700) +(5 rows) + +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + +SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); + split_range_partition +----------------------- + test.range_rel_5 +(1 row) + +/* Merge two partitions into one */ +SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); + merge_range_partitions +------------------------ + test.num_range_rel_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +------------------------------------------------------------------------ + Index Scan using num_range_rel_1_pkey on num_range_rel_1 num_range_rel + Index Cond: ((id >= 100) AND (id <= 700)) +(2 rows) + +SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +/* Append and prepend partitions */ +SELECT pathman.append_range_partition('test.num_range_rel'); + append_range_partition +------------------------ + test.num_range_rel_6 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_6 num_range_rel +(1 row) + +SELECT pathman.prepend_range_partition('test.num_range_rel'); + prepend_range_partition +------------------------- + test.num_range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_7 num_range_rel +(1 row) + +SELECT pathman.drop_range_partition('test.num_range_rel_7'); + 
drop_range_partition +---------------------- + test.num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + +SELECT pathman.append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_7_dt_idx on range_rel_7 range_rel_1 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time 
zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +SELECT pathman.drop_range_partition('test.range_rel_7'); + drop_range_partition +---------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + add_range_partition +--------------------- + test.range_rel_8 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_8_dt_idx on range_rel_8 range_rel_1 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +ERROR: specified range [01-01-2014, 01-01-2015) overlaps with existing partitions +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); + attach_range_partition +------------------------ + test.range_rel_archive +(1 row) + 
+EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive range_rel_1 + Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp without time zone) + -> Seq Scan on range_rel_8 range_rel_2 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_3 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +SELECT pathman.detach_range_partition('test.range_rel_archive'); + detach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 range_rel_1 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +CREATE TABLE test.range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: partition must have a compatible tuple format +CREATE TABLE test.range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: column "dt" in child table must be marked NOT NULL +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); + add_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 
'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN 
+-------------------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity range_rel_1 + -> Seq Scan on range_rel_8 range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on range_rel_6 range_rel_1 + -> Seq Scan on range_rel_plus_infinity range_rel_2 +(3 rows) + +/* + * Zero partitions count and adding partitions with specified name + */ +CREATE TABLE test.zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); +ERROR: relation "zero" has no partitions +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); +ERROR: relation "zero" has no partitions +SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); + add_range_partition +--------------------- + test.zero_50 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); + append_range_partition +------------------------ + test.zero_appended +(1 row) + +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); + prepend_range_partition +------------------------- + test.zero_prepended +(1 row) + +SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); + split_range_partition +----------------------- + test."test.zero_60" +(1 row) + +DROP TABLE test.zero CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Check that altering table columns doesn't break trigger + */ +ALTER TABLE test.hash_rel ADD COLUMN abc int; +INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); +SELECT * FROM test.hash_rel WHERE id = 123; + id | value | abc +-----+-------+----- + 123 | 456 | 
789 +(1 row) + +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on hash_rel_extern hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers +----------------------+---------------------------------------------------------------------------------------+---------- + test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we are replacing partition must have exact same 
structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: column "value" in child table must be marked NOT NULL +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on hash_rel_extern hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +/* + * Clean up + */ +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 3 rows copied from test.hash_rel_1 +NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from test.hash_rel_extern + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 7 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT pathman.drop_partitions('test.hash_rel', TRUE); + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +DROP TABLE test.hash_rel CASCADE; +SELECT pathman.drop_partitions('test.num_range_rel'); +NOTICE: 999 rows copied from test.num_range_rel_1 +NOTICE: 1000 rows copied from test.num_range_rel_2 +NOTICE: 1000 rows copied from test.num_range_rel_3 + drop_partitions +----------------- + 3 +(1 row) + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT 
reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test automatic partition creation */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.range_rel (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); +INSERT INTO test.range_rel (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_14 range_rel + Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_8 range_rel + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | +(1 row) + +SELECT pathman.set_auto('test.range_rel', false); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); 
+ERROR: no suitable partition for key 'Mon Jun 01 00:00:00 2015' +SELECT pathman.set_auto('test.range_rel', true); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 21 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +---------+------+----------+---------------- +(0 rows) + +/* Check overlaps */ +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +ERROR: specified range [4001, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +ERROR: specified range [4000, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +ERROR: specified range [3999, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +ERROR: specified range [3000, 3500) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); + check_range_available +----------------------- + +(1 row) + +SELECT 
pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); +ERROR: specified range [0, 1001) overlaps with existing partitions +/* CaMeL cAsE table names and attributes */ +CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); +SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); +ERROR: relation "test.test" does not exist at character 39 +SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO test."TeSt" VALUES (1, 1); +INSERT INTO test."TeSt" VALUES (2, 2); +INSERT INTO test."TeSt" VALUES (3, 3); +SELECT * FROM test."TeSt"; + a | b +---+--- + 3 | 3 + 2 | 2 + 1 | 1 +(3 rows) + +DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test."RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT pathman.append_range_partition('test."RangeRel"'); + append_range_partition +------------------------ + test."RangeRel_4" +(1 row) + +SELECT pathman.prepend_range_partition('test."RangeRel"'); + prepend_range_partition +------------------------- + test."RangeRel_5" +(1 row) + +SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); + merge_range_partitions +------------------------ + test."RangeRel_1" +(1 row) + +SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); + split_range_partition +----------------------- + test."RangeRel_6" +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 6 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval 
+--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 +(1 row) + +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; +/* Test that everything works fine without schemas */ +CREATE EXTENSION pg_pathman; +/* Hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; + QUERY PLAN +----------------------------------------------------------------- + Append + -> Index Scan using hash_rel_0_pkey on hash_rel_0 hash_rel_1 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_1_pkey on hash_rel_1 hash_rel_2 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_2_pkey on hash_rel_2 hash_rel_3 + Index Cond: (id = 1234) +(7 rows) + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); + split_range_partition +----------------------- + test.range_rel_13 +(1 
row) + +SELECT append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_14 +(1 row) + +SELECT prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_15 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_15 range_rel_1 + -> Seq Scan on range_rel_1 range_rel_2 + -> Seq Scan on range_rel_13 range_rel_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_12 range_rel_1 + Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on range_rel_14 range_rel_2 +(4 rows) + +/* Create range partitions from whole range */ +SELECT drop_partitions('test.range_rel'); +NOTICE: 45 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 30 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from test.range_rel_13 +NOTICE: 0 rows copied from test.range_rel_14 +NOTICE: 0 rows copied from test.range_rel_15 + drop_partitions +----------------- + 14 +(1 row) + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM 
bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s special_case_1_ind_o_s_1 + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq Scan on special_case_1_ind_o_s_1 special_case_1_ind_o_s_2 + Filter: (comment = 'a'::text) + 
-> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 special_case_1_ind_o_s_3 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(7 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); + add_range_partition +--------------------------- + test.index_on_childs_1_1k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); + append_range_partition +---------------------------- + test.index_on_childs_1k_2k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); + append_range_partition +---------------------------- + test.index_on_childs_2k_3k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); + append_range_partition +---------------------------- + test.index_on_childs_3k_4k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 
'test.index_on_childs_4k_5k'); + append_range_partition +---------------------------- + test.index_on_childs_4k_5k +(1 row) + +SELECT set_enable_parent('test.index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Append + -> Index Scan using index_on_childs_c2_idx on index_on_childs index_on_childs_1 + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k index_on_childs_2 + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k index_on_childs_3 + Index Cond: (c2 = 500) + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k index_on_childs_4 + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) + +DROP TABLE test.provided_part_names CASCADE; +NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions 
+------------------------- + 1 +(1 row) + +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out new file mode 100644 index 00000000..4f2ad6b8 --- /dev/null +++ b/expected/pathman_bgw.out @@ -0,0 +1,246 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_bgw; +/* + * Tests for SpawnPartitionsWorker + */ +/* int4, size of Datum == 4 */ +CREATE TABLE test_bgw.test_1(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_1', 'val', 1, 5, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_spawn_using_bgw('test_bgw.test_1', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +INSERT INTO test_bgw.test_1 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+-----------+----------- + test_bgw.test_1 | test_bgw.test_1_1 | 2 | val | 1 | 6 + test_bgw.test_1 | 
test_bgw.test_1_2 | 2 | val | 6 | 11 + test_bgw.test_1 | test_bgw.test_1_3 | 2 | val | 11 | 16 +(3 rows) + +DROP TABLE test_bgw.test_1 CASCADE; +NOTICE: drop cascades to 4 other objects +/* int8, size of Datum == 8 */ +CREATE TABLE test_bgw.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('test_bgw.test_2', 'val', 1, 5, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_spawn_using_bgw('test_bgw.test_2', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +INSERT INTO test_bgw.test_2 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+-----------+----------- + test_bgw.test_2 | test_bgw.test_2_1 | 2 | val | 1 | 6 + test_bgw.test_2 | test_bgw.test_2_2 | 2 | val | 6 | 11 + test_bgw.test_2 | test_bgw.test_2_3 | 2 | val | 11 | 16 +(3 rows) + +DROP TABLE test_bgw.test_2 CASCADE; +NOTICE: drop cascades to 4 other objects +/* numeric, size of Datum == var */ +CREATE TABLE test_bgw.test_3(val NUMERIC NOT NULL); +SELECT create_range_partitions('test_bgw.test_3', 'val', 1, 5, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_spawn_using_bgw('test_bgw.test_3', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +INSERT INTO test_bgw.test_3 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+-----------+----------- + test_bgw.test_3 | test_bgw.test_3_1 | 2 | val | 1 | 6 + test_bgw.test_3 | test_bgw.test_3_2 | 2 | val | 6 | 11 + test_bgw.test_3 | test_bgw.test_3_3 | 2 | val | 11 | 16 +(3 rows) + +DROP TABLE test_bgw.test_3 CASCADE; +NOTICE: drop cascades to 4 other objects +/* date, size of Datum == var */ +CREATE TABLE 
test_bgw.test_4(val DATE NOT NULL); +SELECT create_range_partitions('test_bgw.test_4', 'val', '20170213'::date, '1 day'::interval, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_spawn_using_bgw('test_bgw.test_4', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +INSERT INTO test_bgw.test_4 VALUES ('20170215'); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+------------+------------ + test_bgw.test_4 | test_bgw.test_4_1 | 2 | val | 02-13-2017 | 02-14-2017 + test_bgw.test_4 | test_bgw.test_4_2 | 2 | val | 02-14-2017 | 02-15-2017 + test_bgw.test_4 | test_bgw.test_4_3 | 2 | val | 02-15-2017 | 02-16-2017 +(3 rows) + +DROP TABLE test_bgw.test_4 CASCADE; +NOTICE: drop cascades to 4 other objects +/* test error handling in BGW */ +CREATE TABLE test_bgw.test_5(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_5', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +CREATE OR REPLACE FUNCTION test_bgw.abort_xact(args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE EXCEPTION 'aborting xact!'; +END +$$ language plpgsql; +SELECT set_spawn_using_bgw('test_bgw.test_5', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +SELECT set_init_callback('test_bgw.test_5', 'test_bgw.abort_xact(jsonb)'); + set_init_callback +------------------- + +(1 row) + +INSERT INTO test_bgw.test_5 VALUES (-100); +ERROR: attempt to spawn new partitions of relation "test_5" failed +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+-----------+----------- + test_bgw.test_5 | test_bgw.test_5_1 | 2 | val | 1 | 11 + test_bgw.test_5 | test_bgw.test_5_2 | 2 | val | 11 | 21 +(2 
rows) + +DROP FUNCTION test_bgw.abort_xact(args JSONB); +DROP TABLE test_bgw.test_5 CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Tests for ConcurrentPartWorker + */ +CREATE TABLE test_bgw.conc_part(id INT4 NOT NULL); +INSERT INTO test_bgw.conc_part SELECT generate_series(1, 500); +SELECT create_hash_partitions('test_bgw.conc_part', 'id', 5, false); + create_hash_partitions +------------------------ + 5 +(1 row) + +BEGIN; +/* Also test FOR SHARE/UPDATE conflicts in BGW */ +SELECT * FROM test_bgw.conc_part ORDER BY id LIMIT 1 FOR SHARE; + id +---- + 1 +(1 row) + +/* Run partitioning bgworker */ +SELECT partition_table_concurrently('test_bgw.conc_part', 10, 1); +NOTICE: worker started, you can stop it with the following command: select public.stop_concurrent_part_task('conc_part'); + partition_table_concurrently +------------------------------ + +(1 row) + +/* Wait until bgworker starts */ +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +ROLLBACK; +/* Wait until it finises */ +DO $$ +DECLARE + ops int8; + rows int8; + rows_old int8 := 0; + i int4 := 0; -- protect from endless loop +BEGIN + LOOP + -- get total number of processed rows + SELECT processed + FROM pathman_concurrent_part_tasks + WHERE relid = 'test_bgw.conc_part'::regclass + INTO rows; + + -- get number of partitioning tasks + GET DIAGNOSTICS ops = ROW_COUNT; + + IF ops > 0 THEN + PERFORM pg_sleep(0.2); + + ASSERT rows IS NOT NULL; + + IF rows_old = rows THEN + i = i + 1; + ELSIF rows < rows_old THEN + RAISE EXCEPTION 'rows is decreasing: new %, old %', rows, rows_old; + ELSIF rows > 500 THEN + RAISE EXCEPTION 'processed % rows', rows; + END IF; + ELSE + EXIT; -- exit loop + END IF; + + IF i > 500 THEN + RAISE WARNING 'looks like partitioning bgw is stuck!'; + EXIT; -- exit loop + END IF; + + rows_old = rows; + END LOOP; +END +$$ LANGUAGE plpgsql; +/* Check amount of tasks and rows in parent and partitions */ +SELECT count(*) FROM pathman_concurrent_part_tasks; + count +------- + 0 
+(1 row) + +SELECT count(*) FROM ONLY test_bgw.conc_part; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_bgw.conc_part; + count +------- + 500 +(1 row) + +DROP TABLE test_bgw.conc_part CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test_bgw; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out new file mode 100644 index 00000000..278643ff --- /dev/null +++ b/expected/pathman_cache_pranks.out @@ -0,0 +1,230 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? +SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; +-- create it for further tests +CREATE EXTENSION pg_pathman; +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +ERROR: table 
"part_test" has no partitions +SELECT disable_pathman_for('part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 5 other objects +-- +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('part_test'); + append_range_partition +------------------------ + part_test_301 +(1 row) + +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "part_test" with existing children +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. 
+-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT 
create_range_partitions('part_test', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +BEGIN; + BEGIN AUTONOMOUS; + DROP EXTENSION pg_pathman; + CREATE EXTENSION pg_pathman; + COMMIT; +COMMIT; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 3 other objects +-- finalize +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cache_pranks_1.out b/expected/pathman_cache_pranks_1.out new file mode 100644 index 00000000..4a3982a6 --- /dev/null +++ b/expected/pathman_cache_pranks_1.out @@ -0,0 +1,237 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? +SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; +-- create it for further tests +CREATE EXTENSION pg_pathman; +-- 079797e0d5 +CREATE TABLE part_test(val 
serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +ERROR: table "part_test" has no partitions +SELECT disable_pathman_for('part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 5 other objects +-- +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('part_test'); + append_range_partition +------------------------ + part_test_301 +(1 row) + +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "part_test" with existing children +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. 
+-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT 
create_range_partitions('part_test', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +BEGIN; + BEGIN AUTONOMOUS; +ERROR: syntax error at or near "AUTONOMOUS" at character 7 + DROP EXTENSION pg_pathman; +ERROR: current transaction is aborted, commands ignored until end of transaction block + CREATE EXTENSION pg_pathman; +ERROR: current transaction is aborted, commands ignored until end of transaction block + COMMIT; +COMMIT; +WARNING: there is no transaction in progress +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 3 other objects +-- finalize +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out new file mode 100644 index 00000000..b9421bde --- /dev/null +++ b/expected/pathman_calamity.out @@ -0,0 +1,1072 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions 
+------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not 
ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' 
value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition 
+------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT 
validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: identifier "0" must be normal Oid +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint 
"pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT 
create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ 
+ get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 6 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop 
cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM 
pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE 
calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on 
test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T 
LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out new file mode 100644 index 00000000..6ca2e7dd --- /dev/null +++ b/expected/pathman_calamity_1.out 
@@ -0,0 +1,1072 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT 
drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT 
create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation 
*/ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT 
drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ 
+ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function 
get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: identifier "0" must be normal Oid +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint 
"pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT 
create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ 
+ get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 6 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop 
cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM 
pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE 
calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on 
test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T 
LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out new file mode 100644 index 00000000..fa3295f6 --- /dev/null +++ b/expected/pathman_calamity_2.out 
@@ -0,0 +1,1072 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT 
drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT 
create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation 
*/ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT 
drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ 
+ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function 
get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: identifier "0" must be normal Oid +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +SELECT 
add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ 
+CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT 
get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 
6 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition 
bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + 
partition parents cache | 0 +(3 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); 
+SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on survivor survivor_1 + -> Seq Scan on survivor_1 survivor_2 + -> Seq Scan on survivor_2 survivor_3 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop 
cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out new file mode 100644 index 00000000..a8879ef7 --- /dev/null +++ b/expected/pathman_calamity_3.out @@ -0,0 +1,1076 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 
'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ 
+ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select 
add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ 
+ERROR: invalid input syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); + build_hash_condition +---------------------------------------------------- + public.get_hash_part_idx(hash_record(val), 10) = 1 +(1 row) + +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: 
failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: identifier "0" must be normal Oid +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +SELECT 
add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ 
+CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT 
get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 
6 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition 
bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + 
partition parents cache | 0 +(3 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); 
+SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on survivor survivor_1 + -> Seq Scan on survivor_1 survivor_2 + -> Seq Scan on survivor_2 survivor_3 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop 
cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out new file mode 100644 index 00000000..8427dae7 --- /dev/null +++ b/expected/pathman_callbacks.out @@ -0,0 +1,418 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA callbacks; +/* callback #1 */ +CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE WARNING 'callback arg: %', args::TEXT; +END +$$ language plpgsql; +/* callback #2 */ +CREATE OR REPLACE FUNCTION public.dummy_cb(args JSONB) +RETURNS VOID AS $$ +BEGIN +END +$$ language plpgsql; +CREATE TABLE callbacks.abc(a serial, b int); +SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_init_callback('callbacks.abc', 'public.dummy_cb(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* check that callback is schema-qualified */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + init_callback +------------------------ + public.dummy_cb(jsonb) +(1 row) + +/* reset callback */ +SELECT set_init_callback('callbacks.abc'); + set_init_callback +------------------- + +(1 row) + +/* should return NULL */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + init_callback +--------------- + +(1 row) + +SELECT set_init_callback('callbacks.abc', + 'callbacks.abc_on_part_created_callback(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* check that callback is schema-qualified */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + init_callback +----------------------------------------------- + callbacks.abc_on_part_created_callback(jsonb) +(1 row) + +DROP TABLE callbacks.abc CASCADE; +NOTICE: drop cascades to 3 other 
objects +/* set callback to be called on RANGE partitions */ +CREATE TABLE callbacks.abc(a serial, b int); +SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_init_callback('callbacks.abc', + 'callbacks.abc_on_part_created_callback(jsonb)'); + set_init_callback +------------------- + +(1 row) + +INSERT INTO callbacks.abc VALUES (123, 1); +INSERT INTO callbacks.abc VALUES (223, 1); /* show warning */ +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "callbacks", "partition_schema": "callbacks"} +SELECT set_spawn_using_bgw('callbacks.abc', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +SELECT get_number_of_partitions('callbacks.abc'); + get_number_of_partitions +-------------------------- + 3 +(1 row) + +INSERT INTO callbacks.abc VALUES (323, 1); +SELECT get_number_of_partitions('callbacks.abc'); /* +1 partition (created by BGW) */ + get_number_of_partitions +-------------------------- + 4 +(1 row) + +SELECT set_spawn_using_bgw('callbacks.abc', false); + set_spawn_using_bgw +--------------------- + +(1 row) + +SELECT append_range_partition('callbacks.abc'); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "501", "range_min": "401", "parent_schema": "callbacks", "partition_schema": "callbacks"} + append_range_partition +------------------------ + callbacks.abc_5 +(1 row) + +SELECT prepend_range_partition('callbacks.abc'); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_6", "range_max": "1", "range_min": "-99", "parent_schema": "callbacks", "partition_schema": "callbacks"} + prepend_range_partition +------------------------- + callbacks.abc_6 +(1 row) + +SELECT add_range_partition('callbacks.abc', 501, 602); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_7", 
"range_max": "602", "range_min": "501", "parent_schema": "callbacks", "partition_schema": "callbacks"} + add_range_partition +--------------------- + callbacks.abc_7 +(1 row) + +SELECT drop_partitions('callbacks.abc'); +NOTICE: 0 rows copied from callbacks.abc_1 +NOTICE: 1 rows copied from callbacks.abc_2 +NOTICE: 1 rows copied from callbacks.abc_3 +NOTICE: 1 rows copied from callbacks.abc_4 +NOTICE: 0 rows copied from callbacks.abc_5 +NOTICE: 0 rows copied from callbacks.abc_6 +NOTICE: 0 rows copied from callbacks.abc_7 + drop_partitions +----------------- + 7 +(1 row) + +/* set callback to be called on HASH partitions */ +SELECT set_init_callback('callbacks.abc', + 'callbacks.abc_on_part_created_callback(jsonb)'); + set_init_callback +------------------- + +(1 row) + +SELECT create_hash_partitions('callbacks.abc', 'a', 5); +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_0", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_1", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_2", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_3", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "1", "partition": "abc_4", "parent_schema": "callbacks", "partition_schema": "callbacks"} + create_hash_partitions +------------------------ + 5 +(1 row) + +DROP TABLE callbacks.abc CASCADE; +NOTICE: drop cascades to 5 other objects +/* test the temprary deletion of callback function */ +CREATE TABLE callbacks.abc(a serial, b int); +SELECT set_init_callback('callbacks.abc', + 'callbacks.abc_on_part_created_callback(jsonb)'); + set_init_callback +------------------- + +(1 row) + +SELECT create_range_partitions('callbacks.abc', 
'a', 1, 100, 2); +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_1", "range_max": "101", "range_min": "1", "parent_schema": "callbacks", "partition_schema": "callbacks"} +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", "range_max": "201", "range_min": "101", "parent_schema": "callbacks", "partition_schema": "callbacks"} + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO callbacks.abc VALUES (201, 0); /* +1 new partition */ +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "callbacks", "partition_schema": "callbacks"} +BEGIN; +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +INSERT INTO callbacks.abc VALUES (301, 0); /* +0 new partitions (ERROR) */ +ERROR: callback function "callbacks.abc_on_part_created_callback(jsonb)" does not exist +ROLLBACK; +INSERT INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ +WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "401", "range_min": "301", "parent_schema": "callbacks", "partition_schema": "callbacks"} +DROP TABLE callbacks.abc CASCADE; +NOTICE: drop cascades to 5 other objects +/* more complex test using rotation of tables */ +CREATE TABLE callbacks.abc(a INT4 NOT NULL); +INSERT INTO callbacks.abc + SELECT a FROM generate_series(1, 100) a; +SELECT create_range_partitions('callbacks.abc', 'a', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +CREATE OR REPLACE FUNCTION callbacks.rotation_callback(params jsonb) +RETURNS VOID AS +$$ +DECLARE + relation regclass; + parent_rel regclass; +BEGIN + parent_rel := concat(params->>'partition_schema', '.', params->>'parent')::regclass; + + -- drop "old" partitions + FOR relation IN (SELECT partition FROM + (SELECT partition, range_min::INT4 FROM pathman_partition_list + WHERE parent = parent_rel + ORDER BY 
range_min::INT4 DESC + OFFSET 4) t -- remain 4 last partitions + ORDER BY range_min) + LOOP + RAISE NOTICE 'dropping partition %', relation; + PERFORM drop_range_partition(relation); + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT * FROM pathman_partition_list +WHERE parent = 'callbacks.abc'::REGCLASS +ORDER BY range_min::INT4; + parent | partition | parttype | expr | range_min | range_max +---------------+------------------+----------+------+-----------+----------- + callbacks.abc | callbacks.abc_1 | 2 | a | 1 | 11 + callbacks.abc | callbacks.abc_2 | 2 | a | 11 | 21 + callbacks.abc | callbacks.abc_3 | 2 | a | 21 | 31 + callbacks.abc | callbacks.abc_4 | 2 | a | 31 | 41 + callbacks.abc | callbacks.abc_5 | 2 | a | 41 | 51 + callbacks.abc | callbacks.abc_6 | 2 | a | 51 | 61 + callbacks.abc | callbacks.abc_7 | 2 | a | 61 | 71 + callbacks.abc | callbacks.abc_8 | 2 | a | 71 | 81 + callbacks.abc | callbacks.abc_9 | 2 | a | 81 | 91 + callbacks.abc | callbacks.abc_10 | 2 | a | 91 | 101 +(10 rows) + +SELECT set_init_callback('callbacks.abc', + 'callbacks.rotation_callback(jsonb)'); + set_init_callback +------------------- + +(1 row) + +INSERT INTO callbacks.abc VALUES (1000); +NOTICE: dropping partition callbacks.abc_1 +NOTICE: dropping partition callbacks.abc_2 +NOTICE: dropping partition callbacks.abc_3 +NOTICE: dropping partition callbacks.abc_4 +NOTICE: dropping partition callbacks.abc_5 +NOTICE: dropping partition callbacks.abc_6 +NOTICE: dropping partition callbacks.abc_7 +NOTICE: dropping partition callbacks.abc_8 +NOTICE: dropping partition callbacks.abc_9 +NOTICE: dropping partition callbacks.abc_10 +NOTICE: dropping partition callbacks.abc_11 +NOTICE: dropping partition callbacks.abc_12 +NOTICE: dropping partition callbacks.abc_13 +NOTICE: dropping partition callbacks.abc_14 +NOTICE: dropping partition callbacks.abc_15 +NOTICE: dropping partition callbacks.abc_16 +NOTICE: dropping partition callbacks.abc_17 +NOTICE: dropping partition callbacks.abc_18 +NOTICE: 
dropping partition callbacks.abc_19 +NOTICE: dropping partition callbacks.abc_20 +NOTICE: dropping partition callbacks.abc_21 +NOTICE: dropping partition callbacks.abc_22 +NOTICE: dropping partition callbacks.abc_23 +NOTICE: dropping partition callbacks.abc_24 +NOTICE: dropping partition callbacks.abc_25 +NOTICE: dropping partition callbacks.abc_26 +NOTICE: dropping partition callbacks.abc_27 +NOTICE: dropping partition callbacks.abc_28 +NOTICE: dropping partition callbacks.abc_29 +NOTICE: dropping partition callbacks.abc_30 +NOTICE: dropping partition callbacks.abc_31 +NOTICE: dropping partition callbacks.abc_32 +NOTICE: dropping partition callbacks.abc_33 +NOTICE: dropping partition callbacks.abc_34 +NOTICE: dropping partition callbacks.abc_35 +NOTICE: dropping partition callbacks.abc_36 +NOTICE: dropping partition callbacks.abc_37 +NOTICE: dropping partition callbacks.abc_38 +NOTICE: dropping partition callbacks.abc_39 +NOTICE: dropping partition callbacks.abc_40 +NOTICE: dropping partition callbacks.abc_41 +NOTICE: dropping partition callbacks.abc_42 +NOTICE: dropping partition callbacks.abc_43 +NOTICE: dropping partition callbacks.abc_44 +NOTICE: dropping partition callbacks.abc_45 +NOTICE: dropping partition callbacks.abc_46 +NOTICE: dropping partition callbacks.abc_47 +NOTICE: dropping partition callbacks.abc_48 +NOTICE: dropping partition callbacks.abc_49 +NOTICE: dropping partition callbacks.abc_50 +NOTICE: dropping partition callbacks.abc_51 +NOTICE: dropping partition callbacks.abc_52 +NOTICE: dropping partition callbacks.abc_53 +NOTICE: dropping partition callbacks.abc_54 +NOTICE: dropping partition callbacks.abc_55 +NOTICE: dropping partition callbacks.abc_56 +NOTICE: dropping partition callbacks.abc_57 +NOTICE: dropping partition callbacks.abc_58 +NOTICE: dropping partition callbacks.abc_59 +NOTICE: dropping partition callbacks.abc_60 +NOTICE: dropping partition callbacks.abc_61 +NOTICE: dropping partition callbacks.abc_62 +NOTICE: dropping partition 
callbacks.abc_63 +NOTICE: dropping partition callbacks.abc_64 +NOTICE: dropping partition callbacks.abc_65 +NOTICE: dropping partition callbacks.abc_66 +NOTICE: dropping partition callbacks.abc_67 +NOTICE: dropping partition callbacks.abc_68 +NOTICE: dropping partition callbacks.abc_69 +NOTICE: dropping partition callbacks.abc_70 +NOTICE: dropping partition callbacks.abc_71 +NOTICE: dropping partition callbacks.abc_72 +NOTICE: dropping partition callbacks.abc_73 +NOTICE: dropping partition callbacks.abc_74 +NOTICE: dropping partition callbacks.abc_75 +NOTICE: dropping partition callbacks.abc_76 +NOTICE: dropping partition callbacks.abc_77 +NOTICE: dropping partition callbacks.abc_78 +NOTICE: dropping partition callbacks.abc_79 +NOTICE: dropping partition callbacks.abc_80 +NOTICE: dropping partition callbacks.abc_81 +NOTICE: dropping partition callbacks.abc_82 +NOTICE: dropping partition callbacks.abc_83 +NOTICE: dropping partition callbacks.abc_84 +NOTICE: dropping partition callbacks.abc_85 +NOTICE: dropping partition callbacks.abc_86 +NOTICE: dropping partition callbacks.abc_87 +NOTICE: dropping partition callbacks.abc_88 +NOTICE: dropping partition callbacks.abc_89 +NOTICE: dropping partition callbacks.abc_90 +NOTICE: dropping partition callbacks.abc_91 +NOTICE: dropping partition callbacks.abc_92 +NOTICE: dropping partition callbacks.abc_93 +NOTICE: dropping partition callbacks.abc_94 +NOTICE: dropping partition callbacks.abc_95 +NOTICE: dropping partition callbacks.abc_96 +INSERT INTO callbacks.abc VALUES (1500); +NOTICE: dropping partition callbacks.abc_97 +NOTICE: dropping partition callbacks.abc_98 +NOTICE: dropping partition callbacks.abc_99 +NOTICE: dropping partition callbacks.abc_100 +NOTICE: dropping partition callbacks.abc_101 +NOTICE: dropping partition callbacks.abc_102 +NOTICE: dropping partition callbacks.abc_103 +NOTICE: dropping partition callbacks.abc_104 +NOTICE: dropping partition callbacks.abc_105 +NOTICE: dropping partition 
callbacks.abc_106 +NOTICE: dropping partition callbacks.abc_107 +NOTICE: dropping partition callbacks.abc_108 +NOTICE: dropping partition callbacks.abc_109 +NOTICE: dropping partition callbacks.abc_110 +NOTICE: dropping partition callbacks.abc_111 +NOTICE: dropping partition callbacks.abc_112 +NOTICE: dropping partition callbacks.abc_113 +NOTICE: dropping partition callbacks.abc_114 +NOTICE: dropping partition callbacks.abc_115 +NOTICE: dropping partition callbacks.abc_116 +NOTICE: dropping partition callbacks.abc_117 +NOTICE: dropping partition callbacks.abc_118 +NOTICE: dropping partition callbacks.abc_119 +NOTICE: dropping partition callbacks.abc_120 +NOTICE: dropping partition callbacks.abc_121 +NOTICE: dropping partition callbacks.abc_122 +NOTICE: dropping partition callbacks.abc_123 +NOTICE: dropping partition callbacks.abc_124 +NOTICE: dropping partition callbacks.abc_125 +NOTICE: dropping partition callbacks.abc_126 +NOTICE: dropping partition callbacks.abc_127 +NOTICE: dropping partition callbacks.abc_128 +NOTICE: dropping partition callbacks.abc_129 +NOTICE: dropping partition callbacks.abc_130 +NOTICE: dropping partition callbacks.abc_131 +NOTICE: dropping partition callbacks.abc_132 +NOTICE: dropping partition callbacks.abc_133 +NOTICE: dropping partition callbacks.abc_134 +NOTICE: dropping partition callbacks.abc_135 +NOTICE: dropping partition callbacks.abc_136 +NOTICE: dropping partition callbacks.abc_137 +NOTICE: dropping partition callbacks.abc_138 +NOTICE: dropping partition callbacks.abc_139 +NOTICE: dropping partition callbacks.abc_140 +NOTICE: dropping partition callbacks.abc_141 +NOTICE: dropping partition callbacks.abc_142 +NOTICE: dropping partition callbacks.abc_143 +NOTICE: dropping partition callbacks.abc_144 +NOTICE: dropping partition callbacks.abc_145 +NOTICE: dropping partition callbacks.abc_146 +SELECT * FROM pathman_partition_list +WHERE parent = 'callbacks.abc'::REGCLASS +ORDER BY range_min::INT4; + parent | partition | parttype | 
expr | range_min | range_max +---------------+-------------------+----------+------+-----------+----------- + callbacks.abc | callbacks.abc_147 | 2 | a | 1461 | 1471 + callbacks.abc | callbacks.abc_148 | 2 | a | 1471 | 1481 + callbacks.abc | callbacks.abc_149 | 2 | a | 1481 | 1491 + callbacks.abc | callbacks.abc_150 | 2 | a | 1491 | 1501 +(4 rows) + +DROP TABLE callbacks.abc CASCADE; +NOTICE: drop cascades to 5 other objects +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +DROP FUNCTION public.dummy_cb(jsonb); +DROP FUNCTION callbacks.rotation_callback(jsonb); +DROP SCHEMA callbacks; +DROP EXTENSION pg_pathman CASCADE; diff --git a/tests/__init__.py b/expected/pathman_check.out similarity index 100% rename from tests/__init__.py rename to expected/pathman_check.out diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out new file mode 100644 index 00000000..c77acbb2 --- /dev/null +++ b/expected/pathman_column_type.out @@ -0,0 +1,203 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. + */ +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 
:location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 +NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. 
+ */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied 
from test_column_type.test_0 +NOTICE: 0 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +DROP SCHEMA test_column_type; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_column_type_1.out b/expected/pathman_column_type_1.out new file mode 100644 index 00000000..06b61387 --- /dev/null +++ b/expected/pathman_column_type_1.out @@ -0,0 +1,203 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. + */ +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 
1 :varattnosyn 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 +NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. 
+ */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied 
from test_column_type.test_0 +NOTICE: 0 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +DROP SCHEMA test_column_type; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_column_type_2.out b/expected/pathman_column_type_2.out new file mode 100644 index 00000000..0fbd0793 --- /dev/null +++ b/expected/pathman_column_type_2.out @@ -0,0 +1,203 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. + */ +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key 
+----------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 +NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. 
+ */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied 
from test_column_type.test_0 +NOTICE: 0 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +DROP SCHEMA test_column_type; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out new file mode 100644 index 00000000..33821ac0 --- /dev/null +++ b/expected/pathman_cte.out @@ -0,0 +1,277 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +---------------------------------------------------------------------------------------- + CTE Scan on ttt + CTE ttt + -> Append + -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT 
INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------- + CTE Scan on ttt + CTE ttt + -> Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) +(5 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + CTE tmp + -> Seq Scan on cte_del_xacts_specdata + -> Hash Join + Hash Cond: ((tmp.tid = t.id) AND 
(tmp.pdate = t.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((tmp.tid = t_1.id) AND (tmp.pdate = t_1.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((tmp.tid = t_2.id) AND (tmp.pdate = t_2.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts_2 t_2 +(24 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + CTE tmp + -> Seq Scan on cte_del_xacts_specdata + -> Hash Join + Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((tmp.tid = t_1.id) AND (tmp.pdate = t_1.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts_1 t_1 +(17 rows) + +/* parent disabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------- + Delete on cte_del_xacts_1 t + CTE tmp + -> Seq Scan on cte_del_xacts_specdata + -> Hash Join + Hash Cond: ((tmp.tid = t.id) AND (tmp.pdate = t.pdate)) + -> CTE Scan on tmp + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts_1 t +(9 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT 
id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_1.out b/expected/pathman_cte_1.out new file mode 100644 index 00000000..5e30e188 --- /dev/null +++ b/expected/pathman_cte_1.out @@ -0,0 +1,266 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + 
tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash Join + Hash Cond: ((t_2.id = cte_del_xacts_specdata.tid) AND (t_2.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_2 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(22 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT 
create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(15 rows) + +/* parent disabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + 
+INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_2.out b/expected/pathman_cte_2.out new file mode 100644 index 00000000..b9bf8730 --- /dev/null +++ b/expected/pathman_cte_2.out @@ -0,0 +1,253 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + -> Seq Scan on range_rel_3 range_rel_2 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE 
TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t_1 + Delete on cte_del_xacts_1 t_2 + Delete on cte_del_xacts_2 t_3 + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Append + -> Seq Scan on cte_del_xacts t_1 + -> Seq Scan on cte_del_xacts_1 t_2 + -> Seq Scan on cte_del_xacts_2 t_3 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(13 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t_1 + Delete on cte_del_xacts_1 t_2 + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Append + -> Seq Scan on cte_del_xacts t_1 + -> Seq Scan on cte_del_xacts_1 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(11 rows) + +/* parent disabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != 
test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_3.out b/expected/pathman_cte_3.out new file mode 100644 index 
00000000..a7f3acd0 --- /dev/null +++ b/expected/pathman_cte_3.out @@ -0,0 +1,266 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + -> Seq Scan on range_rel_3 range_rel_2 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 
rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash Join + Hash Cond: ((t_2.id = cte_del_xacts_specdata.tid) AND (t_2.pdate = 
(cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_2 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(22 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(15 rows) + +/* parent disabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + 
+INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out new file mode 100644 index 00000000..2915ecfb --- /dev/null +++ b/expected/pathman_declarative.out @@ -0,0 +1,107 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +ERROR: "range_rel" is not partitioned +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max 
+----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 + test.range_rel | test.r2 | 2 | dt | 05-01-2015 | 06-01-2015 +(5 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | +Check constraints: + "pathman_r2_check" CHECK (dt >= '05-01-2015'::date AND dt < '06-01-2015'::date) +Inherits: test.range_rel + +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max 
+----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | + +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + Table "test.r4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+--------------------------------------------+---------+--------------+------------- + id | integer | | not null | nextval('test.range_rel_id_seq'::regclass) | plain | | + dt | date | | not null | | plain | | +Indexes: + "r4_pkey" PRIMARY KEY, btree (id) +Check constraints: + "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) +Inherits: test.range_rel + +/* Note: PG-10 doesn't support ATTACH PARTITION ... 
DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); +NOTICE: relation "nonexistent_table" does not exist, skipping +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; +NOTICE: relation "nonexistent_table" does not exist, skipping +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out new file mode 100644 index 00000000..dede4941 --- /dev/null +++ b/expected/pathman_declarative_1.out @@ -0,0 +1,107 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +ERROR: table "range_rel" is not partitioned +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only 
supports queries for range partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 + test.range_rel | test.r2 | 2 | dt | 05-01-2015 | 06-01-2015 +(5 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | +Check constraints: + "pathman_r2_check" CHECK (dt >= '05-01-2015'::date AND dt < '06-01-2015'::date) +Inherits: test.range_rel + +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | + +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + Table "test.r4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+--------------------------------------------+---------+--------------+------------- + id | integer | | not null | nextval('test.range_rel_id_seq'::regclass) | plain | | + dt | date | | not null | | plain | | +Indexes: + "r4_pkey" PRIMARY KEY, btree (id) +Check constraints: + "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) +Inherits: test.range_rel + +/* Note: PG-10 doesn't support ATTACH PARTITION ... 
DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); +NOTICE: relation "nonexistent_table" does not exist, skipping +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; +NOTICE: relation "nonexistent_table" does not exist, skipping +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out new file mode 100644 index 00000000..cc32ce0c --- /dev/null +++ b/expected/pathman_domains.out @@ -0,0 +1,131 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA domains; +CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); +CREATE TABLE domains.dom_table(val domains.dom_test NOT NULL); +INSERT INTO domains.dom_table SELECT generate_series(1, 999); +SELECT create_range_partitions('domains.dom_table', 'val', 1, 100); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 250; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_1 + -> Seq Scan on dom_table_2 + -> Seq Scan on dom_table_3 + Filter: ((val)::numeric < '250'::numeric) +(5 rows) + +INSERT INTO domains.dom_table VALUES(1500); +ERROR: value for domain domains.dom_test violates check constraint "dom_test_check" +INSERT INTO domains.dom_table VALUES(-10); +SELECT append_range_partition('domains.dom_table'); + append_range_partition +------------------------ + domains.dom_table_12 +(1 row) + +SELECT prepend_range_partition('domains.dom_table'); + prepend_range_partition +------------------------- + domains.dom_table_13 +(1 row) + +SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); + merge_range_partitions +------------------------ + domains.dom_table_1 +(1 row) 
+ +SELECT split_range_partition('domains.dom_table_1', 50); + split_range_partition +----------------------- + domains.dom_table_14 +(1 row) + +INSERT INTO domains.dom_table VALUES(1101); +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 450; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_13 + -> Seq Scan on dom_table_11 + -> Seq Scan on dom_table_1 + -> Seq Scan on dom_table_14 + -> Seq Scan on dom_table_3 + -> Seq Scan on dom_table_4 + -> Seq Scan on dom_table_5 + Filter: ((val)::numeric < '450'::numeric) +(9 rows) + +SELECT * FROM pathman_partition_list +ORDER BY range_min::INT, range_max::INT; + parent | partition | parttype | expr | range_min | range_max +-------------------+----------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_13 | 2 | val | -199 | -99 + domains.dom_table | domains.dom_table_11 | 2 | val | -99 | 1 + domains.dom_table | domains.dom_table_1 | 2 | val | 1 | 50 + domains.dom_table | domains.dom_table_14 | 2 | val | 50 | 201 + domains.dom_table | domains.dom_table_3 | 2 | val | 201 | 301 + domains.dom_table | domains.dom_table_4 | 2 | val | 301 | 401 + domains.dom_table | domains.dom_table_5 | 2 | val | 401 | 501 + domains.dom_table | domains.dom_table_6 | 2 | val | 501 | 601 + domains.dom_table | domains.dom_table_7 | 2 | val | 601 | 701 + domains.dom_table | domains.dom_table_8 | 2 | val | 701 | 801 + domains.dom_table | domains.dom_table_9 | 2 | val | 801 | 901 + domains.dom_table | domains.dom_table_10 | 2 | val | 901 | 1001 + domains.dom_table | domains.dom_table_12 | 2 | val | 1001 | 1101 + domains.dom_table | domains.dom_table_15 | 2 | val | 1101 | 1201 +(14 rows) + +SELECT drop_partitions('domains.dom_table'); +NOTICE: 49 rows copied from domains.dom_table_1 +NOTICE: 100 rows copied from domains.dom_table_3 +NOTICE: 100 rows copied from domains.dom_table_4 +NOTICE: 100 rows copied from domains.dom_table_5 +NOTICE: 100 
rows copied from domains.dom_table_6 +NOTICE: 100 rows copied from domains.dom_table_7 +NOTICE: 100 rows copied from domains.dom_table_8 +NOTICE: 100 rows copied from domains.dom_table_9 +NOTICE: 99 rows copied from domains.dom_table_10 +NOTICE: 1 rows copied from domains.dom_table_11 +NOTICE: 0 rows copied from domains.dom_table_12 +NOTICE: 0 rows copied from domains.dom_table_13 +NOTICE: 151 rows copied from domains.dom_table_14 +NOTICE: 1 rows copied from domains.dom_table_15 + drop_partitions +----------------- + 14 +(1 row) + +SELECT create_hash_partitions('domains.dom_table', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +SELECT * FROM pathman_partition_list +ORDER BY "partition"::TEXT; + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_0 | 1 | val | | + domains.dom_table | domains.dom_table_1 | 1 | val | | + domains.dom_table | domains.dom_table_2 | 1 | val | | + domains.dom_table | domains.dom_table_3 | 1 | val | | + domains.dom_table | domains.dom_table_4 | 1 | val | | +(5 rows) + +DROP TABLE domains.dom_table CASCADE; +NOTICE: drop cascades to 5 other objects +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_domains_1.out b/expected/pathman_domains_1.out new file mode 100644 index 00000000..aaa0867f --- /dev/null +++ b/expected/pathman_domains_1.out @@ -0,0 +1,131 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA domains; +CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); +CREATE TABLE domains.dom_table(val domains.dom_test NOT NULL); +INSERT INTO domains.dom_table SELECT generate_series(1, 999); +SELECT create_range_partitions('domains.dom_table', 'val', 1, 100); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) 
+SELECT * FROM domains.dom_table +WHERE val < 250; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_1 + -> Seq Scan on dom_table_2 + -> Seq Scan on dom_table_3 + Filter: ((val)::numeric < '250'::numeric) +(5 rows) + +INSERT INTO domains.dom_table VALUES(1500); +ERROR: value for domain domains.dom_test violates check constraint "dom_test_check" +INSERT INTO domains.dom_table VALUES(-10); +SELECT append_range_partition('domains.dom_table'); + append_range_partition +------------------------ + domains.dom_table_12 +(1 row) + +SELECT prepend_range_partition('domains.dom_table'); + prepend_range_partition +------------------------- + domains.dom_table_13 +(1 row) + +SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); + merge_range_partitions +------------------------ + domains.dom_table_1 +(1 row) + +SELECT split_range_partition('domains.dom_table_1', 50); + split_range_partition +----------------------- + domains.dom_table_14 +(1 row) + +INSERT INTO domains.dom_table VALUES(1101); +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 450; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_13 dom_table_1 + -> Seq Scan on dom_table_11 dom_table_2 + -> Seq Scan on dom_table_1 dom_table_3 + -> Seq Scan on dom_table_14 dom_table_4 + -> Seq Scan on dom_table_3 dom_table_5 + -> Seq Scan on dom_table_4 dom_table_6 + -> Seq Scan on dom_table_5 dom_table_7 + Filter: ((val)::numeric < '450'::numeric) +(9 rows) + +SELECT * FROM pathman_partition_list +ORDER BY range_min::INT, range_max::INT; + parent | partition | parttype | expr | range_min | range_max +-------------------+----------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_13 | 2 | val | -199 | -99 + domains.dom_table | domains.dom_table_11 | 2 | val | -99 | 1 + domains.dom_table | domains.dom_table_1 | 2 | val | 1 | 50 + 
domains.dom_table | domains.dom_table_14 | 2 | val | 50 | 201 + domains.dom_table | domains.dom_table_3 | 2 | val | 201 | 301 + domains.dom_table | domains.dom_table_4 | 2 | val | 301 | 401 + domains.dom_table | domains.dom_table_5 | 2 | val | 401 | 501 + domains.dom_table | domains.dom_table_6 | 2 | val | 501 | 601 + domains.dom_table | domains.dom_table_7 | 2 | val | 601 | 701 + domains.dom_table | domains.dom_table_8 | 2 | val | 701 | 801 + domains.dom_table | domains.dom_table_9 | 2 | val | 801 | 901 + domains.dom_table | domains.dom_table_10 | 2 | val | 901 | 1001 + domains.dom_table | domains.dom_table_12 | 2 | val | 1001 | 1101 + domains.dom_table | domains.dom_table_15 | 2 | val | 1101 | 1201 +(14 rows) + +SELECT drop_partitions('domains.dom_table'); +NOTICE: 49 rows copied from domains.dom_table_1 +NOTICE: 100 rows copied from domains.dom_table_3 +NOTICE: 100 rows copied from domains.dom_table_4 +NOTICE: 100 rows copied from domains.dom_table_5 +NOTICE: 100 rows copied from domains.dom_table_6 +NOTICE: 100 rows copied from domains.dom_table_7 +NOTICE: 100 rows copied from domains.dom_table_8 +NOTICE: 100 rows copied from domains.dom_table_9 +NOTICE: 99 rows copied from domains.dom_table_10 +NOTICE: 1 rows copied from domains.dom_table_11 +NOTICE: 0 rows copied from domains.dom_table_12 +NOTICE: 0 rows copied from domains.dom_table_13 +NOTICE: 151 rows copied from domains.dom_table_14 +NOTICE: 1 rows copied from domains.dom_table_15 + drop_partitions +----------------- + 14 +(1 row) + +SELECT create_hash_partitions('domains.dom_table', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +SELECT * FROM pathman_partition_list +ORDER BY "partition"::TEXT; + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_0 | 1 | val | | + domains.dom_table | domains.dom_table_1 | 1 | val | | + domains.dom_table | 
domains.dom_table_2 | 1 | val | | + domains.dom_table | domains.dom_table_3 | 1 | val | | + domains.dom_table | domains.dom_table_4 | 1 | val | | +(5 rows) + +DROP TABLE domains.dom_table CASCADE; +NOTICE: drop cascades to 5 other objects +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out new file mode 100644 index 00000000..826931d3 --- /dev/null +++ b/expected/pathman_dropped_cols.out @@ -0,0 +1,209 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA dropped_cols; +/* + * we should be able to manage tables with dropped columns + */ +create table test_range(a int, b int, key int not null); +alter table test_range drop column a; +select create_range_partitions('test_range', 'key', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +alter table test_range drop column b; +select prepend_range_partition('test_range'); + prepend_range_partition +------------------------- + test_range_3 +(1 row) + +select * from pathman_partition_list order by parent, partition; + parent | partition | parttype | expr | range_min | range_max +------------+--------------+----------+------+-----------+----------- + test_range | test_range_1 | 2 | key | 1 | 11 + test_range | test_range_2 | 2 | key | 11 | 21 + test_range | test_range_3 | 2 | key | -9 | 1 +(3 rows) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_1_check'; + pg_get_constraintdef +------------------------------- + CHECK (key >= 1 AND key < 11) +(1 row) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_3_check'; + pg_get_constraintdef +------------------------------------------ + CHECK (key >= '-9'::integer AND key < 1) +(1 row) + +drop table test_range cascade; +NOTICE: drop cascades to 4 other objects +create table test_hash(a int, 
b int, key int not null); +alter table test_hash drop column a; +select create_hash_partitions('test_hash', 'key', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +alter table test_hash drop column b; +create table test_dummy (like test_hash); +select replace_hash_partition('test_hash_2', 'test_dummy', true); + replace_hash_partition +------------------------ + test_dummy +(1 row) + +select * from pathman_partition_list order by parent, partition; + parent | partition | parttype | expr | range_min | range_max +-----------+-------------+----------+------+-----------+----------- + test_hash | test_hash_0 | 1 | key | | + test_hash | test_hash_1 | 1 | key | | + test_hash | test_dummy | 1 | key | | +(3 rows) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_hash_1_check'; + pg_get_constraintdef +------------------------------------------------- + CHECK (get_hash_part_idx(hashint4(key), 3) = 1) +(1 row) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; + pg_get_constraintdef +------------------------------------------------- + CHECK (get_hash_part_idx(hashint4(key), 3) = 2) +(1 row) + +drop table test_hash cascade; +NOTICE: drop cascades to 3 other objects +-- Yury Smirnov case +CREATE TABLE root_dict ( + id BIGSERIAL PRIMARY KEY NOT NULL, + root_id BIGINT NOT NULL, + start_date DATE, + num TEXT, + main TEXT, + dict_code TEXT, + dict_name TEXT, + edit_num TEXT, + edit_date DATE, + sign CHAR(4) +); +CREATE INDEX "root_dict_root_id_idx" ON "root_dict" ("root_id"); +DO +$$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM generate_series(1, 3) r + LOOP + FOR d IN 1..2 LOOP + INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES + (r.r, '2010-10-10'::date, 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + END LOOP; + END LOOP; +END +$$; +ALTER TABLE root_dict ADD COLUMN 
dict_id BIGINT DEFAULT 3; +ALTER TABLE root_dict DROP COLUMN dict_code, + DROP COLUMN dict_name, + DROP COLUMN sign; +SELECT create_hash_partitions('root_dict' :: REGCLASS, + 'root_id', + 3, + true); + create_hash_partitions +------------------------ + 3 +(1 row) + +VACUUM FULL ANALYZE "root_dict"; +SELECT set_enable_parent('root_dict' :: REGCLASS, FALSE); + set_enable_parent +------------------- + +(1 row) + +PREPARE getbyroot AS +SELECT + id, root_id, start_date, num, main, edit_num, edit_date, dict_id +FROM root_dict +WHERE root_id = $1; +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +-- errors usually start here +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | 
edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); + QUERY PLAN +---------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (root_dict.root_id = $1) + -> Bitmap Heap Scan on root_dict_0 root_dict + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_0_root_id_idx + Index Cond: (root_id = $1) + -> Bitmap Heap Scan on root_dict_1 root_dict + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_1_root_id_idx + Index Cond: (root_id = $1) + -> Bitmap Heap Scan on root_dict_2 root_dict + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_2_root_id_idx + Index Cond: (root_id = $1) +(14 rows) + +DEALLOCATE getbyroot; +DROP TABLE root_dict CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA dropped_cols; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out new file mode 100644 index 00000000..cd629b8e --- /dev/null +++ b/expected/pathman_expressions.out @@ -0,0 +1,441 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val 
COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on canon_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO 
test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +-------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy 
(LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL 
statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
+QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(3 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, 
md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions 
+------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(3 rows) + +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_1.out b/expected/pathman_expressions_1.out new file mode 100644 index 00000000..66e3ea75 --- /dev/null +++ b/expected/pathman_expressions_1.out @@ -0,0 +1,445 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val 
COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on canon_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO 
test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, '0'::text)::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_2 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_4 + Filter: (ROW(a, b)::test_exprs.composite < 
ROW(21, 0)) +(9 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT 
create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression 
"value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(3 rows) + +/* + * Test RANGE 
+ */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT 
create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(3 rows) + +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_2.out b/expected/pathman_expressions_2.out new file mode 100644 index 00000000..89bf24ef --- /dev/null +++ b/expected/pathman_expressions_2.out @@ -0,0 +1,436 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val 
COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------- + Seq Scan on canon_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO test_exprs.composite 
VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +-------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); 
+SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT 
public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
+QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------- + Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(2 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM 
generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT 
INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(2 rows) + +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_3.out b/expected/pathman_expressions_3.out new file mode 100644 index 00000000..eacb1009 --- /dev/null +++ b/expected/pathman_expressions_3.out @@ -0,0 +1,436 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val 
COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------------- + Seq Scan on canon_1 canon +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO 
test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +-------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy 
(LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL 
statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
+QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +----------------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 canary + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 hash_rel_3 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 hash_rel_4 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------- + Seq Scan on hash_rel_0 hash_rel + Filter: ((value * value2) = 5) +(2 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO 
test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +----------------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 canary + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + 
create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Seq Scan on range_rel_4 range_rel + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(2 rows) + +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out new file mode 100644 index 00000000..34fc75ad --- /dev/null +++ b/expected/pathman_foreign_keys.out @@ -0,0 +1,96 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA fkeys; +/* Check primary keys generation */ +CREATE TABLE fkeys.test_ref(comment TEXT UNIQUE); +INSERT INTO fkeys.test_ref VALUES('test'); +CREATE TABLE fkeys.test_fkey( + id INT NOT NULL, + comment TEXT, + FOREIGN KEY (comment) REFERENCES fkeys.test_ref(comment)); +INSERT INTO fkeys.test_fkey SELECT generate_series(1, 1000), 'test'; +SELECT create_range_partitions('fkeys.test_fkey', 'id', 1, 100); + create_range_partitions +------------------------- + 10 +(1 row) + 
+INSERT INTO fkeys.test_fkey VALUES(1, 'wrong'); +ERROR: insert or update on table "test_fkey_1" violates foreign key constraint "test_fkey_1_comment_fkey" +INSERT INTO fkeys.test_fkey VALUES(1, 'test'); +SELECT drop_partitions('fkeys.test_fkey'); +NOTICE: 101 rows copied from fkeys.test_fkey_1 +NOTICE: 100 rows copied from fkeys.test_fkey_2 +NOTICE: 100 rows copied from fkeys.test_fkey_3 +NOTICE: 100 rows copied from fkeys.test_fkey_4 +NOTICE: 100 rows copied from fkeys.test_fkey_5 +NOTICE: 100 rows copied from fkeys.test_fkey_6 +NOTICE: 100 rows copied from fkeys.test_fkey_7 +NOTICE: 100 rows copied from fkeys.test_fkey_8 +NOTICE: 100 rows copied from fkeys.test_fkey_9 +NOTICE: 100 rows copied from fkeys.test_fkey_10 + drop_partitions +----------------- + 10 +(1 row) + +SELECT create_hash_partitions('fkeys.test_fkey', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +INSERT INTO fkeys.test_fkey VALUES(1, 'wrong'); +ERROR: insert or update on table "test_fkey_0" violates foreign key constraint "test_fkey_0_comment_fkey" +INSERT INTO fkeys.test_fkey VALUES(1, 'test'); +SELECT drop_partitions('fkeys.test_fkey'); +NOTICE: 100 rows copied from fkeys.test_fkey_0 +NOTICE: 90 rows copied from fkeys.test_fkey_1 +NOTICE: 90 rows copied from fkeys.test_fkey_2 +NOTICE: 116 rows copied from fkeys.test_fkey_3 +NOTICE: 101 rows copied from fkeys.test_fkey_4 +NOTICE: 90 rows copied from fkeys.test_fkey_5 +NOTICE: 95 rows copied from fkeys.test_fkey_6 +NOTICE: 118 rows copied from fkeys.test_fkey_7 +NOTICE: 108 rows copied from fkeys.test_fkey_8 +NOTICE: 94 rows copied from fkeys.test_fkey_9 + drop_partitions +----------------- + 10 +(1 row) + +/* Try to partition table that's being referenced */ +CREATE TABLE fkeys.messages( + id SERIAL PRIMARY KEY, + msg TEXT); +CREATE TABLE fkeys.replies( + id SERIAL PRIMARY KEY, + message_id INTEGER REFERENCES fkeys.messages(id), + msg TEXT); +INSERT INTO fkeys.messages SELECT g, md5(g::text) FROM 
generate_series(1, 10) as g; +INSERT INTO fkeys.replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; +SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* not ok */ +WARNING: foreign key "replies_message_id_fkey" references table "fkeys.messages" +ERROR: table "fkeys.messages" is referenced from other tables +ALTER TABLE fkeys.replies DROP CONSTRAINT replies_message_id_fkey; +SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* ok */ + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM fkeys.messages; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on messages_1 + -> Seq Scan on messages_2 +(3 rows) + +DROP TABLE fkeys.messages, fkeys.replies CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE fkeys.test_fkey CASCADE; +DROP TABLE fkeys.test_ref CASCADE; +DROP SCHEMA fkeys; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_gaps.out b/expected/pathman_gaps.out new file mode 100644 index 00000000..530beca9 --- /dev/null +++ b/expected/pathman_gaps.out @@ -0,0 +1,834 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + 
gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 + Filter: (val = 21) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + 
QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 + Filter: (val > 21) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val = 31) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + 
QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + Filter: (val > 11) + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 
WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val > 31) + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val = 41) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + 
-> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + Filter: (val > 21) + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> 
Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val > 41) + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val = 51) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) 
SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) 
SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + Filter: (val > 21) + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq 
Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val > 51) + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_1.out b/expected/pathman_gaps_1.out new file mode 100644 index 00000000..b1c0ac34 --- /dev/null +++ 
b/expected/pathman_gaps_1.out @@ -0,0 +1,819 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + 
gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 + Filter: (val = 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + 
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 + Filter: (val > 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +---------------------- + Seq Scan on test_2_4 + Filter: (val = 31) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN +---------------------------- + 
Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + Filter: (val > 11) + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val > 31) + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +---------------------- + Seq Scan on test_3_5 + Filter: (val = 41) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on 
test_3_3 + -> Seq Scan on test_3_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + Filter: (val > 21) + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on 
test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val > 41) + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +---------------------- + Seq Scan on test_4_6 + Filter: (val = 51) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN 
+----------------------------- + Append + -> Seq Scan on test_4_3 + Filter: (val > 21) + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) 
+ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val > 51) + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_2.out b/expected/pathman_gaps_2.out new file mode 100644 index 00000000..b229be66 --- /dev/null +++ b/expected/pathman_gaps_2.out @@ -0,0 +1,819 @@ +/* + * 
Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + 
gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 + Filter: (val = 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 test_1_2 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN 
+----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 + Filter: (val > 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +----------------------------- + Seq Scan on test_2_4 test_2 + Filter: (val = 31) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on 
test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 + -> Seq Scan on test_2_5 test_2_4 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_2 test_2_1 + Filter: (val > 11) + -> Seq Scan on test_2_4 test_2_2 + -> Seq Scan on test_2_5 test_2_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_2 test_2_1 + -> Seq Scan on test_2_4 test_2_2 + -> Seq Scan on test_2_5 test_2_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN 
+------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + Filter: (val > 31) + -> Seq Scan on test_2_5 test_2_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +----------------------------- + Seq Scan on test_3_5 test_3 + Filter: (val = 41) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> 
Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 + -> Seq Scan on test_3_6 test_3_5 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_3 test_3_1 + Filter: (val > 21) + -> Seq Scan on test_3_5 test_3_2 + -> Seq Scan on test_3_6 test_3_3 + -> Seq Scan on test_3_7 test_3_4 + -> Seq Scan on test_3_8 test_3_5 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_3 test_3_1 + -> Seq Scan on test_3_5 test_3_2 + -> Seq Scan on test_3_6 test_3_3 + -> Seq Scan on test_3_7 test_3_4 + -> Seq Scan on test_3_8 test_3_5 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq 
Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + Filter: (val > 41) + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN 
+-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +----------------------------- + Seq Scan on test_4_6 test_4 + Filter: (val = 51) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + 
Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 + -> Seq Scan on test_4_7 test_4_5 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_3 test_4_1 + Filter: (val > 21) + -> Seq Scan on test_4_6 test_4_2 + -> Seq Scan on test_4_7 test_4_3 + -> Seq Scan on test_4_8 test_4_4 + -> Seq Scan on test_4_9 test_4_5 + -> Seq Scan on test_4_10 test_4_6 + -> Seq Scan on test_4_11 test_4_7 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_3 test_4_1 + -> Seq Scan on test_4_6 test_4_2 + -> Seq Scan on test_4_7 test_4_3 + -> Seq Scan on test_4_8 test_4_4 + -> Seq Scan on test_4_9 test_4_5 + -> Seq Scan on test_4_10 test_4_6 + -> Seq Scan on test_4_11 test_4_7 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on 
test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on 
test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + Filter: (val > 51) + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out new file mode 100644 index 00000000..f5ebabdd --- /dev/null +++ b/expected/pathman_hashjoin.out @@ -0,0 +1,84 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j1.id = j2.id) + -> Hash Join + Hash Cond: (j3.id = j1.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using 
num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 +(20 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out new file mode 100644 index 00000000..df6c0174 --- /dev/null +++ b/expected/pathman_hashjoin_1.out @@ -0,0 +1,84 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Hash Join + Hash Cond: (j2.id = j1.id) + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on 
range_rel_3 j2_1 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 +(20 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_2.out b/expected/pathman_hashjoin_2.out new file mode 100644 index 00000000..69ea5762 --- /dev/null +++ b/expected/pathman_hashjoin_2.out @@ -0,0 +1,77 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; 
+VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(13 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_3.out b/expected/pathman_hashjoin_3.out new file mode 100644 index 00000000..e2c8903a --- /dev/null +++ b/expected/pathman_hashjoin_3.out @@ -0,0 +1,76 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(12 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 
other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_4.out b/expected/pathman_hashjoin_4.out new file mode 100644 index 00000000..e827628f --- /dev/null +++ b/expected/pathman_hashjoin_4.out @@ -0,0 +1,84 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on 
j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j1.id = j2.id) + -> Hash Join + Hash Cond: (j3.id = j1.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_2 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2_1 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_2 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_3 +(20 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_5.out b/expected/pathman_hashjoin_5.out new file mode 100644 index 00000000..c66a9306 --- /dev/null +++ b/expected/pathman_hashjoin_5.out @@ -0,0 +1,76 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(12 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 
other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_6.out b/expected/pathman_hashjoin_6.out new file mode 100644 index 00000000..1c57f49b --- /dev/null +++ b/expected/pathman_hashjoin_6.out @@ -0,0 +1,75 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on 
j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 +(11 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out new file mode 100644 index 00000000..16656f18 --- /dev/null +++ b/expected/pathman_inserts.out @@ -0,0 +1,1075 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* attach before and after insertion triggers to partitioned table */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. 
INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +/* set partition init callback that will add triggers to partitions */ +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +ERROR: ON CONFLICT clause is not supported with partitioned tables +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; +ERROR: ON CONFLICT clause is not supported with partitioned tables +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +NOTICE: BEFORE 
INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) + tableoid +------------------------- + test_inserts.storage_11 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+----------- + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. +(2 rows) + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) + ?column? +---------- + 3 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+------------ + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. + 3 | 0 | 0 | PREPEND... +(3 rows) + +/* cause an unique index conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. 
INSERTED ROW: (0,0,0,CONFLICT) +ERROR: duplicate key value violates unique constraint "storage_11_a_idx" +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_12 +(1 row) + +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +SELECT * FROM test_inserts.storage_12; /* direct access */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +SELECT * FROM test_inserts.storage_13; /* direct access */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) + b | c | d | ?column? +-----+---+-------------+---------- + 111 | 0 | DROP_COL_1. 
| 17 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) + tableoid +------------------------- + test_inserts.storage_13 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) + ?column? | b +----------+----- + 222 | 111 +(1 row) + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; +/* will have 2 columns (b, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +SELECT * FROM test_inserts.storage_14; /* direct access */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) + b | d +-----+------------- + 121 | DROP_COL_2. 
+(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) + tableoid +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) + ?column? | ?column? +------------------+---------- + DROP_COL_2...0_0 | 363 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) + ?column? +---------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) + generate_series +----------------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT get_partition_key('test_inserts.storage')); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) + get_partition_key +------------------- + b +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. 
INSERTED ROW: (121,query_4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + 1 | 2 | 3 | 4 +(1 row) + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + partition | range_min | range_max | count +-------------------------+-----------+-----------+------- + test_inserts.storage_11 | -9 | 1 | 4 + test_inserts.storage_1 | 1 | 11 | 4 + test_inserts.storage_2 | 11 | 21 | 4 + test_inserts.storage_3 | 21 | 31 | 4 + test_inserts.storage_4 | 31 | 41 | 4 + test_inserts.storage_5 | 41 | 51 | 4 + test_inserts.storage_6 | 51 | 61 | 4 + test_inserts.storage_7 | 61 | 71 | 4 + test_inserts.storage_8 | 71 | 81 | 4 + test_inserts.storage_9 | 81 | 91 | 4 + test_inserts.storage_10 | 91 | 101 | 4 + test_inserts.storage_12 | 101 | 111 | 3 + test_inserts.storage_13 | 111 | 121 | 3 + test_inserts.storage_14 | 121 | 131 | 2 +(14 rows) + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----------------+------------------------- + 0 | PREPEND. | test_inserts.storage_11 + 0 | PREPEND.. | test_inserts.storage_11 + 0 | PREPEND... 
| test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | 
test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 3 cols! | test_inserts.storage_12 + 111 | 3 cols as well! | test_inserts.storage_13 + 111 | DROP_COL_1. | test_inserts.storage_13 + 111 | DROP_COL_1.. | test_inserts.storage_13 + 111 | DROP_COL_1... | test_inserts.storage_13 + 121 | 2 cols! | test_inserts.storage_14 + 121 | DROP_COL_2. | test_inserts.storage_14 + 121 | DROP_COL_2.. | test_inserts.storage_14 + 121 | DROP_COL_2... 
| test_inserts.storage_14 + 121 | query_1 | test_inserts.storage_14 + 121 | query_2 | test_inserts.storage_14 + 121 | query_3 | test_inserts.storage_14 + 121 | query_4 | test_inserts.storage_14 +(116 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* one more time! */ +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----+------------------------- + -2 | -2 | test_inserts.storage_11 + -1 | -1 | test_inserts.storage_11 + 0 | 0 | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | 
test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 
| test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 101 | test_inserts.storage_12 + 102 | 102 | test_inserts.storage_12 + 103 | 103 | test_inserts.storage_12 + 104 | 104 | test_inserts.storage_12 + 105 | 105 | test_inserts.storage_12 + 106 | 106 | test_inserts.storage_12 + 107 | 107 | test_inserts.storage_12 + 108 | 108 | test_inserts.storage_12 + 109 | 109 | test_inserts.storage_12 + 110 | 110 | test_inserts.storage_12 + 111 | 111 | test_inserts.storage_13 + 112 | 112 | test_inserts.storage_13 + 113 | 113 | test_inserts.storage_13 + 114 | 114 | test_inserts.storage_13 + 115 | 115 | test_inserts.storage_13 + 116 | 116 | test_inserts.storage_13 + 117 | 117 | test_inserts.storage_13 + 118 | 118 | test_inserts.storage_13 + 119 | 119 | test_inserts.storage_13 + 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (6,6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (117,117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (106,106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (120,120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | e | tableoid +-----+-----+-----+------------------------- + -2 | -2 | -2 | test_inserts.storage_11 + -1 | -1 | -1 | test_inserts.storage_11 + 0 | 0 | 0 | test_inserts.storage_11 + 1 | 1 | 1 | test_inserts.storage_1 + 2 | 2 | 2 | test_inserts.storage_1 + 3 | 3 | 3 | test_inserts.storage_1 + 4 | 4 | 4 | test_inserts.storage_1 + 5 | 5 | 5 | test_inserts.storage_1 + 6 | 6 | 6 | test_inserts.storage_1 + 7 | 7 | 7 | test_inserts.storage_1 + 8 | 8 | 8 | test_inserts.storage_1 + 9 | 9 | 9 | test_inserts.storage_1 + 10 | 10 | 10 | test_inserts.storage_1 + 11 | 11 | 11 | test_inserts.storage_2 + 12 | 12 | 12 | test_inserts.storage_2 + 13 | 13 | 13 | test_inserts.storage_2 + 14 | 14 | 14 | test_inserts.storage_2 + 15 | 15 | 15 | test_inserts.storage_2 + 16 | 16 | 16 | test_inserts.storage_2 + 17 | 17 | 17 | test_inserts.storage_2 + 18 | 18 | 18 | test_inserts.storage_2 + 19 | 19 | 19 | test_inserts.storage_2 + 20 | 20 | 20 | test_inserts.storage_2 + 21 | 21 | 21 | test_inserts.storage_3 + 22 | 22 | 22 | test_inserts.storage_3 + 23 | 23 | 23 | test_inserts.storage_3 + 24 | 24 | 24 | test_inserts.storage_3 + 25 | 25 | 25 | test_inserts.storage_3 + 26 | 26 | 26 | test_inserts.storage_3 + 27 | 27 | 27 | test_inserts.storage_3 + 28 | 28 | 28 | test_inserts.storage_3 + 29 | 29 | 29 | test_inserts.storage_3 + 30 | 30 | 30 | test_inserts.storage_3 + 31 | 31 | 31 | test_inserts.storage_4 + 32 | 32 | 32 | test_inserts.storage_4 + 33 | 33 | 33 | test_inserts.storage_4 + 34 | 34 | 34 | test_inserts.storage_4 + 35 | 35 | 35 | test_inserts.storage_4 + 36 | 36 | 36 | test_inserts.storage_4 + 37 | 37 | 37 | test_inserts.storage_4 + 38 | 38 | 38 | test_inserts.storage_4 + 39 | 39 | 39 | test_inserts.storage_4 + 40 | 40 | 40 | test_inserts.storage_4 + 41 | 41 | 41 | test_inserts.storage_5 + 42 | 42 | 42 | test_inserts.storage_5 + 43 | 43 | 43 | test_inserts.storage_5 + 44 | 44 | 44 | 
test_inserts.storage_5 + 45 | 45 | 45 | test_inserts.storage_5 + 46 | 46 | 46 | test_inserts.storage_5 + 47 | 47 | 47 | test_inserts.storage_5 + 48 | 48 | 48 | test_inserts.storage_5 + 49 | 49 | 49 | test_inserts.storage_5 + 50 | 50 | 50 | test_inserts.storage_5 + 51 | 51 | 51 | test_inserts.storage_6 + 52 | 52 | 52 | test_inserts.storage_6 + 53 | 53 | 53 | test_inserts.storage_6 + 54 | 54 | 54 | test_inserts.storage_6 + 55 | 55 | 55 | test_inserts.storage_6 + 56 | 56 | 56 | test_inserts.storage_6 + 57 | 57 | 57 | test_inserts.storage_6 + 58 | 58 | 58 | test_inserts.storage_6 + 59 | 59 | 59 | test_inserts.storage_6 + 60 | 60 | 60 | test_inserts.storage_6 + 61 | 61 | 61 | test_inserts.storage_7 + 62 | 62 | 62 | test_inserts.storage_7 + 63 | 63 | 63 | test_inserts.storage_7 + 64 | 64 | 64 | test_inserts.storage_7 + 65 | 65 | 65 | test_inserts.storage_7 + 66 | 66 | 66 | test_inserts.storage_7 + 67 | 67 | 67 | test_inserts.storage_7 + 68 | 68 | 68 | test_inserts.storage_7 + 69 | 69 | 69 | test_inserts.storage_7 + 70 | 70 | 70 | test_inserts.storage_7 + 71 | 71 | 71 | test_inserts.storage_8 + 72 | 72 | 72 | test_inserts.storage_8 + 73 | 73 | 73 | test_inserts.storage_8 + 74 | 74 | 74 | test_inserts.storage_8 + 75 | 75 | 75 | test_inserts.storage_8 + 76 | 76 | 76 | test_inserts.storage_8 + 77 | 77 | 77 | test_inserts.storage_8 + 78 | 78 | 78 | test_inserts.storage_8 + 79 | 79 | 79 | test_inserts.storage_8 + 80 | 80 | 80 | test_inserts.storage_8 + 81 | 81 | 81 | test_inserts.storage_9 + 82 | 82 | 82 | test_inserts.storage_9 + 83 | 83 | 83 | test_inserts.storage_9 + 84 | 84 | 84 | test_inserts.storage_9 + 85 | 85 | 85 | test_inserts.storage_9 + 86 | 86 | 86 | test_inserts.storage_9 + 87 | 87 | 87 | test_inserts.storage_9 + 88 | 88 | 88 | test_inserts.storage_9 + 89 | 89 | 89 | test_inserts.storage_9 + 90 | 90 | 90 | test_inserts.storage_9 + 91 | 91 | 91 | test_inserts.storage_10 + 92 | 92 | 92 | test_inserts.storage_10 + 93 | 93 | 93 | test_inserts.storage_10 + 94 | 94 | 
94 | test_inserts.storage_10 + 95 | 95 | 95 | test_inserts.storage_10 + 96 | 96 | 96 | test_inserts.storage_10 + 97 | 97 | 97 | test_inserts.storage_10 + 98 | 98 | 98 | test_inserts.storage_10 + 99 | 99 | 99 | test_inserts.storage_10 + 100 | 100 | 100 | test_inserts.storage_10 + 101 | 101 | 101 | test_inserts.storage_12 + 102 | 102 | 102 | test_inserts.storage_12 + 103 | 103 | 103 | test_inserts.storage_12 + 104 | 104 | 104 | test_inserts.storage_12 + 105 | 105 | 105 | test_inserts.storage_12 + 106 | 106 | 106 | test_inserts.storage_12 + 107 | 107 | 107 | test_inserts.storage_12 + 108 | 108 | 108 | test_inserts.storage_12 + 109 | 109 | 109 | test_inserts.storage_12 + 110 | 110 | 110 | test_inserts.storage_12 + 111 | 111 | 111 | test_inserts.storage_13 + 112 | 112 | 112 | test_inserts.storage_13 + 113 | 113 | 113 | test_inserts.storage_13 + 114 | 114 | 114 | test_inserts.storage_13 + 115 | 115 | 115 | test_inserts.storage_13 + 116 | 116 | 116 | test_inserts.storage_13 + 117 | 117 | 117 | test_inserts.storage_13 + 118 | 118 | 118 | test_inserts.storage_13 + 119 | 119 | 119 | test_inserts.storage_13 + 120 | 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) + ?column? 
| b | tableoid +----------+-----+------------------------- + -4 | -2 | test_inserts.storage_11 + 6 | 3 | test_inserts.storage_1 + 16 | 8 | test_inserts.storage_1 + 26 | 13 | test_inserts.storage_2 + 36 | 18 | test_inserts.storage_2 + 46 | 23 | test_inserts.storage_3 + 56 | 28 | test_inserts.storage_3 + 66 | 33 | test_inserts.storage_4 + 76 | 38 | test_inserts.storage_4 + 86 | 43 | test_inserts.storage_5 + 96 | 48 | test_inserts.storage_5 + 106 | 53 | test_inserts.storage_6 + 116 | 58 | test_inserts.storage_6 + 126 | 63 | test_inserts.storage_7 + 136 | 68 | test_inserts.storage_7 + 146 | 73 | test_inserts.storage_8 + 156 | 78 | test_inserts.storage_8 + 166 | 83 | test_inserts.storage_9 + 176 | 88 | test_inserts.storage_9 + 186 | 93 | test_inserts.storage_10 + 196 | 98 | test_inserts.storage_10 + 206 | 103 | test_inserts.storage_12 + 216 | 108 | test_inserts.storage_12 + 226 | 113 | test_inserts.storage_13 + 236 | 118 | test_inserts.storage_13 + 246 | 123 | test_inserts.storage_14 + 256 | 128 | test_inserts.storage_14 +(27 rows) + +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan 
(PartitionFilter) + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, storage_11.e + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d, storage_11.e + -> Seq Scan on test_inserts.storage_1 + Output: storage_1.b, storage_1.d, storage_1.e + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d, storage_2.e + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d, storage_3.e + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d, storage_4.e + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d, storage_5.e + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d, storage_6.e + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d, storage_7.e + -> Seq Scan on 
test_inserts.storage_8 + Output: storage_8.b, storage_8.d, storage_8.e + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d, storage_9.e + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d, storage_10.e + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d, storage_12.e + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, storage_13.d, storage_13.e + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b, storage_14.d, storage_14.e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d + -> Seq Scan on test_inserts.storage_1 + Output: storage_1.b, storage_1.d + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b, storage_8.d + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, storage_13.d + -> 
Seq Scan on test_inserts.storage_14 + Output: storage_14.b, storage_14.d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b + -> Seq Scan on test_inserts.storage_1 + Output: storage_1.b + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b +(34 rows) + +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +ERROR: cannot spawn a partition +DROP TABLE test_inserts.test_gap CASCADE; +NOTICE: drop cascades to 3 other objects +/* test a few 
"special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out new file mode 100644 index 00000000..3479c12d --- /dev/null +++ b/expected/pathman_inserts_1.out @@ -0,0 +1,1075 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* attach before and after insertion triggers to partitioned table */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +/* set partition init callback that will add triggers to partitions */ +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + 
args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +ERROR: ON CONFLICT clause is not supported with partitioned tables +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; +ERROR: ON CONFLICT clause is not supported with partitioned tables +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) + tableoid +------------------------- + test_inserts.storage_11 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+----------- + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. +(2 rows) + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) + ?column? 
+---------- + 3 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+------------ + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. + 3 | 0 | 0 | PREPEND... +(3 rows) + +/* cause an unique index conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,CONFLICT) +ERROR: duplicate key value violates unique constraint "storage_11_a_idx" +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_12 +(1 row) + +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +SELECT * FROM test_inserts.storage_12; /* direct access */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +SELECT * FROM test_inserts.storage_13; /* direct access */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! 
+(1 row) + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) + b | c | d | ?column? +-----+---+-------------+---------- + 111 | 0 | DROP_COL_1. | 17 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) + tableoid +------------------------- + test_inserts.storage_13 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) + ?column? | b +----------+----- + 222 | 111 +(1 row) + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; +/* will have 2 columns (b, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +SELECT * FROM test_inserts.storage_14; /* direct access */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + b | d +-----+--------- + 121 | 2 cols! 
+(1 row) + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) + b | d +-----+------------- + 121 | DROP_COL_2. +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) + tableoid +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) + ?column? | ?column? +------------------+---------- + DROP_COL_2...0_0 | 363 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) + ?column? +---------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) + generate_series +----------------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT get_partition_key('test_inserts.storage')); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. 
INSERTED ROW: (121,query_3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) + get_partition_key +------------------- + b +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + 1 | 2 | 3 | 4 +(1 row) + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + partition | range_min | range_max | count +-------------------------+-----------+-----------+------- + test_inserts.storage_11 | -9 | 1 | 4 + test_inserts.storage_1 | 1 | 11 | 4 + test_inserts.storage_2 | 11 | 21 | 4 + test_inserts.storage_3 | 21 | 31 | 4 + test_inserts.storage_4 | 31 | 41 | 4 + test_inserts.storage_5 | 41 | 51 | 4 + test_inserts.storage_6 | 51 | 61 | 4 + test_inserts.storage_7 | 61 | 71 | 4 + test_inserts.storage_8 | 71 | 81 | 4 + test_inserts.storage_9 | 81 | 91 | 4 + test_inserts.storage_10 | 91 | 101 | 4 + test_inserts.storage_12 | 101 | 111 | 3 + test_inserts.storage_13 | 111 | 121 | 3 + test_inserts.storage_14 | 121 | 131 | 2 +(14 rows) + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----------------+------------------------- + 0 | PREPEND. | test_inserts.storage_11 + 0 | PREPEND.. | test_inserts.storage_11 + 0 | PREPEND... 
| test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | 
test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 3 cols! | test_inserts.storage_12 + 111 | 3 cols as well! | test_inserts.storage_13 + 111 | DROP_COL_1. | test_inserts.storage_13 + 111 | DROP_COL_1.. | test_inserts.storage_13 + 111 | DROP_COL_1... | test_inserts.storage_13 + 121 | 2 cols! | test_inserts.storage_14 + 121 | DROP_COL_2. | test_inserts.storage_14 + 121 | DROP_COL_2.. | test_inserts.storage_14 + 121 | DROP_COL_2... 
| test_inserts.storage_14 + 121 | query_1 | test_inserts.storage_14 + 121 | query_2 | test_inserts.storage_14 + 121 | query_3 | test_inserts.storage_14 + 121 | query_4 | test_inserts.storage_14 +(116 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* one more time! */ +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----+------------------------- + -2 | -2 | test_inserts.storage_11 + -1 | -1 | test_inserts.storage_11 + 0 | 0 | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | 
test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 
| test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 101 | test_inserts.storage_12 + 102 | 102 | test_inserts.storage_12 + 103 | 103 | test_inserts.storage_12 + 104 | 104 | test_inserts.storage_12 + 105 | 105 | test_inserts.storage_12 + 106 | 106 | test_inserts.storage_12 + 107 | 107 | test_inserts.storage_12 + 108 | 108 | test_inserts.storage_12 + 109 | 109 | test_inserts.storage_12 + 110 | 110 | test_inserts.storage_12 + 111 | 111 | test_inserts.storage_13 + 112 | 112 | test_inserts.storage_13 + 113 | 113 | test_inserts.storage_13 + 114 | 114 | test_inserts.storage_13 + 115 | 115 | test_inserts.storage_13 + 116 | 116 | test_inserts.storage_13 + 117 | 117 | test_inserts.storage_13 + 118 | 118 | test_inserts.storage_13 + 119 | 119 | test_inserts.storage_13 + 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (6,6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (117,117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (106,106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (120,120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | e | tableoid +-----+-----+-----+------------------------- + -2 | -2 | -2 | test_inserts.storage_11 + -1 | -1 | -1 | test_inserts.storage_11 + 0 | 0 | 0 | test_inserts.storage_11 + 1 | 1 | 1 | test_inserts.storage_1 + 2 | 2 | 2 | test_inserts.storage_1 + 3 | 3 | 3 | test_inserts.storage_1 + 4 | 4 | 4 | test_inserts.storage_1 + 5 | 5 | 5 | test_inserts.storage_1 + 6 | 6 | 6 | test_inserts.storage_1 + 7 | 7 | 7 | test_inserts.storage_1 + 8 | 8 | 8 | test_inserts.storage_1 + 9 | 9 | 9 | test_inserts.storage_1 + 10 | 10 | 10 | test_inserts.storage_1 + 11 | 11 | 11 | test_inserts.storage_2 + 12 | 12 | 12 | test_inserts.storage_2 + 13 | 13 | 13 | test_inserts.storage_2 + 14 | 14 | 14 | test_inserts.storage_2 + 15 | 15 | 15 | test_inserts.storage_2 + 16 | 16 | 16 | test_inserts.storage_2 + 17 | 17 | 17 | test_inserts.storage_2 + 18 | 18 | 18 | test_inserts.storage_2 + 19 | 19 | 19 | test_inserts.storage_2 + 20 | 20 | 20 | test_inserts.storage_2 + 21 | 21 | 21 | test_inserts.storage_3 + 22 | 22 | 22 | test_inserts.storage_3 + 23 | 23 | 23 | test_inserts.storage_3 + 24 | 24 | 24 | test_inserts.storage_3 + 25 | 25 | 25 | test_inserts.storage_3 + 26 | 26 | 26 | test_inserts.storage_3 + 27 | 27 | 27 | test_inserts.storage_3 + 28 | 28 | 28 | test_inserts.storage_3 + 29 | 29 | 29 | test_inserts.storage_3 + 30 | 30 | 30 | test_inserts.storage_3 + 31 | 31 | 31 | test_inserts.storage_4 + 32 | 32 | 32 | test_inserts.storage_4 + 33 | 33 | 33 | test_inserts.storage_4 + 34 | 34 | 34 | test_inserts.storage_4 + 35 | 35 | 35 | test_inserts.storage_4 + 36 | 36 | 36 | test_inserts.storage_4 + 37 | 37 | 37 | test_inserts.storage_4 + 38 | 38 | 38 | test_inserts.storage_4 + 39 | 39 | 39 | test_inserts.storage_4 + 40 | 40 | 40 | test_inserts.storage_4 + 41 | 41 | 41 | test_inserts.storage_5 + 42 | 42 | 42 | test_inserts.storage_5 + 43 | 43 | 43 | test_inserts.storage_5 + 44 | 44 | 44 | 
test_inserts.storage_5 + 45 | 45 | 45 | test_inserts.storage_5 + 46 | 46 | 46 | test_inserts.storage_5 + 47 | 47 | 47 | test_inserts.storage_5 + 48 | 48 | 48 | test_inserts.storage_5 + 49 | 49 | 49 | test_inserts.storage_5 + 50 | 50 | 50 | test_inserts.storage_5 + 51 | 51 | 51 | test_inserts.storage_6 + 52 | 52 | 52 | test_inserts.storage_6 + 53 | 53 | 53 | test_inserts.storage_6 + 54 | 54 | 54 | test_inserts.storage_6 + 55 | 55 | 55 | test_inserts.storage_6 + 56 | 56 | 56 | test_inserts.storage_6 + 57 | 57 | 57 | test_inserts.storage_6 + 58 | 58 | 58 | test_inserts.storage_6 + 59 | 59 | 59 | test_inserts.storage_6 + 60 | 60 | 60 | test_inserts.storage_6 + 61 | 61 | 61 | test_inserts.storage_7 + 62 | 62 | 62 | test_inserts.storage_7 + 63 | 63 | 63 | test_inserts.storage_7 + 64 | 64 | 64 | test_inserts.storage_7 + 65 | 65 | 65 | test_inserts.storage_7 + 66 | 66 | 66 | test_inserts.storage_7 + 67 | 67 | 67 | test_inserts.storage_7 + 68 | 68 | 68 | test_inserts.storage_7 + 69 | 69 | 69 | test_inserts.storage_7 + 70 | 70 | 70 | test_inserts.storage_7 + 71 | 71 | 71 | test_inserts.storage_8 + 72 | 72 | 72 | test_inserts.storage_8 + 73 | 73 | 73 | test_inserts.storage_8 + 74 | 74 | 74 | test_inserts.storage_8 + 75 | 75 | 75 | test_inserts.storage_8 + 76 | 76 | 76 | test_inserts.storage_8 + 77 | 77 | 77 | test_inserts.storage_8 + 78 | 78 | 78 | test_inserts.storage_8 + 79 | 79 | 79 | test_inserts.storage_8 + 80 | 80 | 80 | test_inserts.storage_8 + 81 | 81 | 81 | test_inserts.storage_9 + 82 | 82 | 82 | test_inserts.storage_9 + 83 | 83 | 83 | test_inserts.storage_9 + 84 | 84 | 84 | test_inserts.storage_9 + 85 | 85 | 85 | test_inserts.storage_9 + 86 | 86 | 86 | test_inserts.storage_9 + 87 | 87 | 87 | test_inserts.storage_9 + 88 | 88 | 88 | test_inserts.storage_9 + 89 | 89 | 89 | test_inserts.storage_9 + 90 | 90 | 90 | test_inserts.storage_9 + 91 | 91 | 91 | test_inserts.storage_10 + 92 | 92 | 92 | test_inserts.storage_10 + 93 | 93 | 93 | test_inserts.storage_10 + 94 | 94 | 
94 | test_inserts.storage_10 + 95 | 95 | 95 | test_inserts.storage_10 + 96 | 96 | 96 | test_inserts.storage_10 + 97 | 97 | 97 | test_inserts.storage_10 + 98 | 98 | 98 | test_inserts.storage_10 + 99 | 99 | 99 | test_inserts.storage_10 + 100 | 100 | 100 | test_inserts.storage_10 + 101 | 101 | 101 | test_inserts.storage_12 + 102 | 102 | 102 | test_inserts.storage_12 + 103 | 103 | 103 | test_inserts.storage_12 + 104 | 104 | 104 | test_inserts.storage_12 + 105 | 105 | 105 | test_inserts.storage_12 + 106 | 106 | 106 | test_inserts.storage_12 + 107 | 107 | 107 | test_inserts.storage_12 + 108 | 108 | 108 | test_inserts.storage_12 + 109 | 109 | 109 | test_inserts.storage_12 + 110 | 110 | 110 | test_inserts.storage_12 + 111 | 111 | 111 | test_inserts.storage_13 + 112 | 112 | 112 | test_inserts.storage_13 + 113 | 113 | 113 | test_inserts.storage_13 + 114 | 114 | 114 | test_inserts.storage_13 + 115 | 115 | 115 | test_inserts.storage_13 + 116 | 116 | 116 | test_inserts.storage_13 + 117 | 117 | 117 | test_inserts.storage_13 + 118 | 118 | 118 | test_inserts.storage_13 + 119 | 119 | 119 | test_inserts.storage_13 + 120 | 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) + ?column? 
| b | tableoid +----------+-----+------------------------- + -4 | -2 | test_inserts.storage_11 + 6 | 3 | test_inserts.storage_1 + 16 | 8 | test_inserts.storage_1 + 26 | 13 | test_inserts.storage_2 + 36 | 18 | test_inserts.storage_2 + 46 | 23 | test_inserts.storage_3 + 56 | 28 | test_inserts.storage_3 + 66 | 33 | test_inserts.storage_4 + 76 | 38 | test_inserts.storage_4 + 86 | 43 | test_inserts.storage_5 + 96 | 48 | test_inserts.storage_5 + 106 | 53 | test_inserts.storage_6 + 116 | 58 | test_inserts.storage_6 + 126 | 63 | test_inserts.storage_7 + 136 | 68 | test_inserts.storage_7 + 146 | 73 | test_inserts.storage_8 + 156 | 78 | test_inserts.storage_8 + 166 | 83 | test_inserts.storage_9 + 176 | 88 | test_inserts.storage_9 + 186 | 93 | test_inserts.storage_10 + 196 | 98 | test_inserts.storage_10 + 206 | 103 | test_inserts.storage_12 + 216 | 108 | test_inserts.storage_12 + 226 | 113 | test_inserts.storage_13 + 236 | 118 | test_inserts.storage_13 + 246 | 123 | test_inserts.storage_14 + 256 | 128 | test_inserts.storage_14 +(27 rows) + +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i, NULL::integer, i, i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan 
(PartitionFilter) + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i, i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, b, NULL::integer, d, e + -> Append + -> Seq Scan on test_inserts.storage_11 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_1 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_2 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_3 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_4 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_5 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_6 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_7 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_8 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_9 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_10 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_12 storage + Output: b, d, e + 
-> Seq Scan on test_inserts.storage_13 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_14 storage + Output: b, d, e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint + -> Result + Output: NULL::integer, b, NULL::integer, d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage + Output: b, d + -> Seq Scan on test_inserts.storage_1 storage + Output: b, d + -> Seq Scan on test_inserts.storage_2 storage + Output: b, d + -> Seq Scan on test_inserts.storage_3 storage + Output: b, d + -> Seq Scan on test_inserts.storage_4 storage + Output: b, d + -> Seq Scan on test_inserts.storage_5 storage + Output: b, d + -> Seq Scan on test_inserts.storage_6 storage + Output: b, d + -> Seq Scan on test_inserts.storage_7 storage + Output: b, d + -> Seq Scan on test_inserts.storage_8 storage + Output: b, d + -> Seq Scan on test_inserts.storage_9 storage + Output: b, d + -> Seq Scan on test_inserts.storage_10 storage + Output: b, d + -> Seq Scan on test_inserts.storage_12 storage + Output: b, d + -> Seq Scan on test_inserts.storage_13 storage + Output: b, d + -> Seq Scan on test_inserts.storage_14 storage + Output: b, d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Result + Output: NULL::integer, b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage + Output: b + -> Seq Scan on 
test_inserts.storage_1 storage + Output: b + -> Seq Scan on test_inserts.storage_2 storage + Output: b + -> Seq Scan on test_inserts.storage_3 storage + Output: b + -> Seq Scan on test_inserts.storage_4 storage + Output: b + -> Seq Scan on test_inserts.storage_5 storage + Output: b + -> Seq Scan on test_inserts.storage_6 storage + Output: b + -> Seq Scan on test_inserts.storage_7 storage + Output: b + -> Seq Scan on test_inserts.storage_8 storage + Output: b + -> Seq Scan on test_inserts.storage_9 storage + Output: b + -> Seq Scan on test_inserts.storage_10 storage + Output: b + -> Seq Scan on test_inserts.storage_12 storage + Output: b + -> Seq Scan on test_inserts.storage_13 storage + Output: b + -> Seq Scan on test_inserts.storage_14 storage + Output: b +(34 rows) + +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +ERROR: cannot spawn a partition +DROP TABLE test_inserts.test_gap CASCADE; +NOTICE: drop cascades to 3 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_2.out b/expected/pathman_inserts_2.out new file mode 100644 index 00000000..3c31fc53 --- /dev/null +++ b/expected/pathman_inserts_2.out @@ -0,0 +1,1075 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* attach before and after insertion triggers to partitioned table */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. 
INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +/* set partition init callback that will add triggers to partitions */ +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +ERROR: ON CONFLICT clause is not supported with partitioned tables +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; +ERROR: ON CONFLICT clause is not supported with partitioned tables +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +NOTICE: BEFORE 
INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) + tableoid +------------------------- + test_inserts.storage_11 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+----------- + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. +(2 rows) + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) + ?column? +---------- + 3 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+------------ + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. + 3 | 0 | 0 | PREPEND... +(3 rows) + +/* cause an unique index conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. 
INSERTED ROW: (0,0,0,CONFLICT) +ERROR: duplicate key value violates unique constraint "storage_11_a_idx" +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_12 +(1 row) + +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +SELECT * FROM test_inserts.storage_12; /* direct access */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +SELECT * FROM test_inserts.storage_13; /* direct access */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) + b | c | d | ?column? +-----+---+-------------+---------- + 111 | 0 | DROP_COL_1. 
| 17 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) + tableoid +------------------------- + test_inserts.storage_13 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) + ?column? | b +----------+----- + 222 | 111 +(1 row) + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; +/* will have 2 columns (b, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +SELECT * FROM test_inserts.storage_14; /* direct access */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) + b | d +-----+------------- + 121 | DROP_COL_2. 
+(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) + tableoid +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) + ?column? | ?column? +------------------+---------- + DROP_COL_2...0_0 | 363 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) + ?column? +---------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) + generate_series +----------------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT get_partition_key('test_inserts.storage')); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) + get_partition_key +------------------- + b +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. 
INSERTED ROW: (121,query_4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + 1 | 2 | 3 | 4 +(1 row) + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + partition | range_min | range_max | count +-------------------------+-----------+-----------+------- + test_inserts.storage_11 | -9 | 1 | 4 + test_inserts.storage_1 | 1 | 11 | 4 + test_inserts.storage_2 | 11 | 21 | 4 + test_inserts.storage_3 | 21 | 31 | 4 + test_inserts.storage_4 | 31 | 41 | 4 + test_inserts.storage_5 | 41 | 51 | 4 + test_inserts.storage_6 | 51 | 61 | 4 + test_inserts.storage_7 | 61 | 71 | 4 + test_inserts.storage_8 | 71 | 81 | 4 + test_inserts.storage_9 | 81 | 91 | 4 + test_inserts.storage_10 | 91 | 101 | 4 + test_inserts.storage_12 | 101 | 111 | 3 + test_inserts.storage_13 | 111 | 121 | 3 + test_inserts.storage_14 | 121 | 131 | 2 +(14 rows) + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----------------+------------------------- + 0 | PREPEND. | test_inserts.storage_11 + 0 | PREPEND.. | test_inserts.storage_11 + 0 | PREPEND... 
| test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | 
test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 3 cols! | test_inserts.storage_12 + 111 | 3 cols as well! | test_inserts.storage_13 + 111 | DROP_COL_1. | test_inserts.storage_13 + 111 | DROP_COL_1.. | test_inserts.storage_13 + 111 | DROP_COL_1... | test_inserts.storage_13 + 121 | 2 cols! | test_inserts.storage_14 + 121 | DROP_COL_2. | test_inserts.storage_14 + 121 | DROP_COL_2.. | test_inserts.storage_14 + 121 | DROP_COL_2... 
| test_inserts.storage_14 + 121 | query_1 | test_inserts.storage_14 + 121 | query_2 | test_inserts.storage_14 + 121 | query_3 | test_inserts.storage_14 + 121 | query_4 | test_inserts.storage_14 +(116 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* one more time! */ +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----+------------------------- + -2 | -2 | test_inserts.storage_11 + -1 | -1 | test_inserts.storage_11 + 0 | 0 | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | 
test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 
| test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 101 | test_inserts.storage_12 + 102 | 102 | test_inserts.storage_12 + 103 | 103 | test_inserts.storage_12 + 104 | 104 | test_inserts.storage_12 + 105 | 105 | test_inserts.storage_12 + 106 | 106 | test_inserts.storage_12 + 107 | 107 | test_inserts.storage_12 + 108 | 108 | test_inserts.storage_12 + 109 | 109 | test_inserts.storage_12 + 110 | 110 | test_inserts.storage_12 + 111 | 111 | test_inserts.storage_13 + 112 | 112 | test_inserts.storage_13 + 113 | 113 | test_inserts.storage_13 + 114 | 114 | test_inserts.storage_13 + 115 | 115 | test_inserts.storage_13 + 116 | 116 | test_inserts.storage_13 + 117 | 117 | test_inserts.storage_13 + 118 | 118 | test_inserts.storage_13 + 119 | 119 | test_inserts.storage_13 + 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (6,6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (117,117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (106,106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (120,120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | e | tableoid +-----+-----+-----+------------------------- + -2 | -2 | -2 | test_inserts.storage_11 + -1 | -1 | -1 | test_inserts.storage_11 + 0 | 0 | 0 | test_inserts.storage_11 + 1 | 1 | 1 | test_inserts.storage_1 + 2 | 2 | 2 | test_inserts.storage_1 + 3 | 3 | 3 | test_inserts.storage_1 + 4 | 4 | 4 | test_inserts.storage_1 + 5 | 5 | 5 | test_inserts.storage_1 + 6 | 6 | 6 | test_inserts.storage_1 + 7 | 7 | 7 | test_inserts.storage_1 + 8 | 8 | 8 | test_inserts.storage_1 + 9 | 9 | 9 | test_inserts.storage_1 + 10 | 10 | 10 | test_inserts.storage_1 + 11 | 11 | 11 | test_inserts.storage_2 + 12 | 12 | 12 | test_inserts.storage_2 + 13 | 13 | 13 | test_inserts.storage_2 + 14 | 14 | 14 | test_inserts.storage_2 + 15 | 15 | 15 | test_inserts.storage_2 + 16 | 16 | 16 | test_inserts.storage_2 + 17 | 17 | 17 | test_inserts.storage_2 + 18 | 18 | 18 | test_inserts.storage_2 + 19 | 19 | 19 | test_inserts.storage_2 + 20 | 20 | 20 | test_inserts.storage_2 + 21 | 21 | 21 | test_inserts.storage_3 + 22 | 22 | 22 | test_inserts.storage_3 + 23 | 23 | 23 | test_inserts.storage_3 + 24 | 24 | 24 | test_inserts.storage_3 + 25 | 25 | 25 | test_inserts.storage_3 + 26 | 26 | 26 | test_inserts.storage_3 + 27 | 27 | 27 | test_inserts.storage_3 + 28 | 28 | 28 | test_inserts.storage_3 + 29 | 29 | 29 | test_inserts.storage_3 + 30 | 30 | 30 | test_inserts.storage_3 + 31 | 31 | 31 | test_inserts.storage_4 + 32 | 32 | 32 | test_inserts.storage_4 + 33 | 33 | 33 | test_inserts.storage_4 + 34 | 34 | 34 | test_inserts.storage_4 + 35 | 35 | 35 | test_inserts.storage_4 + 36 | 36 | 36 | test_inserts.storage_4 + 37 | 37 | 37 | test_inserts.storage_4 + 38 | 38 | 38 | test_inserts.storage_4 + 39 | 39 | 39 | test_inserts.storage_4 + 40 | 40 | 40 | test_inserts.storage_4 + 41 | 41 | 41 | test_inserts.storage_5 + 42 | 42 | 42 | test_inserts.storage_5 + 43 | 43 | 43 | test_inserts.storage_5 + 44 | 44 | 44 | 
test_inserts.storage_5 + 45 | 45 | 45 | test_inserts.storage_5 + 46 | 46 | 46 | test_inserts.storage_5 + 47 | 47 | 47 | test_inserts.storage_5 + 48 | 48 | 48 | test_inserts.storage_5 + 49 | 49 | 49 | test_inserts.storage_5 + 50 | 50 | 50 | test_inserts.storage_5 + 51 | 51 | 51 | test_inserts.storage_6 + 52 | 52 | 52 | test_inserts.storage_6 + 53 | 53 | 53 | test_inserts.storage_6 + 54 | 54 | 54 | test_inserts.storage_6 + 55 | 55 | 55 | test_inserts.storage_6 + 56 | 56 | 56 | test_inserts.storage_6 + 57 | 57 | 57 | test_inserts.storage_6 + 58 | 58 | 58 | test_inserts.storage_6 + 59 | 59 | 59 | test_inserts.storage_6 + 60 | 60 | 60 | test_inserts.storage_6 + 61 | 61 | 61 | test_inserts.storage_7 + 62 | 62 | 62 | test_inserts.storage_7 + 63 | 63 | 63 | test_inserts.storage_7 + 64 | 64 | 64 | test_inserts.storage_7 + 65 | 65 | 65 | test_inserts.storage_7 + 66 | 66 | 66 | test_inserts.storage_7 + 67 | 67 | 67 | test_inserts.storage_7 + 68 | 68 | 68 | test_inserts.storage_7 + 69 | 69 | 69 | test_inserts.storage_7 + 70 | 70 | 70 | test_inserts.storage_7 + 71 | 71 | 71 | test_inserts.storage_8 + 72 | 72 | 72 | test_inserts.storage_8 + 73 | 73 | 73 | test_inserts.storage_8 + 74 | 74 | 74 | test_inserts.storage_8 + 75 | 75 | 75 | test_inserts.storage_8 + 76 | 76 | 76 | test_inserts.storage_8 + 77 | 77 | 77 | test_inserts.storage_8 + 78 | 78 | 78 | test_inserts.storage_8 + 79 | 79 | 79 | test_inserts.storage_8 + 80 | 80 | 80 | test_inserts.storage_8 + 81 | 81 | 81 | test_inserts.storage_9 + 82 | 82 | 82 | test_inserts.storage_9 + 83 | 83 | 83 | test_inserts.storage_9 + 84 | 84 | 84 | test_inserts.storage_9 + 85 | 85 | 85 | test_inserts.storage_9 + 86 | 86 | 86 | test_inserts.storage_9 + 87 | 87 | 87 | test_inserts.storage_9 + 88 | 88 | 88 | test_inserts.storage_9 + 89 | 89 | 89 | test_inserts.storage_9 + 90 | 90 | 90 | test_inserts.storage_9 + 91 | 91 | 91 | test_inserts.storage_10 + 92 | 92 | 92 | test_inserts.storage_10 + 93 | 93 | 93 | test_inserts.storage_10 + 94 | 94 | 
94 | test_inserts.storage_10 + 95 | 95 | 95 | test_inserts.storage_10 + 96 | 96 | 96 | test_inserts.storage_10 + 97 | 97 | 97 | test_inserts.storage_10 + 98 | 98 | 98 | test_inserts.storage_10 + 99 | 99 | 99 | test_inserts.storage_10 + 100 | 100 | 100 | test_inserts.storage_10 + 101 | 101 | 101 | test_inserts.storage_12 + 102 | 102 | 102 | test_inserts.storage_12 + 103 | 103 | 103 | test_inserts.storage_12 + 104 | 104 | 104 | test_inserts.storage_12 + 105 | 105 | 105 | test_inserts.storage_12 + 106 | 106 | 106 | test_inserts.storage_12 + 107 | 107 | 107 | test_inserts.storage_12 + 108 | 108 | 108 | test_inserts.storage_12 + 109 | 109 | 109 | test_inserts.storage_12 + 110 | 110 | 110 | test_inserts.storage_12 + 111 | 111 | 111 | test_inserts.storage_13 + 112 | 112 | 112 | test_inserts.storage_13 + 113 | 113 | 113 | test_inserts.storage_13 + 114 | 114 | 114 | test_inserts.storage_13 + 115 | 115 | 115 | test_inserts.storage_13 + 116 | 116 | 116 | test_inserts.storage_13 + 117 | 117 | 117 | test_inserts.storage_13 + 118 | 118 | 118 | test_inserts.storage_13 + 119 | 119 | 119 | test_inserts.storage_13 + 120 | 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) + ?column? 
| b | tableoid +----------+-----+------------------------- + -4 | -2 | test_inserts.storage_11 + 6 | 3 | test_inserts.storage_1 + 16 | 8 | test_inserts.storage_1 + 26 | 13 | test_inserts.storage_2 + 36 | 18 | test_inserts.storage_2 + 46 | 23 | test_inserts.storage_3 + 56 | 28 | test_inserts.storage_3 + 66 | 33 | test_inserts.storage_4 + 76 | 38 | test_inserts.storage_4 + 86 | 43 | test_inserts.storage_5 + 96 | 48 | test_inserts.storage_5 + 106 | 53 | test_inserts.storage_6 + 116 | 58 | test_inserts.storage_6 + 126 | 63 | test_inserts.storage_7 + 136 | 68 | test_inserts.storage_7 + 146 | 73 | test_inserts.storage_8 + 156 | 78 | test_inserts.storage_8 + 166 | 83 | test_inserts.storage_9 + 176 | 88 | test_inserts.storage_9 + 186 | 93 | test_inserts.storage_10 + 196 | 98 | test_inserts.storage_10 + 206 | 103 | test_inserts.storage_12 + 216 | 108 | test_inserts.storage_12 + 226 | 113 | test_inserts.storage_13 + 236 | 118 | test_inserts.storage_13 + 246 | 123 | test_inserts.storage_14 + 256 | 128 | test_inserts.storage_14 +(27 rows) + +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan 
(PartitionFilter) + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN +------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, storage_1.b, NULL::integer, storage_1.d, storage_1.e + -> Append + -> Seq Scan on test_inserts.storage_11 storage_2 + Output: storage_2.b, storage_2.d, storage_2.e + -> Seq Scan on test_inserts.storage_1 storage_3 + Output: storage_3.b, storage_3.d, storage_3.e + -> Seq Scan on test_inserts.storage_2 storage_4 + Output: storage_4.b, storage_4.d, storage_4.e + -> Seq Scan on test_inserts.storage_3 storage_5 + Output: storage_5.b, storage_5.d, storage_5.e + -> Seq Scan on test_inserts.storage_4 storage_6 + Output: storage_6.b, storage_6.d, storage_6.e + -> Seq Scan on test_inserts.storage_5 storage_7 + Output: storage_7.b, storage_7.d, storage_7.e + -> Seq Scan on test_inserts.storage_6 storage_8 + Output: storage_8.b, storage_8.d, storage_8.e + -> Seq Scan on test_inserts.storage_7 storage_9 + Output: 
storage_9.b, storage_9.d, storage_9.e + -> Seq Scan on test_inserts.storage_8 storage_10 + Output: storage_10.b, storage_10.d, storage_10.e + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b, storage_11.d, storage_11.e + -> Seq Scan on test_inserts.storage_10 storage_12 + Output: storage_12.b, storage_12.d, storage_12.e + -> Seq Scan on test_inserts.storage_12 storage_13 + Output: storage_13.b, storage_13.d, storage_13.e + -> Seq Scan on test_inserts.storage_13 storage_14 + Output: storage_14.b, storage_14.d, storage_14.e + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b, storage_15.d, storage_15.e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint + -> Result + Output: NULL::integer, storage_1.b, NULL::integer, storage_1.d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage_2 + Output: storage_2.b, storage_2.d + -> Seq Scan on test_inserts.storage_1 storage_3 + Output: storage_3.b, storage_3.d + -> Seq Scan on test_inserts.storage_2 storage_4 + Output: storage_4.b, storage_4.d + -> Seq Scan on test_inserts.storage_3 storage_5 + Output: storage_5.b, storage_5.d + -> Seq Scan on test_inserts.storage_4 storage_6 + Output: storage_6.b, storage_6.d + -> Seq Scan on test_inserts.storage_5 storage_7 + Output: storage_7.b, storage_7.d + -> Seq Scan on test_inserts.storage_6 storage_8 + Output: storage_8.b, storage_8.d + -> Seq Scan on test_inserts.storage_7 storage_9 + Output: storage_9.b, storage_9.d + -> Seq Scan on test_inserts.storage_8 storage_10 + Output: storage_10.b, storage_10.d + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b, storage_11.d + -> Seq 
Scan on test_inserts.storage_10 storage_12 + Output: storage_12.b, storage_12.d + -> Seq Scan on test_inserts.storage_12 storage_13 + Output: storage_13.b, storage_13.d + -> Seq Scan on test_inserts.storage_13 storage_14 + Output: storage_14.b, storage_14.d + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b, storage_15.d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Result + Output: NULL::integer, storage_1.b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage_2 + Output: storage_2.b + -> Seq Scan on test_inserts.storage_1 storage_3 + Output: storage_3.b + -> Seq Scan on test_inserts.storage_2 storage_4 + Output: storage_4.b + -> Seq Scan on test_inserts.storage_3 storage_5 + Output: storage_5.b + -> Seq Scan on test_inserts.storage_4 storage_6 + Output: storage_6.b + -> Seq Scan on test_inserts.storage_5 storage_7 + Output: storage_7.b + -> Seq Scan on test_inserts.storage_6 storage_8 + Output: storage_8.b + -> Seq Scan on test_inserts.storage_7 storage_9 + Output: storage_9.b + -> Seq Scan on test_inserts.storage_8 storage_10 + Output: storage_10.b + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b + -> Seq Scan on test_inserts.storage_10 storage_12 + Output: storage_12.b + -> Seq Scan on test_inserts.storage_12 storage_13 + Output: storage_13.b + -> Seq Scan on test_inserts.storage_13 storage_14 + Output: storage_14.b + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b +(34 rows) + +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap 
SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +ERROR: cannot spawn a partition +DROP TABLE test_inserts.test_gap CASCADE; +NOTICE: drop cascades to 3 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out new file mode 100644 index 00000000..e4741522 --- /dev/null +++ b/expected/pathman_interval.out @@ -0,0 +1,275 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_interval; +/* Range partitions for INT2 type */ +CREATE TABLE test_interval.abc (id INT2 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('test_interval.abc', NULL::INT2); + set_interval +-------------- + +(1 row) + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); +ERROR: cannot spawn new partition for key '250' +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); +ERROR: interval should not be trivial +/* Set a negative interval */ +SELECT set_interval('test_interval.abc', -100); +ERROR: interval should not be negative +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; +ERROR: interval should not be trivial +/* Set a normal interval */ +SELECT 
set_interval('test_interval.abc', 1000); + set_interval +-------------- + +(1 row) + +INSERT INTO test_interval.abc VALUES (250); +SELECT partrel, range_interval FROM pathman_config; + partrel | range_interval +-------------------+---------------- + test_interval.abc | 1000 +(1 row) + +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 4 other objects +/* Range partitions for INT4 type */ +CREATE TABLE test_interval.abc (id INT4 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('test_interval.abc', NULL::INT4); + set_interval +-------------- + +(1 row) + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); +ERROR: cannot spawn new partition for key '250' +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); +ERROR: interval should not be trivial +/* Set a negative interval */ +SELECT set_interval('test_interval.abc', -100); +ERROR: interval should not be negative +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; +ERROR: interval should not be trivial +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 1000); + set_interval +-------------- + +(1 row) + +INSERT INTO test_interval.abc VALUES (250); +SELECT partrel, range_interval FROM pathman_config; + partrel | range_interval +-------------------+---------------- + test_interval.abc | 1000 +(1 row) + +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 4 other objects +/* Range partitions for INT8 type */ +CREATE TABLE test_interval.abc (id INT8 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('test_interval.abc', NULL::INT8); + set_interval 
+-------------- + +(1 row) + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); +ERROR: cannot spawn new partition for key '250' +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); +ERROR: interval should not be trivial +/* Set a negative interval */ +SELECT set_interval('test_interval.abc', -100); +ERROR: interval should not be negative +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; +ERROR: interval should not be trivial +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 1000); + set_interval +-------------- + +(1 row) + +INSERT INTO test_interval.abc VALUES (250); +SELECT partrel, range_interval FROM pathman_config; + partrel | range_interval +-------------------+---------------- + test_interval.abc | 1000 +(1 row) + +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 4 other objects +/* Range partitions for DATE type */ +CREATE TABLE test_interval.abc (dt DATE NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'dt', + '2016-01-01'::DATE, '1 day'::INTERVAL, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('test_interval.abc', NULL::INTERVAL); + set_interval +-------------- + +(1 row) + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', '1 second'::INTERVAL); +ERROR: interval should not be trivial +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', '1 month'::INTERVAL); + set_interval +-------------- + +(1 row) + +SELECT partrel, range_interval FROM pathman_config; + partrel | range_interval +-------------------+---------------- + test_interval.abc | @ 1 mon +(1 row) + +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 3 other objects +/* Range partitions for FLOAT4 type */ +CREATE TABLE test_interval.abc (x FLOAT4 NOT NULL); 
+SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('test_interval.abc', NULL::FLOAT4); + set_interval +-------------- + +(1 row) + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); +ERROR: interval should not be trivial +/* Set NaN float as interval */ +SELECT set_interval('test_interval.abc', 'NaN'::FLOAT4); +ERROR: invalid floating point interval +/* Set INF float as interval */ +SELECT set_interval('test_interval.abc', 'Infinity'::FLOAT4); +ERROR: invalid floating point interval +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 100); + set_interval +-------------- + +(1 row) + +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 3 other objects +/* Range partitions for FLOAT8 type */ +CREATE TABLE test_interval.abc (x FLOAT8 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT set_interval('test_interval.abc', NULL::FLOAT8); + set_interval +-------------- + +(1 row) + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); +ERROR: interval should not be trivial +/* Set NaN float as interval */ +SELECT set_interval('test_interval.abc', 'NaN'::FLOAT8); +ERROR: invalid floating point interval +/* Set INF float as interval */ +SELECT set_interval('test_interval.abc', 'Infinity'::FLOAT8); +ERROR: invalid floating point interval +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 100); + set_interval +-------------- + +(1 row) + +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 3 other objects +/* Range partitions for NUMERIC type */ +CREATE TABLE test_interval.abc (x NUMERIC NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT 
set_interval('test_interval.abc', NULL::NUMERIC); + set_interval +-------------- + +(1 row) + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); +ERROR: interval should not be trivial +/* Set NaN numeric as interval */ +SELECT set_interval('test_interval.abc', 'NaN'::NUMERIC); +ERROR: invalid numeric interval +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 100); + set_interval +-------------- + +(1 row) + +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 3 other objects +/* Hash partitioned table shouldn't accept any interval value */ +CREATE TABLE test_interval.abc (id SERIAL); +SELECT create_hash_partitions('test_interval.abc', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_interval('test_interval.abc', 100); +ERROR: table "test_interval.abc" is not partitioned by RANGE +SELECT set_interval('test_interval.abc', NULL::INTEGER); +ERROR: table "test_interval.abc" is not partitioned by RANGE +DROP TABLE test_interval.abc CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA test_interval; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out new file mode 100644 index 00000000..7654d4ca --- /dev/null +++ b/expected/pathman_join_clause.out @@ -0,0 +1,183 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: 
((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(41 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT 
* FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child_1.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Append + -> Seq Scan on child_1 + Filter: (owner_id = 3) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other 
objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_1.out b/expected/pathman_join_clause_1.out new file mode 100644 index 00000000..d65131c7 --- /dev/null +++ b/expected/pathman_join_clause_1.out @@ -0,0 +1,182 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index 
Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(41 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT 
NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child_1.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 + Filter: (owner_id = 3) +(6 
rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_2.out b/expected/pathman_join_clause_2.out new file mode 100644 index 00000000..df2ea0a5 --- /dev/null +++ b/expected/pathman_join_clause_2.out @@ -0,0 +1,161 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key 
+FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Seq Scan on mytbl_0 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 
1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 
3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_3.out b/expected/pathman_join_clause_3.out new file mode 100644 index 00000000..80b8de4c --- /dev/null +++ b/expected/pathman_join_clause_3.out @@ -0,0 +1,182 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN 
+------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(41 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key 
+--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = 
test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_4.out b/expected/pathman_join_clause_4.out new file mode 100644 index 00000000..17791fb9 --- /dev/null +++ b/expected/pathman_join_clause_4.out @@ -0,0 +1,161 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (m.id1 = fk.id1) + -> Seq Scan on mytbl_0 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, 
fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = 
parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_5.out b/expected/pathman_join_clause_5.out new file mode 100644 index 00000000..179f50f7 --- /dev/null +++ b/expected/pathman_join_clause_5.out @@ -0,0 +1,160 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (m.id1 = fk.id1) + -> Seq Scan on mytbl_0 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, 
fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + 
test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out new file mode 100644 index 00000000..53edc3d2 --- /dev/null +++ b/expected/pathman_lateral.out @@ -0,0 +1,128 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t + -> Seq Scan on data_1 t_1 + -> Seq Scan on data_2 t_2 + -> Seq Scan on data_3 t_3 + -> Seq Scan on data_4 t_4 + -> Seq Scan on data_5 t_5 + -> Seq Scan on data_6 t_6 + -> Seq Scan on data_7 t_7 + -> Seq Scan on data_8 t_8 + -> Seq Scan on data_9 t_9 + -> Materialize + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq 
Scan on data_7 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(84 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_1.out b/expected/pathman_lateral_1.out new file mode 100644 index 00000000..12995290 --- /dev/null +++ b/expected/pathman_lateral_1.out @@ -0,0 +1,122 @@ +-- Sometimes join selectivity improvements patches in pgpro force nested loop +-- members swap -- in pathman_lateral_1.out +\set VERBOSITY terse +SET search_path 
= 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_2 + Filter: 
((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t + -> Seq Scan on data_1 t_1 + -> Seq Scan on data_2 t_2 + -> Seq Scan on data_3 t_3 + -> Seq Scan on data_4 t_4 + -> Seq Scan on data_5 t_5 + -> Seq Scan on data_6 t_6 + -> Seq Scan on data_7 t_7 + -> Seq Scan on data_8 t_8 + -> Seq Scan on data_9 t_9 + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(83 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_2.out b/expected/pathman_lateral_2.out new file mode 100644 index 00000000..e4a64a56 --- /dev/null +++ b/expected/pathman_lateral_2.out @@ -0,0 +1,128 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was 
changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Materialize + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq 
Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(84 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_3.out b/expected/pathman_lateral_3.out new file mode 100644 index 00000000..4bc385de --- /dev/null +++ b/expected/pathman_lateral_3.out @@ -0,0 +1,127 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * 
members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2_1.id + t1_1.id) = t_1.id) + -> Nested Loop + Join Filter: ((t2_1.id > t1_1.id) AND (t1_1.id > t2_1.id) AND (t1_1.id = t2_1.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 
299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> HashAggregate + Group Key: t_1.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Custom Scan (RuntimeAppend) + Prune by: (t_1.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_1 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_2 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_3 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_4 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_5 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_6 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_7 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_8 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_9 t3 + Filter: (t_1.id = id) +(83 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_4.out b/expected/pathman_lateral_4.out new file mode 100644 index 
00000000..d35da608 --- /dev/null +++ b/expected/pathman_lateral_4.out @@ -0,0 +1,128 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Materialize + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + 
Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t3.id = t.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(84 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; 
+DROP EXTENSION pg_pathman; diff --git a/expected/pathman_mergejoin.out b/expected/pathman_mergejoin.out new file mode 100644 index 00000000..d8a14371 --- /dev/null +++ b/expected/pathman_mergejoin.out @@ -0,0 +1,92 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel 
j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Materialize + -> Merge Join + Merge Cond: (j2.id = j1.id) + -> Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + -> Index Scan using range_rel_3_pkey on range_rel_3 j2_1 + -> Index Scan using range_rel_4_pkey on range_rel_4 j2_2 + -> Materialize + -> Merge Append + Sort Key: j1.id + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 +(22 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_1.out b/expected/pathman_mergejoin_1.out new file mode 100644 index 00000000..bcd6c272 --- /dev/null +++ b/expected/pathman_mergejoin_1.out @@ -0,0 +1,90 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Merge Join + Merge Cond: (j1.id = j2.id) + -> Merge Append + Sort Key: j1.id + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 + -> Merge Append + Sort Key: j2.id + 
-> Index Scan using range_rel_2_pkey on range_rel_2 j2 + -> Index Scan using range_rel_3_pkey on range_rel_3 j2_1 + -> Index Scan using range_rel_4_pkey on range_rel_4 j2_2 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(20 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out new file mode 100644 index 00000000..aed697d2 --- /dev/null +++ b/expected/pathman_mergejoin_2.out @@ -0,0 +1,83 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using 
num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(13 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_3.out b/expected/pathman_mergejoin_3.out new file mode 100644 index 00000000..85414544 --- /dev/null +++ b/expected/pathman_mergejoin_3.out @@ -0,0 +1,81 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> 
Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(11 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_4.out b/expected/pathman_mergejoin_4.out new file mode 100644 index 00000000..fc9bc95f --- /dev/null +++ b/expected/pathman_mergejoin_4.out @@ -0,0 +1,90 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + 
create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Merge Join + Merge Cond: (j1.id = j2.id) + -> Merge Append + Sort Key: j1.id + -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_2 + -> Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2_1 + -> Index Scan using range_rel_3_pkey on range_rel_3 j2_2 + -> Index Scan using range_rel_4_pkey on range_rel_4 j2_3 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(20 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_5.out b/expected/pathman_mergejoin_5.out new file mode 100644 index 00000000..b99e40db --- /dev/null +++ b/expected/pathman_mergejoin_5.out @@ -0,0 +1,81 @@ +/* + * 
pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN 
+--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(11 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_6.out b/expected/pathman_mergejoin_6.out new file mode 100644 index 00000000..0cca2aef --- /dev/null +++ b/expected/pathman_mergejoin_6.out @@ -0,0 +1,80 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using 
num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(10 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_only.out b/expected/pathman_only.out new file mode 100644 index 00000000..f44f2256 --- /dev/null +++ b/expected/pathman_only.out @@ -0,0 +1,296 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN 
+--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 
from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------- + Hash Join + Hash Cond: (q1.val = q2.val) + CTE q1 + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + CTE q2 + -> Seq Scan 
on from_only_test + -> CTE Scan on q1 + -> Hash + -> CTE Scan on q2 +(19 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +---------------------------------------------------------- + Nested Loop + CTE q1 + -> Seq Scan on from_only_test from_only_test_1 + -> CTE Scan on q1 + -> Custom Scan (RuntimeAppend) + Prune by: (q1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (q1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (q1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (q1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (q1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (q1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (q1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (q1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (q1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (q1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (q1.val = val) +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 
from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out new file mode 100644 index 00000000..ce6fd127 --- /dev/null +++ b/expected/pathman_only_1.out @@ -0,0 +1,299 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN 
+--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test_1.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 + -> Seq Scan on from_only_test +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_1_1 + -> Seq Scan on from_only_test_2 from_only_test_2_1 + -> Seq Scan on from_only_test_3 from_only_test_3_1 + -> Seq Scan on from_only_test_4 
from_only_test_4_1 + -> Seq Scan on from_only_test_5 from_only_test_5_1 + -> Seq Scan on from_only_test_6 from_only_test_6_1 + -> Seq Scan on from_only_test_7 from_only_test_7_1 + -> Seq Scan on from_only_test_8 from_only_test_8_1 + -> Seq Scan on from_only_test_9 from_only_test_9_1 + -> Seq Scan on from_only_test_10 from_only_test_10_1 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = 
val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test 
+WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out new file mode 100644 index 00000000..6aeadb76 --- /dev/null +++ b/expected/pathman_only_2.out @@ -0,0 +1,299 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. 
+ * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) 
+SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test 
+UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> 
Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = 
from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> 
Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_3.out b/expected/pathman_only_3.out new file mode 100644 index 00000000..1999309d --- /dev/null +++ b/expected/pathman_only_3.out @@ -0,0 +1,299 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 
rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 
+ -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (a.val = b.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test 
from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = 
val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_4.out b/expected/pathman_only_4.out new file mode 100644 index 00000000..fbcc397c --- /dev/null +++ b/expected/pathman_only_4.out @@ -0,0 +1,299 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- 
+ * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan 
on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq 
Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (a.val = b.val) + -> 
Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY 
test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = (InitPlan 1).col1) + InitPlan 1 + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = (InitPlan 
1).col1) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = (InitPlan 1).col1) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_param_upd_del.out b/expected/pathman_param_upd_del.out new file mode 100644 index 00000000..28fa616d --- /dev/null +++ b/expected/pathman_param_upd_del.out @@ -0,0 +1,191 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA param_upd_del; +CREATE TABLE param_upd_del.test(key INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('param_upd_del.test', 'key', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +INSERT INTO param_upd_del.test SELECT i, i FROM generate_series(1, 1000) i; +ANALYZE; +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = $1; +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS 
OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(10); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(11); + QUERY PLAN +---------------------------- + Update on test_9 + -> Seq Scan on test_9 + Filter: (key = 11) +(3 rows) + +DEALLOCATE upd; +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = ($1 + 3) * 2; +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(6); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 18) +(3 rows) + +DEALLOCATE upd; +PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1; +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 
+ -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(10); + QUERY PLAN +---------------------------- + Delete on test_3 + -> Seq Scan on test_3 + Filter: (key = 10) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE del(11); + QUERY PLAN +---------------------------- + Delete on test_9 + -> Seq Scan on test_9 + Filter: (key = 11) +(3 rows) + +DEALLOCATE del; +DROP TABLE param_upd_del.test CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA param_upd_del; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out new file mode 100644 index 00000000..a29865d0 --- /dev/null +++ b/expected/pathman_permissions.out @@ -0,0 +1,263 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA permissions; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; +/* Switch to #1 */ +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; +/* Should fail (can't SELECT) */ +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Grant SELECT 
to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; +/* Should fail (don't own parent) */ +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Should be ok */ +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +/* Should be able to see */ +SET ROLE pathman_user2; +SELECT * FROM pathman_config; + partrel | expr | parttype | range_interval +---------------------------------+------+----------+---------------- + permissions.pathman_user1_table | id | 2 | 10 +(1 row) + +SELECT * FROM pathman_config_params; + partrel | enable_parent | auto | init_callback | spawn_using_bgw +---------------------------------+---------------+------+---------------+----------------- + permissions.pathman_user1_table | f | t | | f +(1 row) + +/* Should fail */ +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +SELECT set_auto('permissions.pathman_user1_table', false); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +/* Should fail */ +SET ROLE pathman_user2; +DELETE FROM pathman_config +WHERE partrel = 'permissions.pathman_user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" +/* No rights to insert, should fail */ +SET ROLE pathman_user2; +DO $$ +BEGIN + INSERT 
INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* No rights to create partitions (need INSERT privilege) */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); +ERROR: permission denied for parent relation "pathman_user1_table" +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ +/* Should be able to prepend a partition */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); + prepend_range_partition +----------------------------------- + permissions.pathman_user1_table_4 +(1 row) + +SELECT attname, attacl FROM pg_attribute +WHERE attrelid = (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS + ORDER BY range_min::int ASC /* prepend */ + LIMIT 1) +ORDER BY attname; /* check ACL for each column */ + attname | attacl +----------+--------------------------------- + a | {pathman_user2=w/pathman_user1} + cmax | + cmin | + ctid | + id | + tableoid | + xmax | + xmin | +(8 rows) + +/* Have rights, should be ok (parent's ACL is shared by new children) */ +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; + id | a +----+--- + 35 | 0 +(1 row) + +SELECT relname, relacl FROM pg_class +WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS + ORDER BY range_max::int DESC /* append */ + LIMIT 3) +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ + relname | relacl +-----------------------+---------------------------------------------------------------------- + pathman_user1_table_2 | 
{pathman_user1=arwdDxt/pathman_user1,pathman_user2=r/pathman_user1} + pathman_user1_table_5 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=ar/pathman_user1} + pathman_user1_table_6 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=ar/pathman_user1} +(3 rows) + +/* Try to drop partition, should fail */ +DO $$ +BEGIN + SELECT drop_range_partition('permissions.pathman_user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Disable automatic partition creation */ +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); + set_auto +---------- + +(1 row) + +/* Partition creation, should fail */ +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; +ERROR: no suitable partition for key '55' +/* Finally drop partitions */ +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); +NOTICE: 10 rows copied from permissions.pathman_user1_table_1 +NOTICE: 10 rows copied from permissions.pathman_user1_table_2 +NOTICE: 0 rows copied from permissions.pathman_user1_table_4 +NOTICE: 0 rows copied from permissions.pathman_user1_table_5 +NOTICE: 1 rows copied from permissions.pathman_user1_table_6 + drop_partitions +----------------- + 5 +(1 row) + +/* Switch to #2 */ +SET ROLE pathman_user2; +/* Test ddl event trigger */ +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); +NOTICE: 9 rows copied from permissions.pathman_user2_table_0 +NOTICE: 11 rows copied from permissions.pathman_user2_table_1 +NOTICE: 10 rows copied from permissions.pathman_user2_table_2 + drop_partitions +----------------- + 3 +(1 
row) + +/* Switch to #1 */ +SET ROLE pathman_user1; +CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); +INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; +SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column */ + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} +(3 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_4 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} +(4 rows) + 
+ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_5 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_5 | val | {pathman_user2=ar/pathman_user1} +(5 rows) + +DROP TABLE permissions.dropped_column CASCADE; +NOTICE: drop cascades to 6 other objects +/* Finally reset user */ +RESET ROLE; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; +DROP SCHEMA permissions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions_1.out b/expected/pathman_permissions_1.out new file mode 100644 index 00000000..dc976aae --- /dev/null +++ b/expected/pathman_permissions_1.out @@ -0,0 +1,263 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA permissions; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; +/* Switch to #1 */ +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) 
as g; +/* Should fail (can't SELECT) */ +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; +/* Should fail (don't own parent) */ +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Should be ok */ +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +/* Should be able to see */ +SET ROLE pathman_user2; +SELECT * FROM pathman_config; + partrel | expr | parttype | range_interval +---------------------------------+------+----------+---------------- + permissions.pathman_user1_table | id | 2 | 10 +(1 row) + +SELECT * FROM pathman_config_params; + partrel | enable_parent | auto | init_callback | spawn_using_bgw +---------------------------------+---------------+------+---------------+----------------- + permissions.pathman_user1_table | f | t | | f +(1 row) + +/* Should fail */ +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +SELECT set_auto('permissions.pathman_user1_table', false); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +/* Should fail */ 
+SET ROLE pathman_user2; +DELETE FROM pathman_config +WHERE partrel = 'permissions.pathman_user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" +/* No rights to insert, should fail */ +SET ROLE pathman_user2; +DO $$ +BEGIN + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* No rights to create partitions (need INSERT privilege) */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); +ERROR: permission denied for parent relation "pathman_user1_table" +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ +/* Should be able to prepend a partition */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); + prepend_range_partition +----------------------------------- + permissions.pathman_user1_table_4 +(1 row) + +SELECT attname, attacl FROM pg_attribute +WHERE attrelid = (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS + ORDER BY range_min::int ASC /* prepend */ + LIMIT 1) +ORDER BY attname; /* check ACL for each column */ + attname | attacl +----------+--------------------------------- + a | {pathman_user2=w/pathman_user1} + cmax | + cmin | + ctid | + id | + tableoid | + xmax | + xmin | +(8 rows) + +/* Have rights, should be ok (parent's ACL is shared by new children) */ +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; + id | a +----+--- + 35 | 0 +(1 row) + +SELECT relname, relacl FROM pg_class +WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 
'permissions.pathman_user1_table'::REGCLASS + ORDER BY range_max::int DESC /* append */ + LIMIT 3) +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ + relname | relacl +-----------------------+----------------------------------------------------------------------- + pathman_user1_table_2 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=r/pathman_user1} + pathman_user1_table_5 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=ar/pathman_user1} + pathman_user1_table_6 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=ar/pathman_user1} +(3 rows) + +/* Try to drop partition, should fail */ +DO $$ +BEGIN + SELECT drop_range_partition('permissions.pathman_user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Disable automatic partition creation */ +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); + set_auto +---------- + +(1 row) + +/* Partition creation, should fail */ +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; +ERROR: no suitable partition for key '55' +/* Finally drop partitions */ +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); +NOTICE: 10 rows copied from permissions.pathman_user1_table_1 +NOTICE: 10 rows copied from permissions.pathman_user1_table_2 +NOTICE: 0 rows copied from permissions.pathman_user1_table_4 +NOTICE: 0 rows copied from permissions.pathman_user1_table_5 +NOTICE: 1 rows copied from permissions.pathman_user1_table_6 + drop_partitions +----------------- + 5 +(1 row) + +/* Switch to #2 */ +SET ROLE pathman_user2; +/* Test ddl event trigger */ +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO permissions.pathman_user2_table SELECT 
generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); +NOTICE: 9 rows copied from permissions.pathman_user2_table_0 +NOTICE: 11 rows copied from permissions.pathman_user2_table_1 +NOTICE: 10 rows copied from permissions.pathman_user2_table_2 + drop_partitions +----------------- + 3 +(1 row) + +/* Switch to #1 */ +SET ROLE pathman_user1; +CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); +INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; +SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column */ + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} +(3 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_4 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl 
+------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} +(4 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_5 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_5 | val | {pathman_user2=ar/pathman_user1} +(5 rows) + +DROP TABLE permissions.dropped_column CASCADE; +NOTICE: drop cascades to 6 other objects +/* Finally reset user */ +RESET ROLE; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; +DROP SCHEMA permissions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_deletes.out b/expected/pathman_rebuild_deletes.out new file mode 100644 index 00000000..a5edc242 --- /dev/null +++ b/expected/pathman_rebuild_deletes.out @@ -0,0 +1,106 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because 
planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; +/* + * Test DELETEs on a partition with different TupleDescriptor. + */ +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); + append_range_partition +------------------------ + test_deletes.test_11 +(1 row) + +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; + QUERY PLAN +--------------------------- + Delete on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 1 | test_deletes.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; + QUERY PLAN +----------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+----+---------------------- + 101 | 10 | test_deletes.test_11 +(1 row) + +CREATE TABLE test_deletes.test_dummy (val INT4); +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN 
+------------------------------------ + Delete on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Delete on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_deletes.test >= '(100,8)'::record) AND (val = 101)) +(3 rows) + +DROP TABLE test_deletes.test_dummy; +DROP TABLE test_deletes.test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_deletes; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_deletes_1.out b/expected/pathman_rebuild_deletes_1.out new file mode 100644 index 00000000..eb2f5001 --- /dev/null +++ b/expected/pathman_rebuild_deletes_1.out @@ -0,0 +1,106 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; +/* + * Test DELETEs on a partition with different TupleDescriptor. 
+ */ +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); + append_range_partition +------------------------ + test_deletes.test_11 +(1 row) + +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; + QUERY PLAN +--------------------------- + Delete on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 1 | test_deletes.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; + QUERY PLAN +----------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+----+---------------------- + 101 | 10 | test_deletes.test_11 +(1 row) + +CREATE TABLE test_deletes.test_dummy (val INT4); +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Delete on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, 
t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Delete on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_deletes.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +DROP TABLE test_deletes.test_dummy; +DROP TABLE test_deletes.test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_deletes; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out new file mode 100644 index 00000000..40c5b048 --- /dev/null +++ b/expected/pathman_rebuild_updates.out @@ -0,0 +1,200 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_updates; +/* + * Test UPDATEs on a partition with different TupleDescriptor. 
+ */ +/* create partitioned table */ +CREATE TABLE test_updates.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_updates.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_updates.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_updates.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_updates.test'); + append_range_partition +------------------------ + test_updates.test_11 +(1 row) + +INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; + QUERY PLAN +--------------------------- + Update on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 0 | test_updates.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; + QUERY PLAN +----------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+---------------------- + 101 | 0 | test_updates.test_11 +(1 row) + +CREATE TABLE test_updates.test_dummy (val INT4); +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Update on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = 
t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Update on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_updates.test >= '(100,8)'::record) AND (val = 101)) +(3 rows) + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + test +--------- + (101,0) +(1 row) + +DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + parent | partition | parttype | expr | range_min | range_max | columns +-------------------+----------------------+----------+------+-----------+-----------+--------- + test_updates.test | test_updates.test_1 | 2 | val | 1 | 11 | 9 + test_updates.test | test_updates.test_2 | 2 | val | 11 | 21 | 9 + test_updates.test | test_updates.test_3 | 2 | val | 21 | 31 | 9 + test_updates.test | test_updates.test_4 | 2 | val | 31 | 41 | 9 + test_updates.test | test_updates.test_5 | 2 | val | 41 | 51 | 9 + test_updates.test | test_updates.test_6 | 2 | val | 51 | 61 | 9 + test_updates.test | test_updates.test_7 | 2 | val | 61 | 71 | 9 + test_updates.test | test_updates.test_8 | 2 | val | 71 | 81 | 9 + test_updates.test | test_updates.test_9 | 2 | val | 81 | 91 | 9 + test_updates.test | test_updates.test_10 | 2 | val | 91 | 101 | 9 + test_updates.test | test_updates.test_11 | 2 | val | 101 | 
111 | 8 +(11 rows) + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 106 | 105 | test_updates.test_11 +(1 row) + +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 115 | 105 | test_updates.test_12 +(1 row) + +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 95 | 105 | test_updates.test_10 +(1 row) + +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + -1 | 105 | test_updates.test_13 +(1 row) + +/* basic check for 'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; 
+NOTICE: drop cascades to 3 other objects +DROP TABLE test_updates.test CASCADE; +NOTICE: drop cascades to 14 other objects +DROP SCHEMA test_updates; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out new file mode 100644 index 00000000..57b3297a --- /dev/null +++ b/expected/pathman_rebuild_updates_1.out @@ -0,0 +1,200 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_updates; +/* + * Test UPDATEs on a partition with different TupleDescriptor. + */ +/* create partitioned table */ +CREATE TABLE test_updates.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_updates.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_updates.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_updates.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_updates.test'); + append_range_partition +------------------------ + test_updates.test_11 +(1 row) + +INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; + QUERY PLAN +--------------------------- + Update on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 0 | test_updates.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; + QUERY PLAN 
+----------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+---------------------- + 101 | 0 | test_updates.test_11 +(1 row) + +CREATE TABLE test_updates.test_dummy (val INT4); +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Update on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Update on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_updates.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + test +--------- + (101,0) +(1 row) + +DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + parent | partition | parttype | expr | range_min | range_max | 
columns +-------------------+----------------------+----------+------+-----------+-----------+--------- + test_updates.test | test_updates.test_1 | 2 | val | 1 | 11 | 9 + test_updates.test | test_updates.test_2 | 2 | val | 11 | 21 | 9 + test_updates.test | test_updates.test_3 | 2 | val | 21 | 31 | 9 + test_updates.test | test_updates.test_4 | 2 | val | 31 | 41 | 9 + test_updates.test | test_updates.test_5 | 2 | val | 41 | 51 | 9 + test_updates.test | test_updates.test_6 | 2 | val | 51 | 61 | 9 + test_updates.test | test_updates.test_7 | 2 | val | 61 | 71 | 9 + test_updates.test | test_updates.test_8 | 2 | val | 71 | 81 | 9 + test_updates.test | test_updates.test_9 | 2 | val | 81 | 91 | 9 + test_updates.test | test_updates.test_10 | 2 | val | 91 | 101 | 9 + test_updates.test | test_updates.test_11 | 2 | val | 101 | 111 | 8 +(11 rows) + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 106 | 105 | test_updates.test_11 +(1 row) + +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 115 | 105 | test_updates.test_12 +(1 row) + +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 95 | 105 | test_updates.test_10 +(1 row) + +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + -1 | 105 | test_updates.test_13 +(1 row) + +/* basic check for 'ALTER TABLE ... 
ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +DROP TABLE test_updates.test CASCADE; +NOTICE: drop cascades to 14 other objects +DROP SCHEMA test_updates; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out new file mode 100644 index 00000000..6d4611ee --- /dev/null +++ b/expected/pathman_rowmarks.out @@ -0,0 +1,410 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. 
+ * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +----------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * 
FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET 
id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq 
Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out new file mode 100644 index 00000000..063fca8d --- /dev/null +++ b/expected/pathman_rowmarks_1.out @@ -0,0 +1,465 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. 
+ * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Append + -> Seq Scan on first + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(10 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +--------------------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_5.id + -> Append + -> Seq Scan on first first_5 + -> Seq Scan on first_0 first_0_1 + -> Seq Scan on first_1 first_1_1 + -> Seq Scan on first_2 first_2_1 + -> Seq Scan on first_3 first_3_1 + -> Seq Scan on first_4 first_4_1 + -> Append + -> Seq Scan on first + Filter: (id = $1) + -> Seq Scan on first_0 + Filter: (id = $1) + -> Seq Scan on first_1 + Filter: (id = $1) + -> Seq Scan on first_2 + Filter: (id = $1) + -> Seq Scan on first_3 + Filter: (id = $1) + -> Seq Scan on first_4 + Filter: (id = $1) +(26 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Append + -> Seq Scan on first + Filter: (id = $1) + -> Seq Scan on first_0 + Filter: (id = $1) + -> Seq Scan on first_1 + Filter: (id = $1) + -> Seq Scan on first_2 + Filter: (id = $1) + -> Seq Scan on first_3 + Filter: (id = $1) + -> Seq Scan on first_4 + Filter: (id = $1) +(20 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN 
(COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(14 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +--------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first + 
Filter: (id < 1) + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(18 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +---------------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_4 + Filter: ((id = 1) OR (id = 2)) +(18 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET 
enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +--------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first + Filter: (id < 1) + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(18 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +---------------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_4 + Filter: ((id = 1) OR (id = 2)) +(18 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE 
rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out new file mode 100644 index 00000000..91d7804e --- /dev/null +++ b/expected/pathman_rowmarks_2.out @@ -0,0 +1,407 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +----------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * 
FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE 
rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on 
first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out new file mode 100644 index 00000000..e8644292 --- /dev/null +++ b/expected/pathman_rowmarks_3.out @@ -0,0 +1,407 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. 
+ * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +----------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +------------------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_1.id + -> Append + -> Seq Scan on first_0 first_2 + -> Seq Scan on first_1 first_3 + -> Seq Scan on first_2 first_4 + -> Seq Scan on first_3 first_5 + -> Seq Scan on first_4 first_6 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* 
JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +----------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: 
(id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join 
Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_4.out b/expected/pathman_rowmarks_4.out new file mode 100644 index 00000000..5fbec84d --- /dev/null +++ b/expected/pathman_rowmarks_4.out @@ -0,0 +1,407 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. 
+ * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +----------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +------------------------------------------------------------- + LockRows + InitPlan 1 + -> Limit + -> LockRows + -> Sort + Sort Key: first_1.id + -> Append + -> Seq Scan on first_0 first_2 + -> Seq Scan on first_1 first_3 + -> Seq Scan on first_2 first_4 + -> Seq Scan on first_3 first_5 + -> Seq Scan on first_4 first_6 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = (InitPlan 1).col1) + -> Seq Scan on first_0 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_1 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_2 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_3 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_4 first + Filter: (id = (InitPlan 1).col1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +-------------------------------------------------- + LockRows + InitPlan 1 + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = (InitPlan 1).col1) + -> Seq Scan on first_0 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_1 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_2 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_3 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_4 first + Filter: (id = (InitPlan 1).col1) +(19 rows) + +/* Two tables (execution) */ 
+SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +----------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + 
Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT 
id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out new file mode 100644 index 00000000..f699ddeb --- /dev/null +++ b/expected/pathman_runtime_nodes.out @@ -0,0 +1,505 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test RuntimeAppend + */ +create or replace function test.pathman_assert(smt bool, error_msg text) returns text as $$ +begin + if not smt then + raise exception '%', error_msg; + end if; + + return 'ok'; +end; +$$ language plpgsql; +create or 
replace function test.pathman_equal(a text, b text, error_msg text) returns text as $$ +begin + if a != b then + raise exception '''%'' is not equal to ''%'', %', a, b, error_msg; + end if; + + return 'equal'; +end; +$$ language plpgsql; +create or replace function test.pathman_test(query text) returns jsonb as $$ +declare + plan jsonb; +begin + execute 'explain (analyze, format json)' || query into plan; + + return plan; +end; +$$ language plpgsql; +create or replace function test.pathman_test_1() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = (select * from test.run_values limit 1)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Relation Name')::text, + format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(1), 6)), + 'wrong partition'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans') into num; + perform test.pathman_equal(num::text, '2', 'expected 2 child plans for custom scan'); + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_2() returns text as $$ +declare + plan jsonb; + num int; + c text; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from test.run_values limit 4)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select 
count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + execute 'select string_agg(y.z, '','') from + (select (x->''Relation Name'')::text as z from + jsonb_array_elements($1->0->''Plan''->''Plans''->1->''Plans'') x + order by x->''Relation Name'') y' + into c using plan; + perform test.pathman_equal(c, '"runtime_test_1_2","runtime_test_1_3","runtime_test_1_4","runtime_test_1_5"', + 'wrong partitions'); + + for i in 0..3 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_equal(num::text, '1', 'expected 1 loop'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_3() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 a join test.run_values b on a.id = b.val'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '6', 'expected 6 child plans for custom scan'); + + for i in 0..5 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num > 0 and num <= 1718, 'expected no more than 1718 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_4() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = 
test.pathman_test('select * from test.category c, lateral' || + '(select * from test.runtime_test_2 g where g.category_id = c.id order by rating limit 4) as tg'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + /* Limit -> Custom Scan */ + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Custom Plan Provider')::text, + '"RuntimeMergeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + for i in 0..3 loop + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Relation Name')::text, + format('"runtime_test_2_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), + 'wrong partition'); + + num = plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num = 1, 'expected no more than 1 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_5() returns text as $$ +declare + res record; +begin + select + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test empty tlist */ + + + select id * 2, id, 17 + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test computations */ + + + select test.vals.* from test.vals, lateral (select from test.runtime_test_3 + where id = test.vals.val) as q + into res; /* test lateral */ + + + select id, generate_series(1, 2) gen, val + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + order by id, gen, val + offset 1 limit 1 + 
into res; /* without IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '1', 'id is incorrect (t2)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t2)'); + perform test.pathman_equal(res.val::text, 'k = 1', 'val is incorrect (t2)'); + + + select id + from test.runtime_test_3 + where id = any (select * from test.vals order by val limit 5) + order by id + offset 3 limit 1 + into res; /* with IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '4', 'id is incorrect (t3)'); + + + select v.val v1, generate_series(2, 2) gen, t.val v2 + from test.runtime_test_3 t join test.vals v on id = v.val + order by v1, gen, v2 + limit 1 + into res; + + perform test.pathman_equal(res.v1::text, '1', 'v1 is incorrect (t4)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t4)'); + perform test.pathman_equal(res.v2::text, 'k = 1', 'v2 is incorrect (t4)'); + + return 'ok'; +end; +$$ language plpgsql +set enable_hashjoin = off +set enable_mergejoin = off; +create table test.run_values as select generate_series(1, 10000) val; +create table test.runtime_test_1(id serial primary key, val real); +insert into test.runtime_test_1 select generate_series(1, 10000), random(); +select pathman.create_hash_partitions('test.runtime_test_1', 'id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.category as (select id, 'cat' || id::text as name from generate_series(1, 4) id); +create table test.runtime_test_2 (id serial, category_id int not null, name text, rating real); +insert into test.runtime_test_2 (select id, (id % 6) + 1 as category_id, 'good' || id::text as name, random() as rating from generate_series(1, 100000) id); +create index on test.runtime_test_2 (category_id, rating); +select pathman.create_hash_partitions('test.runtime_test_2', 'category_id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.vals as (select generate_series(1, 10000) as val); 
+create table test.runtime_test_3(val text, id serial not null); +insert into test.runtime_test_3(id, val) select * from generate_series(1, 10000) k, format('k = %s', k); +select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +create index on test.runtime_test_3 (id); +create index on test.runtime_test_3_0 (id); +create table test.runtime_test_4(val text, id int not null); +insert into test.runtime_test_4(id, val) select * from generate_series(1, 10000) k, md5(k::text); +select pathman.create_range_partitions('test.runtime_test_4', 'id', 1, 2000); + create_range_partitions +------------------------- + 5 +(1 row) + +VACUUM ANALYZE; +set pg_pathman.enable_runtimeappend = on; +set pg_pathman.enable_runtimemergeappend = on; +select test.pathman_test_1(); /* RuntimeAppend (select ... where id = (subquery)) */ + pathman_test_1 +---------------- + ok +(1 row) + +select test.pathman_test_2(); /* RuntimeAppend (select ... 
where id = any(subquery)) */ + pathman_test_2 +---------------- + ok +(1 row) + +select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ + pathman_test_3 +---------------- + ok +(1 row) + +select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ + pathman_test_4 +---------------- + ok +(1 row) + +select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ + pathman_test_5 +---------------- + ok +(1 row) + +/* RuntimeAppend (join, enabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', true); + set_enable_parent +------------------- + +(1 row) + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (run_values.val = t1.id) + -> Seq Scan on runtime_test_1 t1 + Filter: (run_values.val = id) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(19 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, disabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', false); + set_enable_parent +------------------- + +(1 row) + +explain (costs 
off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (run_values.val = t1.id) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(17 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, additional projections) */ +select generate_series(1, 2) from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + generate_series +----------------- + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 2 +(8 rows) + +/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; + ?column? 
+---------- + t +(1 row) + +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! */ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Function Scan on generate_series f + -> Aggregate + -> Custom Scan (RuntimeAppend) + Prune by: (dropped_cols.val = f.id) + -> Seq Scan on dropped_cols_0 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_1 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_2 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_3 dropped_cols + Filter: (val = f.id) +(13 rows) + +drop table test.dropped_cols cascade; +NOTICE: drop cascades to 4 other objects +set enable_hashjoin = off; +set enable_mergejoin = off; +select from test.runtime_test_4 +where id = any (select generate_series(-10, -1)); /* should be empty */ +-- +(0 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_2 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_3 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test.runtime_test_4 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); 
+DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. +-- +CREATE TABLE part_test (val int NOT NULL); +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); + create_hash_partitions +------------------------ + 2 +(1 row) + +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+------------- + 2 | part_test_1 +(1 row) + +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; +NOTICE: AFTER DELETE ROW (part_test_1) +WARNING: "pg_pathman.enable" must be called before any query, ignored + val | tableoid +-----+------------ + 3 | pg_pathman +(1 row) + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION part_test_trigger(); +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out new file mode 100644 index 00000000..e975c761 --- /dev/null +++ b/expected/pathman_runtime_nodes_1.out @@ -0,0 +1,505 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test RuntimeAppend + 
*/ +create or replace function test.pathman_assert(smt bool, error_msg text) returns text as $$ +begin + if not smt then + raise exception '%', error_msg; + end if; + + return 'ok'; +end; +$$ language plpgsql; +create or replace function test.pathman_equal(a text, b text, error_msg text) returns text as $$ +begin + if a != b then + raise exception '''%'' is not equal to ''%'', %', a, b, error_msg; + end if; + + return 'equal'; +end; +$$ language plpgsql; +create or replace function test.pathman_test(query text) returns jsonb as $$ +declare + plan jsonb; +begin + execute 'explain (analyze, format json)' || query into plan; + + return plan; +end; +$$ language plpgsql; +create or replace function test.pathman_test_1() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = (select * from test.run_values limit 1)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Relation Name')::text, + format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(1), 6)), + 'wrong partition'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans') into num; + perform test.pathman_equal(num::text, '2', 'expected 2 child plans for custom scan'); + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_2() returns text as $$ +declare + plan jsonb; + num int; + c text; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from test.run_values limit 4)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform 
test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + execute 'select string_agg(y.z, '','') from + (select (x->''Relation Name'')::text as z from + jsonb_array_elements($1->0->''Plan''->''Plans''->1->''Plans'') x + order by x->''Relation Name'') y' + into c using plan; + perform test.pathman_equal(c, '"runtime_test_1_2","runtime_test_1_3","runtime_test_1_4","runtime_test_1_5"', + 'wrong partitions'); + + for i in 0..3 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_equal(num::text, '1', 'expected 1 loop'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_3() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 a join test.run_values b on a.id = b.val'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '6', 'expected 6 child plans for custom scan'); + + for i in 0..5 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num > 0 and num <= 1718, 'expected no more than 
1718 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_4() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.category c, lateral' || + '(select * from test.runtime_test_2 g where g.category_id = c.id order by rating limit 4) as tg'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + /* Limit -> Custom Scan */ + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Custom Plan Provider')::text, + '"RuntimeMergeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + for i in 0..3 loop + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Relation Name')::text, + format('"runtime_test_2_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), + 'wrong partition'); + + num = plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num = 1, 'expected no more than 1 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_5() returns text as $$ +declare + res record; +begin + select + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test empty tlist */ + + + select id * 2, id, 17 + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test computations */ + + + select test.vals.* from test.vals, lateral (select from test.runtime_test_3 + 
where id = test.vals.val) as q + into res; /* test lateral */ + + + select id, generate_series(1, 2) gen, val + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + order by id, gen, val + offset 1 limit 1 + into res; /* without IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '1', 'id is incorrect (t2)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t2)'); + perform test.pathman_equal(res.val::text, 'k = 1', 'val is incorrect (t2)'); + + + select id + from test.runtime_test_3 + where id = any (select * from test.vals order by val limit 5) + order by id + offset 3 limit 1 + into res; /* with IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '4', 'id is incorrect (t3)'); + + + select v.val v1, generate_series(2, 2) gen, t.val v2 + from test.runtime_test_3 t join test.vals v on id = v.val + order by v1, gen, v2 + limit 1 + into res; + + perform test.pathman_equal(res.v1::text, '1', 'v1 is incorrect (t4)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t4)'); + perform test.pathman_equal(res.v2::text, 'k = 1', 'v2 is incorrect (t4)'); + + return 'ok'; +end; +$$ language plpgsql +set enable_hashjoin = off +set enable_mergejoin = off; +create table test.run_values as select generate_series(1, 10000) val; +create table test.runtime_test_1(id serial primary key, val real); +insert into test.runtime_test_1 select generate_series(1, 10000), random(); +select pathman.create_hash_partitions('test.runtime_test_1', 'id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.category as (select id, 'cat' || id::text as name from generate_series(1, 4) id); +create table test.runtime_test_2 (id serial, category_id int not null, name text, rating real); +insert into test.runtime_test_2 (select id, (id % 6) + 1 as category_id, 'good' || id::text as name, random() as rating from generate_series(1, 100000) id); +create index on test.runtime_test_2 
(category_id, rating); +select pathman.create_hash_partitions('test.runtime_test_2', 'category_id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.vals as (select generate_series(1, 10000) as val); +create table test.runtime_test_3(val text, id serial not null); +insert into test.runtime_test_3(id, val) select * from generate_series(1, 10000) k, format('k = %s', k); +select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +create index on test.runtime_test_3 (id); +create index on test.runtime_test_3_0 (id); +create table test.runtime_test_4(val text, id int not null); +insert into test.runtime_test_4(id, val) select * from generate_series(1, 10000) k, md5(k::text); +select pathman.create_range_partitions('test.runtime_test_4', 'id', 1, 2000); + create_range_partitions +------------------------- + 5 +(1 row) + +VACUUM ANALYZE; +set pg_pathman.enable_runtimeappend = on; +set pg_pathman.enable_runtimemergeappend = on; +select test.pathman_test_1(); /* RuntimeAppend (select ... where id = (subquery)) */ + pathman_test_1 +---------------- + ok +(1 row) + +select test.pathman_test_2(); /* RuntimeAppend (select ... 
where id = any(subquery)) */ + pathman_test_2 +---------------- + ok +(1 row) + +select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ + pathman_test_3 +---------------- + ok +(1 row) + +select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ + pathman_test_4 +---------------- + ok +(1 row) + +select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ + pathman_test_5 +---------------- + ok +(1 row) + +/* RuntimeAppend (join, enabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', true); + set_enable_parent +------------------- + +(1 row) + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (t1.id = run_values.val) + -> Seq Scan on runtime_test_1 t1 + Filter: (id = run_values.val) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(19 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, disabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', false); + set_enable_parent +------------------- + +(1 row) + +explain (costs 
off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (t1.id = run_values.val) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(17 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, additional projections) */ +select generate_series(1, 2) from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + generate_series +----------------- + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 2 +(8 rows) + +/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; + ?column? 
+---------- + t +(1 row) + +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! */ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Function Scan on generate_series f + -> Aggregate + -> Custom Scan (RuntimeAppend) + Prune by: (dropped_cols.val = f.id) + -> Seq Scan on dropped_cols_0 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_1 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_2 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_3 dropped_cols + Filter: (val = f.id) +(13 rows) + +drop table test.dropped_cols cascade; +NOTICE: drop cascades to 4 other objects +set enable_hashjoin = off; +set enable_mergejoin = off; +select from test.runtime_test_4 +where id = any (select generate_series(-10, -1)); /* should be empty */ +-- +(0 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_2 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_3 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test.runtime_test_4 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); 
+DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. +-- +CREATE TABLE part_test (val int NOT NULL); +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); + create_hash_partitions +------------------------ + 2 +(1 row) + +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+------------- + 2 | part_test_1 +(1 row) + +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; +NOTICE: AFTER DELETE ROW (part_test_1) +WARNING: "pg_pathman.enable" must be called before any query, ignored + val | tableoid +-----+------------ + 3 | pg_pathman +(1 row) + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION part_test_trigger(); +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out new file mode 100644 index 00000000..3a6a19eb --- /dev/null +++ b/expected/pathman_subpartitions.out @@ -0,0 +1,467 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. 
+ */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 
row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + -> Seq Scan on abc_1_1 + -> Seq Scan on abc_1_2 + -> Append + -> Seq Scan on abc_2_0 + Filter: (a < 150) + -> Seq Scan on abc_2_1 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + Filter: (b = 215) + -> Seq Scan on abc_1_1 + Filter: (b = 215) + -> Seq Scan on abc_1_2 + Filter: (b = 215) + -> Append + -> Seq Scan on abc_2_1 + Filter: (b = 215) + -> Append + -> Seq Scan on abc_3_2 + Filter: (b = 215) +(14 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN 
+------------------------------------------------- + Append + -> Append + -> Seq Scan on abc_3_2 + Filter: ((a = 215) AND (b = 215)) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +---------------------------------- + Append + -> Append + -> Seq Scan on abc_3_2 + Filter: (a >= 210) +(4 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + 
subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 
WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + append_range_partition +------------------------ + subpartitions.abc_3 
+(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | 
subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 
+(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_subpartitions_1.out b/expected/pathman_subpartitions_1.out new file mode 100644 index 00000000..d620cde9 --- /dev/null +++ b/expected/pathman_subpartitions_1.out @@ -0,0 +1,461 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. + */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + 
subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + 
subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + -> Seq Scan on abc_1_1 + -> Seq Scan on abc_1_2 + -> Append + -> Seq Scan on abc_2_0 + Filter: (a < 150) + -> Seq Scan on abc_2_1 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + Filter: (b = 215) + -> Seq Scan on abc_1_1 + Filter: (b = 215) + -> Seq Scan on abc_1_2 + Filter: (b = 215) + -> Seq Scan on abc_2_1 + Filter: (b = 215) + -> Seq Scan on abc_3_2 + Filter: (b = 215) +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN +------------------------------------- + Seq Scan on abc_3_2 + Filter: ((a = 215) AND (b = 215)) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +---------------------- + Seq Scan on abc_3_2 + Filter: (a >= 210) +(2 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent 
= rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 
+ subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot 
split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE 
subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + 
subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_subpartitions_2.out b/expected/pathman_subpartitions_2.out new file mode 100644 index 00000000..26eae913 --- /dev/null +++ b/expected/pathman_subpartitions_2.out @@ -0,0 +1,461 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. 
+ */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 
row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 abc_2 + -> Seq Scan on abc_1_1 abc_3 + -> Seq Scan on abc_1_2 abc_4 + -> Append + -> Seq Scan on abc_2_0 abc_6 + Filter: (a < 150) + -> Seq Scan on abc_2_1 abc_7 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 abc_2 + Filter: (b = 215) + -> Seq Scan on abc_1_1 abc_3 + Filter: (b = 215) + -> Seq Scan on abc_1_2 abc_4 + Filter: (b = 215) + -> Seq Scan on abc_2_1 abc_5 + Filter: (b = 215) + -> Seq Scan on abc_3_2 abc_6 + Filter: (b = 215) +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN +------------------------------------- + Seq Scan on abc_3_2 abc + Filter: ((a = 215) AND (b = 215)) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +------------------------- + Seq Scan on abc_3_2 abc + Filter: (a >= 210) +(2 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + 
subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 
WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + append_range_partition +------------------------ + subpartitions.abc_3 
+(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | 
subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 
+(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out new file mode 100644 index 00000000..752cff27 --- /dev/null +++ b/expected/pathman_upd_del.out @@ -0,0 +1,473 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on 
range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS 
OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(7 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(7 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not 
supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on 
tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> 
Append + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(12 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = 
n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + CTE n + -> Append + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + Join Filter: (t.id = n.id) + -> Seq Scan on tmp t + -> CTE Scan on n + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(14 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +------------------------------------ + Delete on tmp t + CTE q + -> Append + -> Seq Scan on tmp2_1 + -> Seq Scan on tmp2_2 + -> Nested Loop Semi Join + Join Filter: (t.id = q.id) + -> Seq Scan on tmp t + -> CTE Scan on q +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out new file mode 100644 index 00000000..6e0f312d --- /dev/null +++ b/expected/pathman_upd_del_1.out @@ -0,0 +1,473 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. 
+ * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + 
QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET 
value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Update on tmp t + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t +(12 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp t + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using 
range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t +(12 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +--------------------------------------------------------- + Delete on tmp r + -> Nested Loop + Join Filter: (a1.id = a2.id) + -> Nested Loop + Join Filter: (r.id = a1.id) + -> Seq Scan on tmp r + -> Materialize + -> Append + -> Seq Scan on tmp2 a1 + -> Seq Scan on tmp2_1 a1_1 + -> Seq Scan on tmp2_2 a1_2 + -> Seq Scan on tmp2_3 a1_3 + -> Seq Scan on tmp2_4 a1_4 + -> Seq Scan on tmp2_5 a1_5 + -> Seq Scan on tmp2_6 a1_6 + -> Seq Scan on tmp2_7 a1_7 + -> Seq Scan on tmp2_8 a1_8 + -> Seq Scan on tmp2_9 a1_9 + -> Seq Scan on tmp2_10 a1_10 + -> Materialize + -> 
Append + -> Seq Scan on tmp2 a2 + -> Seq Scan on tmp2_1 a2_1 + -> Seq Scan on tmp2_2 a2_2 + -> Seq Scan on tmp2_3 a2_3 + -> Seq Scan on tmp2_4 a2_4 + -> Seq Scan on tmp2_5 a2_5 + -> Seq Scan on tmp2_6 a2_6 + -> Seq Scan on tmp2_7 a2_7 + -> Seq Scan on tmp2_8 a2_8 + -> Seq Scan on tmp2_9 a2_9 + -> Seq Scan on tmp2_10 a2_10 +(32 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +----------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Append + -> Seq Scan on tmp2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_1 t2_1 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2_2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2_3 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2_4 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2_5 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2_6 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2_7 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2_8 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2_9 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2_10 + Filter: (id = t.id) +(27 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Append + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH 
q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(17 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + CTE n + -> Append + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + Join Filter: (t.id = n.id) + -> Seq Scan on tmp t + -> CTE Scan on n + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(14 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +------------------------------------ + Delete on tmp t + CTE q + -> Append + 
-> Seq Scan on tmp2_1 + -> Seq Scan on tmp2_2 + -> Nested Loop Semi Join + Join Filter: (t.id = q.id) + -> Seq Scan on tmp t + -> CTE Scan on q +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_2.out b/expected/pathman_upd_del_2.out new file mode 100644 index 00000000..0826594c --- /dev/null +++ b/expected/pathman_upd_del_2.out @@ -0,0 +1,465 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on 
range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS 
OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM 
test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + 
-> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> 
Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN 
+---------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_3.out b/expected/pathman_upd_del_3.out new file mode 100644 index 00000000..d11eb6f8 --- /dev/null +++ b/expected/pathman_upd_del_3.out @@ -0,0 +1,465 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. 
+ * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + 
QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET 
value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t 
+WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 
a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN 
+-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT 
id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +--------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 tmp2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_4.out b/expected/pathman_upd_del_4.out new file mode 100644 index 00000000..54330190 --- /dev/null +++ b/expected/pathman_upd_del_4.out @@ -0,0 +1,464 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. 
There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | 
Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey 
on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE 
r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq 
Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +------------------------------------------ + Update on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t2.id) + -> Seq Scan on tmp2_1 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_3 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_4 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_5 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_6 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_7 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_8 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_9 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_10 t2 + Filter: (t.id = id) +(25 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = 
'2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +--------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 tmp2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM 
test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out new file mode 100644 index 00000000..9fc1d07f --- /dev/null +++ b/expected/pathman_update_node.out @@ -0,0 +1,454 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_node; +SET pg_pathman.enable_partitionrouter = ON; +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +CREATE INDEX val_idx ON test_update_node.test_range (val); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Moving from 2st to 1st partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; + QUERY PLAN +------------------------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on test_range_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) + -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(8 rows) + +/* Keep same partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; + QUERY PLAN +------------------------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on test_range_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) + -> Bitmap Heap Scan on 
test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(8 rows) + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val < 10 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_1 | 5 | 1 + test_update_node.test_range_1 | 5 | 10 + test_update_node.test_range_1 | 5 | 2 + test_update_node.test_range_1 | 5 | 3 + test_update_node.test_range_1 | 5 | 4 + test_update_node.test_range_1 | 5 | 5 + test_update_node.test_range_1 | 5 | 6 + test_update_node.test_range_1 | 5 | 7 + test_update_node.test_range_1 | 5 | 8 + test_update_node.test_range_1 | 5 | 9 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_node.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_3 | 21 | 11 + test_update_node.test_range_3 | 22 | 12 + test_update_node.test_range_3 | 23 | 13 + test_update_node.test_range_3 | 24 | 14 + test_update_node.test_range_3 | 25 | 15 + test_update_node.test_range_3 | 26 | 16 + test_update_node.test_range_3 | 27 | 17 + test_update_node.test_range_3 | 28 | 18 + test_update_node.test_range_3 | 29 | 19 + test_update_node.test_range_3 | 30 | 20 + test_update_node.test_range_3 | 21 | 21 + test_update_node.test_range_3 | 22 | 22 + test_update_node.test_range_3 | 23 | 23 + test_update_node.test_range_3 | 24 | 24 + test_update_node.test_range_3 | 25 | 25 + test_update_node.test_range_3 | 26 | 
26 + test_update_node.test_range_3 | 27 | 27 + test_update_node.test_range_3 | 28 | 28 + test_update_node.test_range_3 | 29 | 29 + test_update_node.test_range_3 | 30 | 30 +(20 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Move single row */ +UPDATE test_update_node.test_range SET val = 90 WHERE val = 80; +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 90 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_9 | 90 | 80 + test_update_node.test_range_9 | 90 | 90 +(2 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Move single row (create new partition) */ +UPDATE test_update_node.test_range SET val = -1 WHERE val = 50; +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = -1 +ORDER BY comment; + tableoid | val | comment +--------------------------------+-----+--------- + test_update_node.test_range_11 | -1 | 50 +(1 row) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Update non-key column */ +UPDATE test_update_node.test_range SET comment = 'test!' WHERE val = 100; +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 100 +ORDER BY comment; + tableoid | val | comment +--------------------------------+-----+--------- + test_update_node.test_range_10 | 100 | test! 
+(1 row) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_node.test_range_4; +UPDATE test_update_node.test_range SET val = 35 WHERE val = 70; +ERROR: cannot spawn a partition +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 70 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_7 | 70 | 70 +(1 row) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test trivial move (same key) */ +UPDATE test_update_node.test_range SET val = 65 WHERE val = 65; +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 65 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_7 | 65 | 65 +(1 row) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_node.test_range', + 'test_update_node.test_range_inv', + 101::NUMERIC, 111::NUMERIC); + attach_range_partition +--------------------------------- + test_update_node.test_range_inv +(1 row) + +UPDATE test_update_node.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 105; +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 105 +ORDER BY comment; + tableoid | val | comment +---------------------------------+-----+--------- + test_update_node.test_range_inv | 105 | 60 +(1 row) + +UPDATE test_update_node.test_range SET val = 60 WHERE val = 105; +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test RETURNING */ +UPDATE 
test_update_node.test_range SET val = 71 WHERE val = 41 RETURNING val, comment; + val | comment +-----+--------- + 71 | 41 +(1 row) + +UPDATE test_update_node.test_range SET val = 71 WHERE val = 71 RETURNING val, comment; + val | comment +-----+--------- + 71 | 71 + 71 | 41 +(2 rows) + +UPDATE test_update_node.test_range SET val = 106 WHERE val = 61 RETURNING val, comment; + val | comment +-----+--------- + 106 | 61 +(1 row) + +UPDATE test_update_node.test_range SET val = 106 WHERE val = 106 RETURNING val, comment; + val | comment +-----+--------- + 106 | 61 +(1 row) + +UPDATE test_update_node.test_range SET val = 61 WHERE val = 106 RETURNING val, comment; + val | comment +-----+--------- + 61 | 61 +(1 row) + +/* Just in case, check we don't duplicate anything */ +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_node.test_range'); + append_range_partition +-------------------------------- + test_update_node.test_range_12 +(1 row) + +UPDATE test_update_node.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 115; + tableoid | val +--------------------------------+----- + test_update_node.test_range_12 | 115 +(1 row) + +UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +DROP TABLE test_update_node.test_range CASCADE; +NOTICE: drop cascades to 13 other objects +/* recreate table and mass move */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 
10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_1 | 1 + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* move everything to next partition */ +UPDATE test_update_node.test_range SET val = val + 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 + test_update_node.test_range_11 | 101 +(10 rows) + +/* move everything to previous partition */ +UPDATE test_update_node.test_range SET val = val - 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_1 | 1 + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + 
test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Partition table by HASH (INT4) */ +CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* Shuffle rows a few times */ +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +/* Check values #0 */ +SELECT tableoid::regclass, * FROM test_update_node.test_hash ORDER BY val; + tableoid | val | comment +------------------------------+-----+--------- + test_update_node.test_hash_2 | 10 | 1 + test_update_node.test_hash_1 | 11 | 2 + test_update_node.test_hash_1 | 12 | 3 + test_update_node.test_hash_2 | 13 | 4 + test_update_node.test_hash_1 | 14 | 5 + test_update_node.test_hash_1 | 15 | 6 + test_update_node.test_hash_2 | 16 | 7 + test_update_node.test_hash_0 | 17 | 8 + test_update_node.test_hash_1 | 18 | 9 + test_update_node.test_hash_0 | 19 | 10 +(10 rows) + +/* Move all rows into single partition */ +UPDATE test_update_node.test_hash SET val = 1; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_hash +WHERE val = 1 +ORDER BY comment; + tableoid | val | comment +------------------------------+-----+--------- + test_update_node.test_hash_2 | 1 | 1 + test_update_node.test_hash_2 | 1 | 10 + 
test_update_node.test_hash_2 | 1 | 2 + test_update_node.test_hash_2 | 1 | 3 + test_update_node.test_hash_2 | 1 | 4 + test_update_node.test_hash_2 | 1 | 5 + test_update_node.test_hash_2 | 1 | 6 + test_update_node.test_hash_2 | 1 | 7 + test_update_node.test_hash_2 | 1 | 8 + test_update_node.test_hash_2 | 1 | 9 +(10 rows) + +SELECT count(*) FROM test_update_node.test_hash; + count +------- + 10 +(1 row) + +/* Don't move any rows */ +UPDATE test_update_node.test_hash SET val = 3 WHERE val = 2; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_hash +WHERE val = 3 +ORDER BY comment; + tableoid | val | comment +----------+-----+--------- +(0 rows) + +SELECT count(*) FROM test_update_node.test_hash; + count +------- + 10 +(1 row) + +DROP TABLE test_update_node.test_hash CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test_update_node.test_range CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_update_node; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_update_triggers.out b/expected/pathman_update_triggers.out new file mode 100644 index 00000000..40c6a19c --- /dev/null +++ b/expected/pathman_update_triggers.out @@ -0,0 +1,191 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + create_hash_partitions +------------------------ + 2 +(1 row) + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = 
t; +/* + * Statement level triggers + */ +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create 
trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +select count(distinct val) from test_update_triggers.test; + count +------- + 200 +(1 row) + +truncate test_update_triggers.test; +/* + * Row level triggers + */ +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute 
procedure test_update_triggers.test_trigger (); +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ +insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 2 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 3 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE 
ROW (test_2) +NOTICE: AFTER UPDATE ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 4 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 5 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 6 | test_update_triggers.test_1 +(1 row) + +select count(distinct val) from test_update_triggers.test; + count +------- + 1 +(1 row) + +DROP TABLE test_update_triggers.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_update_triggers_1.out b/expected/pathman_update_triggers_1.out new file mode 100644 index 00000000..5d26ac1e --- /dev/null +++ b/expected/pathman_update_triggers_1.out @@ -0,0 +1,198 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + create_hash_partitions +------------------------ + 2 +(1 row) + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice 
'%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; +/* + * Statement level triggers + */ +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure 
test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +select count(distinct val) from test_update_triggers.test; + count +------- + 200 +(1 row) + +truncate test_update_triggers.test; +/* + * Row level triggers + */ +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create 
trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ +insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 2 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE 
STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 3 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: AFTER UPDATE ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 4 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 5 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 6 | test_update_triggers.test_1 +(1 row) + +select count(distinct val) from test_update_triggers.test; + count +------- + 1 +(1 row) + +DROP TABLE test_update_triggers.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out new file mode 100644 index 00000000..1a8b969e --- /dev/null +++ 
b/expected/pathman_utility_stmt.out @@ -0,0 +1,448 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +/* + * Test COPY + */ +CREATE SCHEMA copy_stmt_hooking; +CREATE TABLE copy_stmt_hooking.test( + val int not null, + comment text, + c3 int, + c4 int); +INSERT INTO copy_stmt_hooking.test SELECT generate_series(1, 20), 'comment'; +CREATE INDEX ON copy_stmt_hooking.test(val); +/* test for RANGE partitioning */ +SELECT create_range_partitions('copy_stmt_hooking.test', 'val', 1, 5); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test; +VACUUM FULL copy_stmt_hooking.test_1; +VACUUM FULL copy_stmt_hooking.test_2; +VACUUM FULL copy_stmt_hooking.test_3; +VACUUM FULL copy_stmt_hooking.test_4; +/* DELETE ROWS, COPY FROM */ +DELETE FROM copy_stmt_hooking.test; +COPY copy_stmt_hooking.test FROM stdin; +SELECT count(*) FROM ONLY copy_stmt_hooking.test; + count +------- + 0 +(1 row) + +SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; + val | comment | c3 | c4 | tableoid +-----+---------+----+----+-------------------------- + 1 | test_1 | 0 | 0 | copy_stmt_hooking.test_1 + 6 | test_2 | 0 | 0 | copy_stmt_hooking.test_2 + 7 | test_2 | 0 | 0 | copy_stmt_hooking.test_2 + 11 | test_3 | 0 | 0 | copy_stmt_hooking.test_3 + 16 | test_4 | 0 | 0 | copy_stmt_hooking.test_4 +(5 rows) + +/* perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test; +VACUUM FULL copy_stmt_hooking.test_1; +VACUUM FULL copy_stmt_hooking.test_2; +VACUUM FULL copy_stmt_hooking.test_3; +VACUUM FULL copy_stmt_hooking.test_4; +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; /* not ok */ +WARNING: COPY TO will only select rows from parent table "test" +COPY copy_stmt_hooking.test (val) TO stdout; /* not ok */ +WARNING: COPY TO will only select rows from parent table "test" +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; +1 test_1 0 0 +6 test_2 0 0 +7 test_2 0 0 +11 test_3 0 0 +16 
test_4 0 0 +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout (FORMAT CSV); +1,test_1,0,0 +6,test_2,0,0 +7,test_2,0,0 +11,test_3,0,0 +16,test_4,0,0 +\copy (SELECT * FROM copy_stmt_hooking.test) TO stdout +1 test_1 0 0 +6 test_2 0 0 +7 test_2 0 0 +11 test_3 0 0 +16 test_4 0 0 +/* COPY FROM (partition does not exist, NOT allowed to create partitions) */ +SET pg_pathman.enable_auto_partition = OFF; +COPY copy_stmt_hooking.test FROM stdin; +ERROR: no suitable partition for key '21' +SELECT * FROM copy_stmt_hooking.test WHERE val > 20; + val | comment | c3 | c4 +-----+---------+----+---- +(0 rows) + +/* COPY FROM (partition does not exist, allowed to create partitions) */ +SET pg_pathman.enable_auto_partition = ON; +COPY copy_stmt_hooking.test FROM stdin; +SELECT * FROM copy_stmt_hooking.test WHERE val > 20; + val | comment | c3 | c4 +-----+--------------+----+---- + 21 | test_no_part | 0 | 0 +(1 row) + +/* COPY FROM (partitioned column is not specified) */ +COPY copy_stmt_hooking.test(comment) FROM stdin; +ERROR: partitioning expression's value should not be NULL +/* COPY FROM (we don't support FREEZE) */ +COPY copy_stmt_hooking.test FROM stdin WITH (FREEZE); +ERROR: freeze is not supported for partitioned tables +/* Drop column (make use of 'tuple_map') */ +ALTER TABLE copy_stmt_hooking.test DROP COLUMN comment; +/* create new partition */ +SELECT get_number_of_partitions('copy_stmt_hooking.test'); + get_number_of_partitions +-------------------------- + 5 +(1 row) + +INSERT INTO copy_stmt_hooking.test (val, c3, c4) VALUES (26, 1, 2); +SELECT get_number_of_partitions('copy_stmt_hooking.test'); + get_number_of_partitions +-------------------------- + 6 +(1 row) + +/* check number of columns in 'test' */ +SELECT count(*) FROM pg_attribute +WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test'::REGCLASS; + count +------- + 4 +(1 row) + +/* check number of columns in 'test_6' */ +SELECT count(*) FROM pg_attribute +WHERE attnum > 0 AND attrelid = 
'copy_stmt_hooking.test_6'::REGCLASS; + count +------- + 3 +(1 row) + +/* test transformed tuples */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; +1 0 0 +6 0 0 +7 0 0 +11 0 0 +16 0 0 +21 0 0 +26 1 2 +/* COPY FROM (insert into table with dropped column) */ +COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; +/* COPY FROM (insert into table without dropped column) */ +COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; +/* check tuples from last partition (without dropped column) */ +SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; + val | c3 | c4 | tableoid +-----+----+----+-------------------------- + 1 | 0 | 0 | copy_stmt_hooking.test_1 + 2 | 1 | 2 | copy_stmt_hooking.test_1 + 6 | 0 | 0 | copy_stmt_hooking.test_2 + 7 | 0 | 0 | copy_stmt_hooking.test_2 + 11 | 0 | 0 | copy_stmt_hooking.test_3 + 16 | 0 | 0 | copy_stmt_hooking.test_4 + 21 | 0 | 0 | copy_stmt_hooking.test_5 + 26 | 1 | 2 | copy_stmt_hooking.test_6 + 27 | 1 | 2 | copy_stmt_hooking.test_6 +(9 rows) + +/* drop modified table */ +DROP TABLE copy_stmt_hooking.test CASCADE; +NOTICE: drop cascades to 7 other objects +/* create table again */ +CREATE TABLE copy_stmt_hooking.test( + val int not null, + comment text, + c3 int, + c4 int); +CREATE INDEX ON copy_stmt_hooking.test(val); +/* test for HASH partitioning */ +SELECT create_hash_partitions('copy_stmt_hooking.test', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* DELETE ROWS, COPY FROM */ +DELETE FROM copy_stmt_hooking.test; +COPY copy_stmt_hooking.test FROM stdin; +SELECT count(*) FROM ONLY copy_stmt_hooking.test; + count +------- + 0 +(1 row) + +SELECT * FROM copy_stmt_hooking.test ORDER BY val; + val | comment | c3 | c4 +-----+---------+----+---- + 1 | hash_1 | 0 | 0 + 6 | hash_2 | 0 | 0 +(2 rows) + +/* Check dropped colums before partitioning */ +CREATE TABLE copy_stmt_hooking.test2 ( + a varchar(50), + b varchar(50), + t timestamp without time zone not null +); +ALTER TABLE 
copy_stmt_hooking.test2 DROP COLUMN a; +SELECT create_range_partitions('copy_stmt_hooking.test2', + 't', + '2017-01-01 00:00:00'::timestamp, + interval '1 hour', 5, false +); + create_range_partitions +------------------------- + 5 +(1 row) + +COPY copy_stmt_hooking.test2(t) FROM stdin; +SELECT COUNT(*) FROM copy_stmt_hooking.test2; + count +------- + 1 +(1 row) + +DROP TABLE copy_stmt_hooking.test CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE copy_stmt_hooking.test2 CASCADE; +NOTICE: drop cascades to 790 other objects +DROP SCHEMA copy_stmt_hooking; +/* + * Test auto check constraint renaming + */ +CREATE SCHEMA rename; +/* + * Check that auto naming sequence is renamed + */ +CREATE TABLE rename.parent(id int not null); +SELECT create_range_partitions('rename.parent', 'id', 1, 2, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT 'rename.parent'::regclass; /* parent is OK */ + regclass +--------------- + rename.parent +(1 row) + +SELECT 'rename.parent_seq'::regclass; /* sequence is OK */ + regclass +------------------- + rename.parent_seq +(1 row) + +ALTER TABLE rename.parent RENAME TO parent_renamed; +SELECT 'rename.parent_renamed'::regclass; /* parent is OK */ + regclass +----------------------- + rename.parent_renamed +(1 row) + +SELECT 'rename.parent_renamed_seq'::regclass; /* sequence is OK */ + regclass +--------------------------- + rename.parent_renamed_seq +(1 row) + +SELECT append_range_partition('rename.parent_renamed'); /* can append */ + append_range_partition +------------------------- + rename.parent_renamed_3 +(1 row) + +DROP SEQUENCE rename.parent_renamed_seq; +ALTER TABLE rename.parent_renamed RENAME TO parent; +SELECT 'rename.parent'::regclass; /* parent is OK */ + regclass +--------------- + rename.parent +(1 row) + +/* + * Check that partitioning constraints are renamed + */ +CREATE TABLE rename.test(a serial, b int); +SELECT create_hash_partitions('rename.test', 'a', 3); + create_hash_partitions 
+------------------------ + 3 +(1 row) + +ALTER TABLE rename.test_0 RENAME TO test_one; +/* We expect to find check constraint renamed as well */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.test_one'::regclass AND r.contype = 'c'; + conname | pg_get_constraintdef +------------------------+----------------------------------------------- + pathman_test_one_check | CHECK (get_hash_part_idx(hashint4(a), 3) = 0) +(1 row) + +/* Generates check constraint for relation */ +CREATE OR REPLACE FUNCTION add_constraint(rel regclass) +RETURNS VOID AS $$ +declare + constraint_name text := build_check_constraint_name(rel); +BEGIN + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', + rel, constraint_name); +END +$$ +LANGUAGE plpgsql; +/* + * Check that it doesn't affect regular inherited + * tables that aren't managed by pg_pathman + */ +CREATE TABLE rename.test_inh (LIKE rename.test INCLUDING ALL); +CREATE TABLE rename.test_inh_1 (LIKE rename.test INCLUDING ALL); +ALTER TABLE rename.test_inh_1 INHERIT rename.test_inh; +SELECT add_constraint('rename.test_inh_1'); + add_constraint +---------------- + +(1 row) + +ALTER TABLE rename.test_inh_1 RENAME TO test_inh_one; +/* Show check constraints of rename.test_inh_one */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.test_inh_one'::regclass AND r.contype = 'c'; + conname | pg_get_constraintdef +--------------------------+---------------------- + pathman_test_inh_1_check | CHECK (a < 100) +(1 row) + +/* + * Check that plain tables are not affected too + */ +CREATE TABLE rename.plain_test(a serial, b int); +ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; +SELECT add_constraint('rename.plain_test_renamed'); + add_constraint +---------------- + +(1 row) + +/* Show check constraints of rename.plain_test_renamed */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE 
r.conrelid = 'rename.plain_test_renamed'::regclass AND r.contype = 'c'; + conname | pg_get_constraintdef +----------------------------------+---------------------- + pathman_plain_test_renamed_check | CHECK (a < 100) +(1 row) + +ALTER TABLE rename.plain_test_renamed RENAME TO plain_test; +/* ... and check constraints of rename.plain_test */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; + conname | pg_get_constraintdef +----------------------------------+---------------------- + pathman_plain_test_renamed_check | CHECK (a < 100) +(1 row) + +DROP TABLE rename.plain_test CASCADE; +DROP TABLE rename.test_inh CASCADE; +NOTICE: drop cascades to table rename.test_inh_one +DROP TABLE rename.parent CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE rename.test CASCADE; +NOTICE: drop cascades to 3 other objects +DROP FUNCTION add_constraint(regclass); +DROP SCHEMA rename; +/* + * Test DROP INDEX CONCURRENTLY (test snapshots) + */ +CREATE SCHEMA drop_index; +CREATE TABLE drop_index.test (val INT4 NOT NULL); +CREATE INDEX ON drop_index.test (val); +SELECT create_hash_partitions('drop_index.test', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; +DROP TABLE drop_index.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA drop_index; +/* + * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla + */ +CREATE SCHEMA test_nonexistance; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; +NOTICE: relation "nonexistent_table" does not exist, skipping +/* renaming existent tables already tested earlier (see rename.plain_test) */ +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN j INT4; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE 
test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN j INT4; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +--------- + i + j +(2 rows) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table DROP COLUMN IF EXISTS i; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS i; +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS nonexistent_column; +NOTICE: column "nonexistent_column" of relation "existent_table" does not exist, skipping +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +------------------------------ + ........pg.dropped.1........ +(1 row) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME COLUMN i TO j; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME COLUMN i TO j; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +--------- + j +(1 row) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME CONSTRAINT baz TO bar; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4 CONSTRAINT existent_table_i_check CHECK (i < 100)); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME CONSTRAINT existent_table_i_check TO existent_table_i_other_check; +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET 
SCHEMA nonexistent_schema; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_schema; +ERROR: schema "nonexistent_schema" does not exist +CREATE SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; +DROP TABLE test_nonexistance2.existent_table; +DROP SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; +ERROR: tablespace "nonexistent_tablespace" does not exist +DROP TABLE test_nonexistance.existent_table; +DROP SCHEMA test_nonexistance; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views.out b/expected/pathman_views.out new file mode 100644 index 00000000..64b8425d --- /dev/null +++ b/expected/pathman_views.out @@ -0,0 +1,194 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------------- + LockRows + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(4 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 
2; + QUERY PLAN +-------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out new file mode 100644 index 00000000..e6bb45f5 --- /dev/null +++ b/expected/pathman_views_1.out @@ -0,0 +1,250 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan 
on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------------- + LockRows + -> Append + -> Seq Scan on _abc + Filter: (id = 1) + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_2 + Filter: (id = 1) + -> Seq Scan on _abc_3 + Filter: (id = 1) + -> Seq Scan on _abc_4 + Filter: (id = 1) + -> Seq Scan on _abc_5 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 1) + -> Seq Scan on _abc_7 + Filter: (id = 1) + -> Seq Scan on _abc_8 + Filter: (id = 1) + -> Seq Scan on _abc_9 + Filter: (id = 1) +(24 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +---------------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_4 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_5 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_6 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_7 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_8 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_9 + Filter: ((id = 1) OR (id = 2)) 
+(25 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +---------------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_4 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_5 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_6 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_7 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_8 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_9 + Filter: ((id = 1) OR (id = 2)) +(25 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan 
on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_2.out b/expected/pathman_views_2.out new file mode 100644 index 00000000..45ea3eb4 --- /dev/null +++ b/expected/pathman_views_2.out @@ -0,0 +1,191 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------- + Seq Scan on _abc_0 + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------- + LockRows + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN 
+-------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +---------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(7 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on 
_abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_3.out b/expected/pathman_views_3.out new file mode 100644 index 00000000..ae50bcb3 --- /dev/null +++ b/expected/pathman_views_3.out @@ -0,0 +1,192 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> 
Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +------------------------- + Seq Scan on _abc_0 _abc + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +------------------------------- + LockRows + -> Seq Scan on _abc_0 _abc + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +---------------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + 
-> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +------------------------------------------- + Unique + -> Sort + Sort Key: _abc.id + -> Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_0 _abc + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_4.out b/expected/pathman_views_4.out new file mode 100644 index 00000000..8fde5770 --- /dev/null +++ b/expected/pathman_views_4.out @@ -0,0 +1,191 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +------------------------- + Seq Scan on _abc_0 _abc + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +------------------------------- + LockRows + -> Seq Scan on _abc_0 _abc + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain 
(costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +---------------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(7 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_0 _abc + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on 
_abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/rollback_on_create_partitions.out b/expected/rollback_on_create_partitions.out index 3531107d..ee0c7c0f 100644 --- a/expected/rollback_on_create_partitions.out +++ b/expected/rollback_on_create_partitions.out @@ -5,64 +5,72 @@ step begin: BEGIN; step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; 
+parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data create_partitions show_rel commit show_rel step begin: BEGIN; step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + starting permutation: begin 
insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback show_rel step begin: BEGIN; @@ -70,23 +78,39 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c commit show_rel step begin: BEGIN; @@ -94,23 +118,39 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- 
+ 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions savepoint_c rollback_b show_rel rollback show_rel step begin: BEGIN; @@ -118,34 +158,50 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from 
range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions savepoint_c rollback_b show_rel commit show_rel step begin: BEGIN; @@ -153,44 +209,60 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from 
range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 
+range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_a show_rel rollback show_rel step begin: BEGIN; @@ -198,28 +270,45 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_a: ROLLBACK TO SAVEPOINT a; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition 
+------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_a show_rel commit show_rel step begin: BEGIN; @@ -227,28 +316,45 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_a: ROLLBACK TO SAVEPOINT a; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting 
permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_b drop_partitions show_rel rollback show_rel step begin: BEGIN; @@ -256,32 +362,61 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: 
EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_b drop_partitions show_rel commit show_rel step begin: BEGIN; @@ -289,32 +424,61 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 
rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions rollback_a create_partitions show_rel rollback show_rel step begin: BEGIN; @@ -322,37 +486,55 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from 
range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step rollback_a: ROLLBACK TO SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions rollback_a create_partitions show_rel commit show_rel step begin: BEGIN; @@ -360,44 +542,62 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: 
NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step rollback_a: ROLLBACK TO SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 
+range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + diff --git a/expected/test_variants.sh b/expected/test_variants.sh new file mode 100755 index 00000000..46bf2817 --- /dev/null +++ b/expected/test_variants.sh @@ -0,0 +1,27 @@ +#!/usr/bin/bash + +ret=0 + +red="\033[0;31m" +reset='\033[0m' + +shopt -s extglob + +for result in ./*_+([0-9]).out; do + f1="$result" + f2="${f1//_+([0-9])/}" + + printf "examine $(basename $f1) \n" + + file_diff=$(diff $f1 $f2 | wc -l) + + if [ $file_diff -eq 0 ]; then + printf $red + printf "WARNING: $(basename $f1) is redundant \n" >&2 + printf $reset + + ret=1 # change exit code + fi +done + +exit $ret diff --git a/hash.sql b/hash.sql index 2df89fd7..b22fd75e 100644 --- a/hash.sql +++ b/hash.sql @@ -1,9 +1,9 @@ /* ------------------------------------------------------------------------ * * hash.sql - * HASH partitioning functions + * HASH partitioning functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -11,75 +11,35 @@ /* * Creates hash partitions for specified relation */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( +CREATE FUNCTION @extschema@.create_hash_partitions( parent_relid REGCLASS, - attribute TEXT, - partitions_count INTEGER, - partition_data BOOLEAN DEFAULT true) -RETURNS INTEGER AS -$$ -DECLARE - v_child_relname TEXT; - v_type TEXT; - v_plain_schema TEXT; - v_plain_relname TEXT; - v_hashfunc TEXT; - + expression TEXT, + partitions_count INT4, + partition_data BOOLEAN DEFAULT TRUE, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) +RETURNS INTEGER AS $$ BEGIN - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM 
@extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - PERFORM @extschema@.validate_relname(parent_relid); - attribute := lower(attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, attribute); - - v_type := @extschema@.get_attribute_type_name(parent_relid, attribute); - - SELECT * INTO v_plain_schema, v_plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - v_hashfunc := @extschema@.get_type_hash_func(v_type::regtype)::regproc; + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) - VALUES (parent_relid, attribute, 1); - - /* Create partitions and update pg_pathman configuration */ - FOR partnum IN 0..partitions_count-1 - LOOP - v_child_relname := format('%s.%s', - quote_ident(v_plain_schema), - quote_ident(v_plain_relname || '_' || partnum)); - - EXECUTE format('CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) INHERITS (%2$s)', - v_child_relname, - parent_relid::TEXT); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s - CHECK (@extschema@.get_hash_part_idx(%s(%s), %s) = %s)', - v_child_relname, - @extschema@.build_check_constraint_name(v_child_relname::regclass, - attribute), - v_hashfunc, - attribute, - partitions_count, - partnum); - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression); + + /* Create partitions */ + PERFORM @extschema@.create_hash_partitions_internal(parent_relid, + expression, + partitions_count, + partition_names, + tablespaces); /* Copy data */ IF partition_data = true THEN - PERFORM @extschema@.disable_parent(parent_relid); + PERFORM @extschema@.set_enable_parent(parent_relid, false); PERFORM 
@extschema@.partition_data(parent_relid); ELSE - PERFORM @extschema@.enable_parent(parent_relid); + PERFORM @extschema@.set_enable_parent(parent_relid, true); END IF; RETURN partitions_count; @@ -88,127 +48,127 @@ $$ LANGUAGE plpgsql SET client_min_messages = WARNING; /* - * Creates an update trigger + * Replace hash partition with another one. It could be useful in case when + * someone wants to attach foreign table as a partition. + * + * lock_parent - should we take an exclusive lock? */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( - parent_relid REGCLASS) -RETURNS TEXT AS -$$ +CREATE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS $$ DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_idx INTEGER; /* partition indices */ - new_idx INTEGER; - - BEGIN - old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); - new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); - - IF old_idx = new_idx THEN - RETURN NEW; - END IF; - - EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) - USING %5$s; - - EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) - USING %7$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s - BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE %s()'; - - att_names TEXT; - old_fields TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; - plain_schema TEXT; - plain_relname TEXT; - child_relname_format TEXT; - funcname TEXT; - triggername TEXT; - atttype TEXT; - hashfunc TEXT; - partitions_count INTEGER; + parent_relid REGCLASS; + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = 
parent_relid; + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); - IF attr IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', parent_relid::TEXT; + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); END IF; - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' || attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_catalog.pg_attribute - WHERE attrelid = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - partitions_count := COUNT(*) FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid::oid; - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Build partition name template */ - SELECT * INTO plain_schema, plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - child_relname_format := quote_ident(plain_schema) || '.' 
|| - quote_ident(plain_relname || '_%s'); - - /* Fetch base hash function for atttype */ - atttype := @extschema@.get_attribute_type_name(parent_relid, attr); - hashfunc := @extschema@.get_type_hash_func(atttype::regtype)::regproc; - - /* Format function definition and execute it */ - func := format(func, funcname, attr, partitions_count, att_val_fmt, - old_fields, att_fmt, new_fields, child_relname_format, hashfunc); - EXECUTE func; - - /* Create trigger on every partition */ - FOR num IN 0..partitions_count-1 - LOOP - EXECUTE format(trigger, - triggername, - format(child_relname_format, num), - funcname); - END LOOP; - - return funcname; + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(old_partition); + PERFORM @extschema@.prevent_data_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + BEGIN + PERFORM @extschema@.is_tuple_convertible(parent_relid, new_partition); + EXCEPTION WHEN OTHERS THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END; + + /* Check that table is partitioned */ + IF @extschema@.get_partition_key(parent_relid) IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND pg_catalog.quote_ident(conname) = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE 
pg_catalog.format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE pg_catalog.format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + RETURN new_partition; END $$ LANGUAGE plpgsql; /* - * Returns hash function OID for specified type + * Just create HASH partitions, called by create_hash_partitions(). 
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_type_hash_func(REGTYPE) -RETURNS OID AS 'pg_pathman', 'get_type_hash_func' -LANGUAGE C STRICT; +CREATE FUNCTION @extschema@.create_hash_partitions_internal( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INT4, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) +RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' +LANGUAGE C; /* * Calculates hash for integer value */ -CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INTEGER, INTEGER) +CREATE FUNCTION @extschema@.get_hash_part_idx(INT4, INT4) RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' LANGUAGE C STRICT; + +/* + * Build hash condition for a CHECK CONSTRAINT + */ +CREATE FUNCTION @extschema@.build_hash_condition( + attribute_type REGTYPE, + attribute TEXT, + partitions_count INT4, + partition_index INT4) +RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' +LANGUAGE C STRICT; diff --git a/init.sql b/init.sql index 09fb7d54..123b2a36 100644 --- a/init.sql +++ b/init.sql @@ -3,69 +3,126 @@ * init.sql * Creates config table and provides common utility functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ + /* - * Pathman config - * partrel - regclass (relation type, stored as Oid) - * attname - partitioning key - * parttype - partitioning type: - * 1 - HASH - * 2 - RANGE - * range_interval - base interval for RANGE partitioning as string + * Takes text representation of interval value and checks if it is corresponds + * to partitioning key. 
The function throws an error if it fails to convert + * text to Datum */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( +CREATE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + expr TEXT, + parttype INTEGER, + range_interval TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + + +/* + * Main config. + * partrel - regclass (relation type, stored as Oid) + * expr - partitioning expression (key) + * parttype - partitioning type: (1 - HASH, 2 - RANGE) + * range_interval - base interval for RANGE partitioning as string + * cooked_expr - cooked partitioning expression (parsed & rewritten) + */ +CREATE TABLE @extschema@.pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, - attname TEXT NOT NULL, + expr TEXT NOT NULL, parttype INTEGER NOT NULL, - range_interval TEXT, + range_interval TEXT DEFAULT NULL, + + /* check for allowed part types */ + CONSTRAINT pathman_config_parttype_check CHECK (parttype IN (1, 2)), - CHECK (parttype IN (1, 2)) /* check for allowed part types */ + /* check for correct interval */ + CONSTRAINT pathman_config_interval_check + CHECK (@extschema@.validate_interval_value(partrel, + expr, + parttype, + range_interval)) ); + +/* + * Checks that callback function meets specific requirements. + * Particularly it must have the only JSONB argument and VOID return type. + * + * NOTE: this function is used in CHECK CONSTRAINT. + */ +CREATE FUNCTION @extschema@.validate_part_callback( + callback REGPROCEDURE, + raise_error BOOL DEFAULT TRUE) +RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' +LANGUAGE C STRICT; + + /* * Optional parameters for partitioned tables. 
- * partrel - regclass (relation type, stored as Oid) - * enable_parent - add parent table to plan - * auto - enable automatic partition creation + * partrel - regclass (relation type, stored as Oid) + * enable_parent - add parent table to plan + * auto - enable automatic partition creation + * init_callback - text signature of cb to be executed on partition creation + * spawn_using_bgw - use background worker in order to auto create partitions */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( +CREATE TABLE @extschema@.pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, - enable_parent BOOLEAN NOT NULL DEFAULT TRUE, - auto BOOLEAN NOT NULL DEFAULT TRUE + enable_parent BOOLEAN NOT NULL DEFAULT FALSE, + auto BOOLEAN NOT NULL DEFAULT TRUE, + init_callback TEXT DEFAULT NULL, + spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE + + /* check callback's signature */ + CHECK (@extschema@.validate_part_callback(CASE WHEN init_callback IS NULL + THEN 0::REGPROCEDURE + ELSE init_callback::REGPROCEDURE + END)) ); -CREATE UNIQUE INDEX i_pathman_config_params -ON @extschema@.pathman_config_params(partrel); + +GRANT SELECT, INSERT, UPDATE, DELETE +ON @extschema@.pathman_config, @extschema@.pathman_config_params +TO public; /* - * Invalidate relcache every time someone changes parameters config. 
+ * Check if current user can alter/drop specified relation */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() -RETURNS TRIGGER AS -$$ -BEGIN - IF TG_OP IN ('INSERT', 'UPDATE') THEN - PERFORM @extschema@.invalidate_relcache(NEW.partrel); - END IF; +CREATE FUNCTION @extschema@.check_security_policy(relation regclass) +RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; - IF TG_OP IN ('UPDATE', 'DELETE') THEN - PERFORM @extschema@.invalidate_relcache(OLD.partrel); - END IF; +/* + * Row security policy to restrict partitioning operations to owner and superusers only + */ +CREATE POLICY deny_modification ON @extschema@.pathman_config +FOR ALL USING (check_security_policy(partrel)); - IF TG_OP = 'DELETE' THEN - RETURN OLD; - ELSE - RETURN NEW; - END IF; -END -$$ -LANGUAGE plpgsql; +CREATE POLICY deny_modification ON @extschema@.pathman_config_params +FOR ALL USING (check_security_policy(partrel)); + +CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); + +CREATE POLICY allow_select ON @extschema@.pathman_config_params FOR SELECT USING (true); + +ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; +ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; + +/* + * Invalidate relcache every time someone changes parameters config or pathman_config + */ +CREATE FUNCTION @extschema@.pathman_config_params_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' +LANGUAGE C; CREATE TRIGGER pathman_config_params_trigger -BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + +CREATE TRIGGER pathman_config_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); /* @@ -75,153 
+132,228 @@ SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', ''); -CREATE OR REPLACE FUNCTION @extschema@.invalidate_relcache(relid OID) -RETURNS VOID AS 'pg_pathman' LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.partitions_count(relation REGCLASS) -RETURNS INT AS -$$ -BEGIN - RETURN count(*) FROM pg_inherits WHERE inhparent = relation; -END -$$ -LANGUAGE plpgsql; - /* * Add a row describing the optional parameter to pathman_config_params. */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( +CREATE FUNCTION @extschema@.pathman_set_param( relation REGCLASS, param TEXT, - value BOOLEAN) -RETURNS VOID AS -$$ + value ANYELEMENT) +RETURNS VOID AS $$ BEGIN - EXECUTE format('INSERT INTO @extschema@.pathman_config_params + EXECUTE pg_catalog.format('INSERT INTO @extschema@.pathman_config_params (partrel, %1$s) VALUES ($1, $2) ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) USING relation, value; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* - * Include parent relation into query plan's for specified relation. + * Include\exclude parent relation in query plan. */ -CREATE OR REPLACE FUNCTION @extschema@.enable_parent(relation REGCLASS) -RETURNS VOID AS -$$ +CREATE FUNCTION @extschema@.set_enable_parent( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS $$ BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', True); + PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', value); END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql STRICT; /* - * Do not include parent relation into query plan's for specified relation. + * Enable\disable automatic partition creation. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.disable_parent(relation REGCLASS) -RETURNS VOID AS -$$ +CREATE FUNCTION @extschema@.set_auto( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS $$ BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', False); + PERFORM @extschema@.pathman_set_param(relation, 'auto', value); END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql STRICT; /* - * Enable automatic partition creation. + * Set partition creation callback */ -CREATE OR REPLACE FUNCTION @extschema@.enable_auto(relation REGCLASS) -RETURNS VOID AS -$$ +CREATE FUNCTION @extschema@.set_init_callback( + relation REGCLASS, + callback REGPROCEDURE DEFAULT 0) +RETURNS VOID AS $$ +DECLARE + regproc_text TEXT := NULL; + BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'auto', True); + + /* Fetch schema-qualified name of callback */ + IF callback != 0 THEN + SELECT pg_catalog.quote_ident(nspname) || '.' || + pg_catalog.quote_ident(proname) || '(' || + (SELECT pg_catalog.string_agg(x.argtype::REGTYPE::TEXT, ',') + FROM pg_catalog.unnest(proargtypes) AS x(argtype)) || + ')' + FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n + ON n.oid = p.pronamespace + WHERE p.oid = callback + INTO regproc_text; /* <= result */ + END IF; + + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', regproc_text); END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql STRICT; /* - * Disable automatic partition creation. 
+ * Set 'spawn using BGW' option */ -CREATE OR REPLACE FUNCTION @extschema@.disable_auto(relation REGCLASS) -RETURNS VOID AS -$$ +CREATE FUNCTION @extschema@.set_spawn_using_bgw( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS $$ BEGIN - PERFORM @extschema@.pathman_set_param(relation, 'auto', False); + PERFORM @extschema@.pathman_set_param(relation, 'spawn_using_bgw', value); END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql STRICT; + +/* + * Set (or reset) default interval for auto created partitions + */ +CREATE FUNCTION @extschema@.set_interval( + relation REGCLASS, + value ANYELEMENT) +RETURNS VOID AS $$ +DECLARE + affected INTEGER; +BEGIN + UPDATE @extschema@.pathman_config + SET range_interval = value::text + WHERE partrel = relation AND parttype = 2; + + /* Check number of affected rows */ + GET DIAGNOSTICS affected = ROW_COUNT; + + IF affected = 0 THEN + RAISE EXCEPTION 'table "%" is not partitioned by RANGE', relation; + END IF; +END +$$ LANGUAGE plpgsql; + + +/* + * Show all existing parents and partitions. + */ +CREATE FUNCTION @extschema@.show_partition_list() +RETURNS TABLE ( + parent REGCLASS, + partition REGCLASS, + parttype INT4, + expr TEXT, + range_min TEXT, + range_max TEXT) +AS 'pg_pathman', 'show_partition_list_internal' +LANGUAGE C STRICT; + +/* + * View for show_partition_list(). + */ +CREATE VIEW @extschema@.pathman_partition_list +AS SELECT * FROM @extschema@.show_partition_list(); + +GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; + +/* + * Show memory usage of pg_pathman's caches. + */ +CREATE FUNCTION @extschema@.show_cache_stats() +RETURNS TABLE ( + context TEXT, + size INT8, + used INT8, + entries INT8) +AS 'pg_pathman', 'show_cache_stats_internal' +LANGUAGE C STRICT; + +/* + * View for show_cache_stats(). + */ +CREATE VIEW @extschema@.pathman_cache_stats +AS SELECT * FROM @extschema@.show_cache_stats(); /* * Show all existing concurrent partitioning tasks. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +CREATE FUNCTION @extschema@.show_concurrent_part_tasks() RETURNS TABLE ( userid REGROLE, pid INT, dbid OID, relid REGCLASS, - processed INT, - status TEXT -) AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; + processed INT8, + status TEXT) +AS 'pg_pathman', 'show_concurrent_part_tasks_internal' +LANGUAGE C STRICT; /* * View for show_concurrent_part_tasks(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_concurrent_part_tasks +CREATE VIEW @extschema@.pathman_concurrent_part_tasks AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); +GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; + /* * Partition table using ConcurrentPartWorker. */ -CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently(relation regclass) -RETURNS VOID AS 'pg_pathman', 'partition_table_concurrently' LANGUAGE C STRICT; +CREATE FUNCTION @extschema@.partition_table_concurrently( + relation REGCLASS, + batch_size INTEGER DEFAULT 1000, + sleep_time FLOAT8 DEFAULT 1.0) +RETURNS VOID AS 'pg_pathman', 'partition_table_concurrently' +LANGUAGE C STRICT; /* * Stop concurrent partitioning task. */ -CREATE OR REPLACE FUNCTION @extschema@.stop_concurrent_part_task(relation regclass) -RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' LANGUAGE C STRICT; +CREATE FUNCTION @extschema@.stop_concurrent_part_task( + relation REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' +LANGUAGE C STRICT; /* * Copy rows to partitions concurrently. 
*/ -CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( - p_relation REGCLASS, +CREATE FUNCTION @extschema@._partition_data_concurrent( + relation REGCLASS, p_min ANYELEMENT DEFAULT NULL::text, p_max ANYELEMENT DEFAULT NULL::text, p_limit INT DEFAULT NULL, OUT p_total BIGINT) -AS -$$ +AS $$ DECLARE - v_attr TEXT; + part_expr TEXT; v_limit_clause TEXT := ''; v_where_clause TEXT := ''; ctids TID[]; + BEGIN - SELECT attname INTO v_attr - FROM @extschema@.pathman_config WHERE partrel = p_relation; + part_expr := @extschema@.get_partition_key(relation); p_total := 0; /* Format LIMIT clause if needed */ IF NOT p_limit IS NULL THEN - v_limit_clause := format('LIMIT %s', p_limit); + v_limit_clause := pg_catalog.format('LIMIT %s', p_limit); END IF; /* Format WHERE clause if needed */ IF NOT p_min IS NULL THEN - v_where_clause := format('%1$s >= $1', v_attr); + v_where_clause := pg_catalog.format('%1$s >= $1', part_expr); END IF; IF NOT p_max IS NULL THEN IF NOT p_min IS NULL THEN v_where_clause := v_where_clause || ' AND '; END IF; - v_where_clause := v_where_clause || format('%1$s < $2', v_attr); + v_where_clause := v_where_clause || pg_catalog.format('%1$s < $2', part_expr); END IF; IF v_where_clause != '' THEN @@ -230,44 +362,36 @@ BEGIN /* Lock rows and copy data */ RAISE NOTICE 'Copying data to partitions...'; - EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', - p_relation, v_where_clause, v_limit_clause) + EXECUTE pg_catalog.format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', + relation, v_where_clause, v_limit_clause) USING p_min, p_max INTO ctids; - EXECUTE format(' - WITH data AS ( - DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) - INSERT INTO %1$s SELECT * FROM data', - p_relation) + EXECUTE pg_catalog.format('WITH data AS ( + DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) + INSERT INTO %1$s SELECT * FROM data', + relation) USING ctids; /* Get number of inserted rows 
*/ GET DIAGNOSTICS p_total = ROW_COUNT; RETURN; END -$$ -LANGUAGE plpgsql +$$ LANGUAGE plpgsql SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ /* * Old school way to distribute rows to partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.partition_data( +CREATE FUNCTION @extschema@.partition_data( parent_relid REGCLASS, OUT p_total BIGINT) -AS -$$ -DECLARE - relname TEXT; - rec RECORD; - cnt BIGINT := 0; - +AS $$ BEGIN p_total := 0; /* Create partitions and copy rest of the data */ - EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) + EXECUTE pg_catalog.format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) INSERT INTO %1$s SELECT * FROM part_data', parent_relid::TEXT); @@ -275,277 +399,256 @@ BEGIN GET DIAGNOSTICS p_total = ROW_COUNT; RETURN; END -$$ -LANGUAGE plpgsql +$$ LANGUAGE plpgsql STRICT SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ /* * Disable pathman partitioning for specified relation. */ -CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( +CREATE FUNCTION @extschema@.disable_pathman_for( parent_relid REGCLASS) -RETURNS VOID AS -$$ +RETURNS VOID AS $$ BEGIN PERFORM @extschema@.validate_relname(parent_relid); + /* Delete rows from both config tables */ DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; - PERFORM @extschema@.drop_triggers(parent_relid); - - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql STRICT; /* - * Aggregates several common relation checks before partitioning. - * Suitable for every partitioning type. + * Check a few things and take locks before partitioning. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( - p_relation REGCLASS, - p_attribute TEXT) -RETURNS BOOLEAN AS -$$ +CREATE FUNCTION @extschema@.prepare_for_partitioning( + parent_relid REGCLASS, + expression TEXT, + partition_data BOOLEAN) +RETURNS VOID AS $$ DECLARE - v_rec RECORD; + constr_name TEXT; is_referenced BOOLEAN; rel_persistence CHAR; BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_expression(parent_relid, expression); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + END IF; + /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = p_relation INTO rel_persistence; + WHERE oid = parent_relid INTO rel_persistence; IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'Temporary table "%" cannot be partitioned', - p_relation::TEXT; + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', parent_relid; END IF; IF EXISTS (SELECT * FROM @extschema@.pathman_config - WHERE partrel = p_relation) THEN - RAISE EXCEPTION 'Relation "%" has already been partitioned', p_relation; + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has already been partitioned', parent_relid; END IF; - IF @extschema@.is_attribute_nullable(p_relation, p_attribute) THEN - RAISE EXCEPTION 'Partitioning key ''%'' must be NOT NULL', p_attribute; + IF EXISTS (SELECT 1 FROM pg_catalog.pg_inherits WHERE inhparent = parent_relid) THEN + RAISE EXCEPTION 'can''t partition table "%" with existing children', parent_relid; END IF; /* Check if there are foreign keys that reference the relation */ - FOR v_rec IN (SELECT * - FROM pg_constraint WHERE confrelid = p_relation::regclass::oid) + FOR constr_name IN (SELECT conname FROM pg_catalog.pg_constraint + WHERE confrelid = parent_relid::REGCLASS::OID) 
LOOP is_referenced := TRUE; - RAISE WARNING 'Foreign key ''%'' references to the relation ''%''', - v_rec.conname, p_relation; + RAISE WARNING 'foreign key "%" references table "%"', constr_name, parent_relid; END LOOP; IF is_referenced THEN - RAISE EXCEPTION 'Relation "%" is referenced from other relations', p_relation; + RAISE EXCEPTION 'table "%" is referenced from other tables', parent_relid; END IF; - RETURN TRUE; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; + /* - * Returns relname without quotes or something + * Returns relname without quotes or something. */ -CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( +CREATE FUNCTION @extschema@.get_plain_schema_and_relname( cls REGCLASS, OUT schema TEXT, OUT relname TEXT) -AS -$$ +AS $$ BEGIN SELECT pg_catalog.pg_class.relnamespace::regnamespace, pg_catalog.pg_class.relname FROM pg_catalog.pg_class WHERE oid = cls::oid INTO schema, relname; END -$$ -LANGUAGE plpgsql; - -/* - * Returns schema-qualified name for table - */ -CREATE OR REPLACE FUNCTION @extschema@.get_schema_qualified_name( - cls REGCLASS, - delimiter TEXT DEFAULT '.', - suffix TEXT DEFAULT '') -RETURNS TEXT AS -$$ -BEGIN - RETURN (SELECT quote_ident(relnamespace::regnamespace::text) || - delimiter || - quote_ident(relname || suffix) - FROM pg_catalog.pg_class - WHERE oid = cls::oid); -END -$$ -LANGUAGE plpgsql; - -/* - * Validates relation name. It must be schema qualified - */ -CREATE OR REPLACE FUNCTION @extschema@.validate_relname( - cls REGCLASS) -RETURNS TEXT AS -$$ -DECLARE - relname TEXT; - -BEGIN - relname = @extschema@.get_schema_qualified_name(cls); - - IF relname IS NULL THEN - RAISE EXCEPTION 'Relation %s does not exist', cls; - END IF; - - RETURN relname; -END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql STRICT; /* - * Check if two relations have equal structures + * DDL trigger that removes entry from pathman_config table. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.validate_relations_equality( - relation1 OID, relation2 OID) -RETURNS BOOLEAN AS -$$ +CREATE FUNCTION @extschema@.pathman_ddl_trigger_func() +RETURNS event_trigger AS $$ DECLARE - rec RECORD; + obj RECORD; + pg_class_oid OID; + relids REGCLASS[]; -BEGIN - FOR rec IN ( - WITH - a1 AS (select * from pg_catalog.pg_attribute - where attrelid = relation1 and attnum > 0), - a2 AS (select * from pg_catalog.pg_attribute - where attrelid = relation2 and attnum > 0) - SELECT a1.attname name1, a2.attname name2, a1.atttypid type1, a2.atttypid type2 - FROM a1 - FULL JOIN a2 ON a1.attnum = a2.attnum - ) - LOOP - IF rec.name1 IS NULL OR rec.name2 IS NULL OR rec.name1 != rec.name2 THEN - RETURN false; - END IF; - END LOOP; - - RETURN true; -END -$$ -LANGUAGE plpgsql; - -/* - * DDL trigger that deletes entry from pathman_config table - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() -RETURNS event_trigger AS -$$ -DECLARE - obj record; - pg_class_oid oid; BEGIN pg_class_oid = 'pg_catalog.pg_class'::regclass; - /* Handle 'DROP TABLE' events */ - WITH to_be_deleted AS ( - SELECT cfg.partrel AS rel FROM pg_event_trigger_dropped_objects() AS events - JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid - WHERE events.classid = pg_class_oid - ) - DELETE FROM @extschema@.pathman_config - WHERE partrel IN (SELECT rel FROM to_be_deleted); + /* Find relids to remove from config */ + SELECT pg_catalog.array_agg(cfg.partrel) INTO relids + FROM pg_catalog.pg_event_trigger_dropped_objects() AS events + JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid + WHERE events.classid = pg_class_oid AND events.objsubid = 0; - /* Cleanup params table too */ - WITH to_be_deleted AS ( - SELECT cfg.partrel AS rel FROM pg_event_trigger_dropped_objects() AS events - JOIN @extschema@.pathman_config_params AS cfg ON cfg.partrel::oid = events.objid - WHERE events.classid = pg_class_oid - ) - DELETE FROM 
@extschema@.pathman_config_params - WHERE partrel IN (SELECT rel FROM to_be_deleted); -END -$$ -LANGUAGE plpgsql; + /* Cleanup pathman_config */ + DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); -/* - * Drop trigger - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( - parent_relid REGCLASS) -RETURNS VOID AS -$$ -BEGIN - EXECUTE format('DROP FUNCTION IF EXISTS %s() CASCADE', - @extschema@.build_update_trigger_func_name(parent_relid)); + /* Cleanup params table too */ + DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); END $$ LANGUAGE plpgsql; /* - * Drop partitions - * If delete_data set to TRUE then partitions will be dropped with all the data + * Drop partitions. If delete_data set to TRUE, partitions + * will be dropped with all the data. */ -CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( +CREATE FUNCTION @extschema@.drop_partitions( parent_relid REGCLASS, delete_data BOOLEAN DEFAULT FALSE) -RETURNS INTEGER AS -$$ +RETURNS INTEGER AS $$ DECLARE - v_rec RECORD; - v_rows INTEGER; - v_part_count INTEGER := 0; - conf_num_del INTEGER; + child REGCLASS; + rows_count BIGINT; + part_count INTEGER := 0; + rel_kind CHAR; BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Drop trigger first */ - PERFORM @extschema@.drop_triggers(parent_relid); - - WITH config_num_deleted AS (DELETE FROM @extschema@.pathman_config - WHERE partrel = parent_relid - RETURNING *) - SELECT count(*) from config_num_deleted INTO conf_num_del; - - DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); - IF conf_num_del = 0 THEN - RAISE EXCEPTION 'Relation "%" has no partitions', parent_relid::text; + IF NOT EXISTS (SELECT FROM @extschema@.pathman_config + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has no partitions', parent_relid::TEXT; END IF; - FOR v_rec IN (SELECT 
inhrelid::regclass::text AS tbl + /* Also drop naming sequence */ + PERFORM @extschema@.drop_naming_sequence(parent_relid); + + FOR child IN (SELECT inhrelid::REGCLASS FROM pg_catalog.pg_inherits - WHERE inhparent::regclass = parent_relid) + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) LOOP IF NOT delete_data THEN - EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - v_rec.tbl, - parent_relid::text); - GET DIAGNOSTICS v_rows = ROW_COUNT; + EXECUTE pg_catalog.format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + child::TEXT); + GET DIAGNOSTICS rows_count = ROW_COUNT; /* Show number of copied rows */ - RAISE NOTICE '% rows copied from %', v_rows, v_rec.tbl; + RAISE NOTICE '% rows copied from %', rows_count, child; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = child + INTO rel_kind; + + /* + * Determine the kind of child relation. It can be either a regular + * table (r) or a foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. + */ + IF rel_kind = 'f' THEN + EXECUTE pg_catalog.format('DROP FOREIGN TABLE %s', child); + ELSE + EXECUTE pg_catalog.format('DROP TABLE %s', child); END IF; - EXECUTE format('DROP TABLE %s', v_rec.tbl); - v_part_count := v_part_count + 1; + part_count := part_count + 1; END LOOP; - /* Notify backend about changes */ - PERFORM @extschema@.on_remove_partitions(parent_relid); + /* Finally delete both config entries */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; - RETURN v_part_count; + RETURN part_count; END $$ LANGUAGE plpgsql SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ +/* + * Copy all of parent's foreign keys. 
+ */ +CREATE FUNCTION @extschema@.copy_foreign_keys( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + conid OID; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + FOR conid IN (SELECT oid FROM pg_catalog.pg_constraint + WHERE conrelid = parent_relid AND contype = 'f') + LOOP + EXECUTE pg_catalog.format('ALTER TABLE %s ADD %s', + partition_relid::TEXT, + pg_catalog.pg_get_constraintdef(conid)); + END LOOP; +END +$$ LANGUAGE plpgsql STRICT; + + +/* + * Set new relname, schema and tablespace + */ +CREATE FUNCTION @extschema@.alter_partition( + relation REGCLASS, + new_name TEXT, + new_schema REGNAMESPACE, + new_tablespace TEXT) +RETURNS VOID AS $$ +DECLARE + orig_name TEXT; + orig_schema OID; + +BEGIN + SELECT relname, relnamespace FROM pg_class + WHERE oid = relation + INTO orig_name, orig_schema; + + /* Alter table name */ + IF new_name != orig_name THEN + EXECUTE pg_catalog.format('ALTER TABLE %s RENAME TO %s', relation, new_name); + END IF; + + /* Alter table schema */ + IF new_schema != orig_schema THEN + EXECUTE pg_catalog.format('ALTER TABLE %s SET SCHEMA %s', relation, new_schema); + END IF; + + /* Move to another tablespace */ + IF NOT new_tablespace IS NULL THEN + EXECUTE pg_catalog.format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); + END IF; +END +$$ LANGUAGE plpgsql; + /* * Create DDL trigger to call pathman_ddl_trigger_func(). @@ -556,117 +659,204 @@ EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); /* - * Attach a previously partitioned table + * Get partitioning key. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - attname TEXT, - range_interval TEXT DEFAULT NULL) -RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' -LANGUAGE C; - +CREATE FUNCTION @extschema@.get_partition_key( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ + SELECT expr + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; -CREATE OR REPLACE FUNCTION @extschema@.on_create_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_created' +/* + * Get partitioning key type. + */ +CREATE FUNCTION @extschema@.get_partition_key_type( + parent_relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.on_update_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_updated' +/* + * Get parsed and analyzed expression. + */ +CREATE FUNCTION @extschema@.get_partition_cooked_key( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.on_remove_partitions( - relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'on_partitions_removed' -LANGUAGE C STRICT; +/* + * Get partitioning type. + */ +CREATE FUNCTION @extschema@.get_partition_type( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; +/* + * Get number of partitions managed by pg_pathman. + */ +CREATE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT pg_catalog.count(*)::INT4 + FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid; +$$ +LANGUAGE sql STRICT; /* * Get parent of pg_pathman's partition. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition(REGCLASS) +CREATE FUNCTION @extschema@.get_parent_of_partition( + partition_relid REGCLASS) RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' LANGUAGE C STRICT; /* - * Checks if attribute is nullable + * Extract basic type of a domain. */ -CREATE OR REPLACE FUNCTION @extschema@.is_attribute_nullable( - REGCLASS, TEXT) -RETURNS BOOLEAN AS 'pg_pathman', 'is_attribute_nullable' +CREATE FUNCTION @extschema@.get_base_type( + typid REGTYPE) +RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' LANGUAGE C STRICT; /* - * Check if regclass is date or timestamp + * Return tablespace name for specified relation. + */ +CREATE FUNCTION @extschema@.get_tablespace( + relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_tablespace_pl' +LANGUAGE C STRICT; + + +/* + * Check that relation exists. + */ +CREATE FUNCTION @extschema@.validate_relname( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'validate_relname' +LANGUAGE C; + +/* + * Check that expression is valid + */ +CREATE FUNCTION @extschema@.validate_expression( + relid REGCLASS, + expression TEXT) +RETURNS VOID AS 'pg_pathman', 'validate_expression' +LANGUAGE C; + +/* + * Check if regclass is date or timestamp. */ -CREATE OR REPLACE FUNCTION @extschema@.is_date_type( +CREATE FUNCTION @extschema@.is_date_type( typid REGTYPE) RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' LANGUAGE C STRICT; /* - * Returns attribute type name for relation + * Check if TYPE supports the specified operator. */ -CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type_name( - REGCLASS, TEXT) -RETURNS TEXT AS 'pg_pathman', 'get_attribute_type_name' +CREATE FUNCTION @extschema@.is_operator_supported( + type_oid REGTYPE, + opname TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' LANGUAGE C STRICT; /* - * Get parent of pg_pathman's partition. + * Check if tuple from first relation can be converted to fit the second one. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition(REGCLASS) -RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' +CREATE FUNCTION @extschema@.is_tuple_convertible( + relation1 REGCLASS, + relation2 REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'is_tuple_convertible' LANGUAGE C STRICT; + /* - * Build check constraint name for a specified relation's column + * Build check constraint name for a specified relation's column. */ -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - REGCLASS, INT2) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attnum' +CREATE FUNCTION @extschema@.build_check_constraint_name( + partition_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( - REGCLASS, TEXT) -RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name_attname' -LANGUAGE C STRICT; +/* + * Add record to pathman_config (RANGE) and validate partitions. + */ +CREATE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + expression TEXT, + range_interval TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' +LANGUAGE C; + +/* + * Add record to pathman_config (HASH) and validate partitions. + */ +CREATE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + expression TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' +LANGUAGE C; + /* - * Build update trigger and its underlying function's names. + * Lock partitioned relation to restrict concurrent + * modification of partitioning scheme. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_name( - REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_name' +CREATE FUNCTION @extschema@.prevent_part_modification( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'prevent_part_modification' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_func_name( - REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_func_name' +/* + * Lock relation to restrict concurrent modification of data. + */ +CREATE FUNCTION @extschema@.prevent_data_modification( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'prevent_data_modification' LANGUAGE C STRICT; /* - * Lock partitioned relation to restrict concurrent modification of partitioning scheme. + * Invoke init_callback on RANGE partition. */ - CREATE OR REPLACE FUNCTION @extschema@.lock_partitioned_relation( - REGCLASS) - RETURNS VOID AS 'pg_pathman', 'lock_partitioned_relation' - LANGUAGE C STRICT; +CREATE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition_relid REGCLASS, + init_callback REGPROCEDURE, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; /* - * Lock relation to restrict concurrent modification of data. + * Invoke init_callback on HASH partition. */ - CREATE OR REPLACE FUNCTION @extschema@.prevent_relation_modification( - REGCLASS) - RETURNS VOID AS 'pg_pathman', 'prevent_relation_modification' - LANGUAGE C STRICT; - +CREATE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition_relid REGCLASS, + init_callback REGPROCEDURE) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; /* * DEBUG: Place this inside some plpgsql fuction and set breakpoint. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.debug_capture() +CREATE FUNCTION @extschema@.debug_capture() RETURNS VOID AS 'pg_pathman', 'debug_capture' LANGUAGE C STRICT; + +CREATE FUNCTION @extschema@.pathman_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' +LANGUAGE C STRICT; diff --git a/mk_dockerfile.sh b/mk_dockerfile.sh new file mode 100755 index 00000000..f15433c4 --- /dev/null +++ b/mk_dockerfile.sh @@ -0,0 +1,16 @@ +if [ -z ${PG_VERSION+x} ]; then + echo PG_VERSION is not set! + exit 1 +fi + +if [ -z ${LEVEL+x} ]; then + LEVEL=scan-build +fi + +echo PG_VERSION=${PG_VERSION} +echo LEVEL=${LEVEL} + +sed \ + -e 's/${PG_VERSION}/'${PG_VERSION}/g \ + -e 's/${LEVEL}/'${LEVEL}/g \ + Dockerfile.tmpl > Dockerfile diff --git a/patches/REL_11_STABLE-pg_pathman-core.diff b/patches/REL_11_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..b3b08e0a --- /dev/null +++ b/patches/REL_11_STABLE-pg_pathman-core.diff @@ -0,0 +1,53 @@ +diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c +index 6384ac940d8..8b4f731e7a8 100644 +--- a/src/backend/jit/llvm/llvmjit_deform.c ++++ b/src/backend/jit/llvm/llvmjit_deform.c +@@ -104,6 +104,10 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, int natts) + + int attnum; + ++ /* don't generate code for tuples without user attributes */ ++ if (desc->natts == 0) ++ return NULL; ++ + mod = llvm_mutable_module(context); + + funcname = llvm_expand_funcname(context, "deform"); +diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c +index 12138e49577..8638ebc4ba1 100644 +--- a/src/backend/jit/llvm/llvmjit_expr.c ++++ b/src/backend/jit/llvm/llvmjit_expr.c +@@ -274,6 +274,7 @@ llvm_compile_expr(ExprState *state) + LLVMValueRef v_slot; + LLVMBasicBlockRef b_fetch; + LLVMValueRef v_nvalid; ++ LLVMValueRef l_jit_deform = NULL; + + b_fetch = l_bb_before_v(opblocks[i + 1], + "op.%d.fetch", i); +@@ -336,17 +337,20 @@ llvm_compile_expr(ExprState 
*state) + */ + if (desc && (context->base.flags & PGJIT_DEFORM)) + { +- LLVMValueRef params[1]; +- LLVMValueRef l_jit_deform; +- + l_jit_deform = +- slot_compile_deform(context, desc, ++ slot_compile_deform(context, ++ desc, + op->d.fetch.last_var); ++ } ++ ++ if (l_jit_deform) ++ { ++ LLVMValueRef params[1]; ++ + params[0] = v_slot; + + LLVMBuildCall(b, l_jit_deform, + params, lengthof(params), ""); +- + } + else + { diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff b/patches/REL_14_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..a6ac1afa --- /dev/null +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -0,0 +1,513 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index f27e458482..ea47c341c1 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -32,6 +32,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index bf551b0395..10d2044ae6 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -76,7 +76,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index bdf59a10fc..972453d9a5 100644 +--- a/src/backend/executor/execExprInterp.c ++++ b/src/backend/executor/execExprInterp.c +@@ -1799,6 +1799,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? 
outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index b3ce4bae53..8f2bb12542 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -824,6 +824,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2713,6 +2720,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_junkFilter = parentestate->es_junkFilter; + rcestate->es_output_cid = parentestate->es_output_cid; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. +diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index 55c430c9ec..21d9e6304a 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -510,7 +510,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, + * This is also a convenient place to verify that the output of an UPDATE + * matches the target table (ExecBuildUpdateProjection does that). 
+ */ +-static void ++void + ExecInitUpdateProjection(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) + { +@@ -2486,6 +2486,7 @@ ExecModifyTable(PlanState *pstate) + ItemPointerData tuple_ctid; + HeapTupleData oldtupdata; + HeapTuple oldtuple; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -2523,12 +2524,23 @@ ExecModifyTable(PlanState *pstate) + resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex; + subplanstate = outerPlanState(node); + ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; ++ + /* + * Fetch rows from subplan, and execute the required table modification + * for each row. + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contains original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -2562,7 +2574,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? 
++ estate->es_original_tuple : planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + elog(ERROR, "tableoid is NULL"); +@@ -2581,6 +2595,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -2590,6 +2606,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -2619,7 +2636,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -2649,7 +2667,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -2680,8 +2699,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, planSlot); +- slot = ExecInsert(node, resultRelInfo, slot, planSlot, ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, planSlot); ++ slot = ExecInsert(node, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ slot, planSlot, + estate, node->canSetTag); + break; + case CMD_UPDATE: +@@ -2689,6 +2712,13 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -2712,14 +2742,19 @@ ExecModifyTable(PlanState *pstate) + } + slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, + oldSlot); ++ } + + /* Now apply the update. */ +- slot = ExecUpdate(node, resultRelInfo, tupleid, oldtuple, slot, ++ slot = ExecUpdate(node, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, slot, + planSlot, &node->mt_epqstate, estate, + node->canSetTag); + break; + case CMD_DELETE: +- slot = ExecDelete(node, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(node, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + planSlot, &node->mt_epqstate, estate, + true, /* processReturning */ + node->canSetTag, +@@ -2736,7 +2771,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. 
+ */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -2752,6 +2790,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -2826,6 +2865,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -2922,6 +2962,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -3002,6 +3049,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ExecInitStoredGenerated(resultRelInfo, estate, operation); + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete, there will be a junk attribute + * named "tableoid" present in the subplan's targetlist. 
It will be used +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 381d9e548d..0a4657d291 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 5af78bd0dc..0c13bc9d83 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,7 +53,9 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern bool DefaultXactReadOnly; +-extern bool XactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY ++extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ + extern bool xact_is_sampled; +diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h +index 2b4e104bb9..80d1274efe 100644 +--- a/src/include/catalog/objectaddress.h ++++ b/src/include/catalog/objectaddress.h +@@ -28,7 +28,7 @@ typedef struct ObjectAddress + int32 objectSubId; /* Subitem within object (eg column), or 0 */ + } ObjectAddress; + +-extern const ObjectAddress InvalidObjectAddress; ++extern PGDLLIMPORT const ObjectAddress InvalidObjectAddress; + + #define ObjectAddressSubSet(addr, class_id, object_id, object_sub_id) \ + do { \ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index 3dc03c913e..1002d97499 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -657,5 +657,7 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++extern void ExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git 
a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h +index 4acb1cda6e..fd8d38347d 100644 +--- a/src/include/libpq/libpq-be.h ++++ b/src/include/libpq/libpq-be.h +@@ -327,7 +327,7 @@ extern ssize_t be_gssapi_read(Port *port, void *ptr, size_t len); + extern ssize_t be_gssapi_write(Port *port, void *ptr, size_t len); + #endif /* ENABLE_GSS */ + +-extern ProtocolVersion FrontendProtocol; ++extern PGDLLIMPORT ProtocolVersion FrontendProtocol; + + /* TCP keepalives configuration. These are no-ops on an AF_UNIX socket. */ + +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index ee5ad3c058..dc474819d7 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -592,6 +592,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/include/utils/snapmgr.h b/src/include/utils/snapmgr.h +index 33e6c14e81..abd9bba23e 100644 +--- a/src/include/utils/snapmgr.h ++++ b/src/include/utils/snapmgr.h +@@ -53,7 +53,7 @@ extern TimestampTz GetSnapshotCurrentTimestamp(void); + extern TimestampTz GetOldSnapshotThresholdTimestamp(void); + extern void SnapshotTooOldMagicForTest(void); + +-extern bool FirstSnapshotSet; ++extern PGDLLIMPORT bool FirstSnapshotSet; + + extern PGDLLIMPORT TransactionId TransactionXmin; + extern PGDLLIMPORT TransactionId RecentXmin; +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index de22c9ba2c..c8be5323b8 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,18 @@ my @client_program_files = ( + 'pg_receivewal', 
'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} + sub lcopy + { + my $src = shift; +@@ -608,7 +620,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 9b6539fb15..f8a67c6701 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -41,7 +41,10 @@ my @contrib_uselibpq = + my @contrib_uselibpgport = ('libpq_pipeline', 'oid2name', 'vacuumlo'); + my @contrib_uselibpgcommon = ('libpq_pipeline', 'oid2name', 'vacuumlo'); + my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = { 'dblink' => ['src/backend'] }; ++my $contrib_extraincludes = { ++ 'dblink' => ['src/backend'], ++ 'pg_pathman' => ['contrib/pg_pathman/src/include'] ++}; + my $contrib_extrasource = { + 'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ], + 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], +@@ -973,6 +976,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + } + elsif ($mf =~ /^MODULES\s*=\s*(.*)$/mg) +@@ -1002,6 +1006,19 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my 
$varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1026,23 +1043,53 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) +- { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ if ( -f "contrib/$n/$d.in" ) { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } else { ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..b8db29fd --- /dev/null +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -0,0 +1,487 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index bbf220407b..9a82a2db04 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -34,6 +34,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index 7a3d9b4b01..0c3d2dec6c 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index 87c7603f2b..9cc0bc0da8 100644 +--- a/src/backend/executor/execExprInterp.c ++++ 
b/src/backend/executor/execExprInterp.c +@@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index 0ba61fd547..29d93998b2 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2849,6 +2856,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_junkFilter = parentestate->es_junkFilter; + rcestate->es_output_cid = parentestate->es_output_cid; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. 
+diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index 1ad5dcb406..047508e0da 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -641,6 +641,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, + resultRelInfo->ri_projectNewInfoValid = true; + } + ++void ++PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo) ++{ ++ ExecInitUpdateProjection(mtstate, resultRelInfo); ++} ++ + /* + * ExecGetInsertNewTuple + * This prepares a "new" tuple ready to be inserted into given result +@@ -3581,6 +3588,7 @@ ExecModifyTable(PlanState *pstate) + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ItemPointer tupleid; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -3622,6 +3630,8 @@ ExecModifyTable(PlanState *pstate) + context.mtstate = node; + context.epqstate = &node->mt_epqstate; + context.estate = estate; ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; + + /* + * Fetch rows from subplan, and execute the required table modification +@@ -3629,6 +3639,14 @@ ExecModifyTable(PlanState *pstate) + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contain original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -3662,7 +3680,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? 
++ estate->es_original_tuple : context.planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + { +@@ -3699,6 +3719,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -3708,6 +3730,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -3738,7 +3761,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + +@@ -3786,7 +3810,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -3817,9 +3842,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); +- slot = ExecInsert(&context, resultRelInfo, slot, +- node->canSetTag, NULL, NULL); ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); ++ slot = ExecInsert(&context, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ slot, node->canSetTag, NULL, NULL); + break; + + case CMD_UPDATE: +@@ -3827,6 +3855,13 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -3850,14 +3885,19 @@ ExecModifyTable(PlanState *pstate) + slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, + oldSlot); + context.relaction = NULL; ++ } + + /* Now apply the update. */ +- slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecUpdate(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + slot, node->canSetTag); + break; + + case CMD_DELETE: +- slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + true, false, node->canSetTag, NULL, NULL, NULL); + break; + +@@ -3875,7 +3915,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. 
+ */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -3891,6 +3934,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -3965,6 +4009,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -4067,6 +4112,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -4161,6 +4213,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ExecInitStoredGenerated(resultRelInfo, estate, operation); + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete/merge, there will be a junk + * attribute named "tableoid" present in the subplan's targetlist. 
It +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 1a5d29ac9b..aadca8ea47 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 8d46a781bb..150d70cb64 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern PGDLLIMPORT bool DefaultXactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY + extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index 7cd9b2f2bf..b31a7934a4 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -662,5 +662,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++#define PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION ++/* ++ * This function is static in vanilla, but pg_pathman wants it exported. ++ * We cannot make it extern with the same name to avoid compilation errors ++ * in timescaledb, which ships it's own static copy of the same function. ++ * So, export ExecInitUpdateProjection with Pgpro prefix. ++ * ++ * The define above helps pg_pathman to expect proper exported symbol ++ * from various versions of pgpro. 
++ */ ++extern void PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index 9f176b0e37..a65799dcce 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -624,6 +624,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index 8de79c618c..c9226ba5ad 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,18 @@ my @client_program_files = ( + 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} + sub lcopy + { + my $src = shift; +@@ -609,7 +621,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . 
substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 990c223a9b..cd5048f8d5 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -39,8 +39,8 @@ my $contrib_defines = {}; + my @contrib_uselibpq = (); + my @contrib_uselibpgport = (); + my @contrib_uselibpgcommon = (); +-my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = {}; ++my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; ++my $contrib_extraincludes = { 'pg_pathman' => ['contrib/pg_pathman/src/include'] }; + my $contrib_extrasource = {}; + my @contrib_excludes = ( + 'bool_plperl', 'commit_ts', +@@ -967,6 +967,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + push @projects, $proj; + } +@@ -1070,6 +1071,19 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1094,23 +1108,53 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) +- { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ if ( -f "contrib/$n/$d.in" ) { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } else { ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } diff --git a/patches/REL_16_STABLE-pg_pathman-core.diff b/patches/REL_16_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..50dad389 --- /dev/null +++ b/patches/REL_16_STABLE-pg_pathman-core.diff @@ -0,0 +1,547 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index bbf220407b..9a82a2db04 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -34,6 +34,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index 4a2ea4adba..7cadde5499 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -79,7 +79,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel = XACT_READ_COMMITTED; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index 6b7997465d..5e9e878d3b 100644 +--- 
a/src/backend/executor/execExprInterp.c ++++ b/src/backend/executor/execExprInterp.c +@@ -1845,6 +1845,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index 4c5a7bbf62..7d638aa22d 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -561,6 +561,39 @@ ExecutorRewind(QueryDesc *queryDesc) + } + + ++/* ++ * ExecCheckOneRtePermissions ++ * Check access permissions for one RTE ++ * ++ * Returns true if permissions are adequate. Otherwise, throws an appropriate ++ * error if ereport_on_violation is true, or simply returns false otherwise. 
++ * ++ * This function uses pg_pathman due to commit f75cec4fff, see PGPRO-7792 ++ */ ++bool ++ExecCheckOneRtePermissions(RangeTblEntry *rte, RTEPermissionInfo *perminfo, ++ bool ereport_on_violation) ++{ ++ bool result = true; ++ ++ Assert(OidIsValid(perminfo->relid)); ++ Assert(rte->relid == perminfo->relid); ++ ++ result = ExecCheckOneRelPerms(perminfo); ++ ++ if (!result) ++ { ++ if (ereport_on_violation) ++ aclcheck_error(ACLCHECK_NO_PRIV, ++ get_relkind_objtype(get_rel_relkind(perminfo->relid)), ++ get_rel_name(perminfo->relid)); ++ return false; ++ } ++ ++ return result; ++} ++ ++ + /* + * ExecCheckPermissions + * Check access permissions of relations mentioned in a query +@@ -856,6 +889,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2873,6 +2913,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_output_cid = parentestate->es_output_cid; + rcestate->es_queryEnv = parentestate->es_queryEnv; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. 
+diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index c84caeeaee..2a355607e9 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -660,6 +660,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, + resultRelInfo->ri_projectNewInfoValid = true; + } + ++void ++PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo) ++{ ++ ExecInitUpdateProjection(mtstate, resultRelInfo); ++} ++ + /* + * ExecGetInsertNewTuple + * This prepares a "new" tuple ready to be inserted into given result +@@ -3570,6 +3577,7 @@ ExecModifyTable(PlanState *pstate) + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ItemPointer tupleid; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -3611,6 +3619,8 @@ ExecModifyTable(PlanState *pstate) + context.mtstate = node; + context.epqstate = &node->mt_epqstate; + context.estate = estate; ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; + + /* + * Fetch rows from subplan, and execute the required table modification +@@ -3618,6 +3628,14 @@ ExecModifyTable(PlanState *pstate) + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contain original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -3651,7 +3669,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? 
++ estate->es_original_tuple : context.planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + { +@@ -3688,6 +3708,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -3697,6 +3719,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -3727,7 +3750,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + +@@ -3775,7 +3799,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -3806,9 +3831,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); +- slot = ExecInsert(&context, resultRelInfo, slot, +- node->canSetTag, NULL, NULL); ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); ++ slot = ExecInsert(&context, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ slot, node->canSetTag, NULL, NULL); + break; + + case CMD_UPDATE: +@@ -3816,6 +3844,13 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -3839,14 +3874,19 @@ ExecModifyTable(PlanState *pstate) + slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, + oldSlot); + context.relaction = NULL; ++ } + + /* Now apply the update. */ +- slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecUpdate(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + slot, node->canSetTag); + break; + + case CMD_DELETE: +- slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + true, false, node->canSetTag, NULL, NULL, NULL); + break; + +@@ -3864,7 +3904,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. 
+ */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -3880,6 +3923,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -3954,6 +3998,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -4056,6 +4101,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -4138,6 +4190,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + } + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete/merge, there will be a junk + * attribute named "tableoid" present in the subplan's targetlist. 
It +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 011ec18015..7b4fcb2807 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 7d3b9446e6..20030111f4 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern PGDLLIMPORT bool DefaultXactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY + extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index ac02247947..c39ae13a8e 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -208,6 +208,9 @@ extern void standard_ExecutorFinish(QueryDesc *queryDesc); + extern void ExecutorEnd(QueryDesc *queryDesc); + extern void standard_ExecutorEnd(QueryDesc *queryDesc); + extern void ExecutorRewind(QueryDesc *queryDesc); ++extern bool ExecCheckOneRtePermissions(RangeTblEntry *rte, ++ RTEPermissionInfo *perminfo, ++ bool ereport_on_violation); + extern bool ExecCheckPermissions(List *rangeTable, + List *rteperminfos, bool ereport_on_violation); + extern void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation); +@@ -676,5 +679,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++#define PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION ++/* ++ * This function is static in vanilla, but pg_pathman wants it exported. 
++ * We cannot make it extern with the same name to avoid compilation errors ++ * in timescaledb, which ships it's own static copy of the same function. ++ * So, export ExecInitUpdateProjection with Pgpro prefix. ++ * ++ * The define above helps pg_pathman to expect proper exported symbol ++ * from various versions of pgpro. ++ */ ++extern void PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index 869465d6f8..6bdde351d7 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -638,6 +638,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index 05548d7c0a..37754370e0 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,22 @@ my @client_program_files = ( + 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables ++{ ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) ++ { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) ++ { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub lcopy + { + my $src = shift; +@@ -580,7 +596,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . 
substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 6a79a0e037..93696f53ae 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -40,7 +40,7 @@ my @contrib_uselibpq = (); + my @contrib_uselibpgport = (); + my @contrib_uselibpgcommon = (); + my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = {}; ++my $contrib_extraincludes = { 'pg_pathman' => ['contrib/pg_pathman/src/include'] }; + my $contrib_extrasource = {}; + my @contrib_excludes = ( + 'bool_plperl', 'commit_ts', +@@ -980,6 +980,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + push @projects, $proj; + } +@@ -1083,6 +1084,22 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables ++{ ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) ++ { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) ++ { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1107,23 +1124,59 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ if ( -f "contrib/$n/$d.in" ) ++ { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } ++ else + { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) ++ { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) ++ { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) ++ { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } diff --git a/pg_compat_available.sh b/pg_compat_available.sh new file mode 100755 index 00000000..d2d7cabc --- /dev/null +++ b/pg_compat_available.sh @@ -0,0 +1,6 @@ +#!/usr/bin/bash + +dir=$(dirname $0) +func="$1" + +grep -n -r --include=pg_compat.c --include=pg_compat.h $func $dir | head -n1 diff --git a/pg_pathman--1.0--1.1.sql b/pg_pathman--1.0--1.1.sql new file mode 100644 index 00000000..e007b41d --- /dev/null +++ b/pg_pathman--1.0--1.1.sql @@ -0,0 +1,1980 @@ +/* ------------------------------------------------------------------------ + * + * pg_pathman--1.0--1.1.sql + * Migration scripts to version 1.1 + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + + +/* ------------------------------------------------------------------------ + * Modify config params table + * ----------------------------------------------------------------------*/ +ALTER TABLE @extschema@.pathman_config_params ADD COLUMN init_callback REGPROCEDURE NOT NULL DEFAULT 0; +ALTER TABLE @extschema@.pathman_config_params ALTER COLUMN enable_parent SET DEFAULT 
FALSE; + + +/* ------------------------------------------------------------------------ + * Enable permissions + * ----------------------------------------------------------------------*/ +GRANT SELECT, INSERT, UPDATE, DELETE +ON @extschema@.pathman_config, @extschema@.pathman_config_params +TO public; + +CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) +RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; + +CREATE POLICY deny_modification ON @extschema@.pathman_config +FOR ALL USING (check_security_policy(partrel)); + +CREATE POLICY deny_modification ON @extschema@.pathman_config_params +FOR ALL USING (check_security_policy(partrel)); + +CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); + +CREATE POLICY allow_select ON @extschema@.pathman_config_params FOR SELECT USING (true); + +ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; +ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; + +GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; + + +/* ------------------------------------------------------------------------ + * Drop irrelevant functions + * ----------------------------------------------------------------------*/ +DROP FUNCTION @extschema@.pathman_set_param(REGCLASS, TEXT, BOOLEAN); +DROP FUNCTION @extschema@.enable_parent(REGCLASS); +DROP FUNCTION @extschema@.disable_parent(relation REGCLASS); +DROP FUNCTION @extschema@.enable_auto(relation REGCLASS); +DROP FUNCTION @extschema@.disable_auto(relation REGCLASS); +DROP FUNCTION @extschema@.partition_table_concurrently(relation regclass); +DROP FUNCTION @extschema@._partition_data_concurrent(REGCLASS, ANYELEMENT, ANYELEMENT, INT, OUT BIGINT); +DROP FUNCTION @extschema@.common_relation_checks(REGCLASS, TEXT); +DROP FUNCTION @extschema@.get_attribute_type_name(REGCLASS, TEXT); +DROP FUNCTION @extschema@.get_type_hash_func(REGTYPE); +DROP FUNCTION 
@extschema@.check_boundaries(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, INTERVAL, INTEGER, BOOLEAN); +DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT, INTEGER, BOOLEAN); +DROP FUNCTION @extschema@.create_partitions_from_range(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT, ANYELEMENT, BOOLEAN); +DROP FUNCTION @extschema@.create_partitions_from_range(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT, INTERVAL, BOOLEAN); +DROP FUNCTION @extschema@.create_single_range_partition(REGCLASS, ANYELEMENT, ANYELEMENT, TEXT); +DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, OUT ANYARRAY); +DROP FUNCTION @extschema@.merge_range_partitions(REGCLASS, REGCLASS); +DROP FUNCTION @extschema@.append_range_partition(REGCLASS, TEXT); +DROP FUNCTION @extschema@.append_partition_internal(REGCLASS, TEXT, TEXT, ANYARRAY, TEXT); +DROP FUNCTION @extschema@.prepend_range_partition(REGCLASS, TEXT); +DROP FUNCTION @extschema@.prepend_partition_internal(REGCLASS, TEXT, TEXT, ANYARRAY, TEXT); +DROP FUNCTION @extschema@.add_range_partition(REGCLASS, ANYELEMENT, ANYELEMENT, TEXT); +DROP FUNCTION @extschema@.drop_range_partition(REGCLASS); +DROP FUNCTION @extschema@.attach_range_partition(REGCLASS, REGCLASS, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.detach_range_partition(REGCLASS); +DROP FUNCTION @extschema@.build_range_condition(TEXT, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.get_range_by_idx(REGCLASS, INTEGER, ANYELEMENT); +DROP FUNCTION @extschema@.get_range_by_part_oid(REGCLASS, REGCLASS, ANYELEMENT); +DROP FUNCTION @extschema@.get_min_range_value(REGCLASS, ANYELEMENT); +DROP FUNCTION @extschema@.get_max_range_value(REGCLASS, ANYELEMENT); + + +/* ------------------------------------------------------------------------ + * Alter functions' modifiers + * ----------------------------------------------------------------------*/ +ALTER FUNCTION 
@extschema@.partitions_count(REGCLASS) STRICT; +ALTER FUNCTION @extschema@.partition_data(REGCLASS, OUT BIGINT) STRICT; +ALTER FUNCTION @extschema@.disable_pathman_for(REGCLASS) STRICT; +ALTER FUNCTION @extschema@.get_plain_schema_and_relname(REGCLASS, OUT TEXT, OUT TEXT) STRICT; +ALTER FUNCTION @extschema@.get_schema_qualified_name(REGCLASS, TEXT, TEXT) STRICT; +ALTER FUNCTION @extschema@.drop_triggers(REGCLASS) STRICT; +ALTER FUNCTION @extschema@.check_overlap(REGCLASS, ANYELEMENT, ANYELEMENT) CALLED ON NULL INPUT; +ALTER FUNCTION @extschema@.find_or_create_range_partition(REGCLASS, ANYELEMENT) CALLED ON NULL INPUT; + + +/* ------------------------------------------------------------------------ + * Add new views + * ----------------------------------------------------------------------*/ +CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() +RETURNS TABLE ( + parent REGCLASS, + partition REGCLASS, + parttype INT4, + partattr TEXT, + range_min TEXT, + range_max TEXT) +AS 'pg_pathman', 'show_partition_list_internal' LANGUAGE C STRICT; + +CREATE OR REPLACE VIEW @extschema@.pathman_partition_list +AS SELECT * FROM @extschema@.show_partition_list(); + +GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; + + +/* ------------------------------------------------------------------------ + * (Re)create functions + * ----------------------------------------------------------------------*/ +CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( + relation REGCLASS, + param TEXT, + value ANYELEMENT) +RETURNS VOID AS +$$ +BEGIN + EXECUTE format('INSERT INTO @extschema@.pathman_config_params + (partrel, %1$s) VALUES ($1, $2) + ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) + USING relation, value; +END +$$ +LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'enable_parent', value); +END +$$ +LANGUAGE 
plpgsql STRICT; + +/* + * Partition table using ConcurrentPartWorker. + */ +CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently( + relation REGCLASS, + batch_size INTEGER DEFAULT 1000, + sleep_time FLOAT8 DEFAULT 1.0) +RETURNS VOID AS 'pg_pathman', 'partition_table_concurrently' +LANGUAGE C STRICT; + +/* + * Copy rows to partitions concurrently. + */ +CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( + relation REGCLASS, + p_min ANYELEMENT DEFAULT NULL::text, + p_max ANYELEMENT DEFAULT NULL::text, + p_limit INT DEFAULT NULL, + OUT p_total BIGINT) +AS +$$ +DECLARE + v_attr TEXT; + v_limit_clause TEXT := ''; + v_where_clause TEXT := ''; + ctids TID[]; + +BEGIN + SELECT attname INTO v_attr + FROM @extschema@.pathman_config WHERE partrel = relation; + + p_total := 0; + + /* Format LIMIT clause if needed */ + IF NOT p_limit IS NULL THEN + v_limit_clause := format('LIMIT %s', p_limit); + END IF; + + /* Format WHERE clause if needed */ + IF NOT p_min IS NULL THEN + v_where_clause := format('%1$s >= $1', v_attr); + END IF; + + IF NOT p_max IS NULL THEN + IF NOT p_min IS NULL THEN + v_where_clause := v_where_clause || ' AND '; + END IF; + v_where_clause := v_where_clause || format('%1$s < $2', v_attr); + END IF; + + IF v_where_clause != '' THEN + v_where_clause := 'WHERE ' || v_where_clause; + END IF; + + /* Lock rows and copy data */ + RAISE NOTICE 'Copying data to partitions...'; + EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', + relation, v_where_clause, v_limit_clause) + USING p_min, p_max + INTO ctids; + + EXECUTE format(' + WITH data AS ( + DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) + INSERT INTO %1$s SELECT * FROM data', + relation) + USING ctids; + + /* Get number of inserted rows */ + GET DIAGNOSTICS p_total = ROW_COUNT; + RETURN; +END +$$ +LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ + +/* + * Aggregates several common 
relation checks before partitioning. + * Suitable for every partitioning type. + */ +CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( + relation REGCLASS, + p_attribute TEXT) +RETURNS BOOLEAN AS +$$ +DECLARE + v_rec RECORD; + is_referenced BOOLEAN; + rel_persistence CHAR; + +BEGIN + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = relation INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', + relation::TEXT; + END IF; + + IF EXISTS (SELECT * FROM @extschema@.pathman_config + WHERE partrel = relation) THEN + RAISE EXCEPTION 'relation "%" has already been partitioned', relation; + END IF; + + IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN + RAISE EXCEPTION 'partitioning key ''%'' must be NOT NULL', p_attribute; + END IF; + + /* Check if there are foreign keys that reference the relation */ + FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint + WHERE confrelid = relation::REGCLASS::OID) + LOOP + is_referenced := TRUE; + RAISE WARNING 'foreign key "%" references relation "%"', + v_rec.conname, relation; + END LOOP; + + IF is_referenced THEN + RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; + END IF; + + RETURN TRUE; +END +$$ +LANGUAGE plpgsql; + +/* + * DDL trigger that removes entry from pathman_config table. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +RETURNS event_trigger AS +$$ +DECLARE + obj record; + pg_class_oid oid; + relids regclass[]; +BEGIN + pg_class_oid = 'pg_catalog.pg_class'::regclass; + + /* Find relids to remove from config */ + SELECT array_agg(cfg.partrel) INTO relids + FROM pg_event_trigger_dropped_objects() AS events + JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid + WHERE events.classid = pg_class_oid; + + /* Cleanup pathman_config */ + DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); + + /* Cleanup params table too */ + DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); +END +$$ +LANGUAGE plpgsql; + +/* + * Drop partitions. If delete_data set to TRUE, partitions + * will be dropped with all the data. + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( + parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +RETURNS INTEGER AS +$$ +DECLARE + v_rec RECORD; + v_rows BIGINT; + v_part_count INTEGER := 0; + conf_num_del INTEGER; + v_relkind CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Drop trigger first */ + PERFORM @extschema@.drop_triggers(parent_relid); + + WITH config_num_deleted AS (DELETE FROM @extschema@.pathman_config + WHERE partrel = parent_relid + RETURNING *) + SELECT count(*) from config_num_deleted INTO conf_num_del; + + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + IF conf_num_del = 0 THEN + RAISE EXCEPTION 'relation "%" has no partitions', parent_relid::TEXT; + END IF; + + FOR v_rec IN (SELECT inhrelid::REGCLASS AS tbl + FROM pg_catalog.pg_inherits + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) + LOOP + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + v_rec.tbl::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, 
v_rec.tbl::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = v_rec.tbl + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. + */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', v_rec.tbl::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', v_rec.tbl::TEXT); + END IF; + + v_part_count := v_part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_remove_partitions(parent_relid); + + RETURN v_part_count; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +/* + * Copy all of parent's foreign keys. + */ +CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( + parent_relid REGCLASS, + partition REGCLASS) +RETURNS VOID AS +$$ +DECLARE + rec RECORD; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition); + + FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint + WHERE conrelid = parent_relid AND contype = 'f') + LOOP + EXECUTE format('ALTER TABLE %s ADD %s', + partition::TEXT, + pg_catalog.pg_get_constraintdef(rec.conid)); + END LOOP; +END +$$ LANGUAGE plpgsql STRICT; + +/* + * Extract basic type of a domain. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_base_type(REGTYPE) +RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' +LANGUAGE C STRICT; + +/* + * Returns attribute type name for relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_attribute_type( + REGCLASS, TEXT) +RETURNS REGTYPE AS 'pg_pathman', 'get_attribute_type_pl' +LANGUAGE C STRICT; + +/* + * Return tablespace name for specified relation. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.get_rel_tablespace_name(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_rel_tablespace_name' +LANGUAGE C STRICT; + + +/* + * Checks that callback function meets specific requirements. Particularly it + * must have the only JSONB argument and VOID return type. + */ +CREATE OR REPLACE FUNCTION @extschema@.validate_on_partition_created_callback( + callback REGPROC) +RETURNS VOID AS 'pg_pathman', 'validate_on_part_init_callback_pl' +LANGUAGE C STRICT; + + +/* + * Invoke init_callback on RANGE partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition REGCLASS, + init_callback REGPROCEDURE, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; + +/* + * Invoke init_callback on HASH partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition REGCLASS, + init_callback REGPROCEDURE) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; + +/* + * Creates hash partitions for specified relation + */ +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_child_relname TEXT; + v_plain_schema TEXT; + v_plain_relname TEXT; + v_atttype REGTYPE; + v_hashfunc REGPROC; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Fetch atttype and its 
hash function */ + v_atttype := @extschema@.get_attribute_type(parent_relid, attribute); + v_hashfunc := @extschema@.get_type_hash_func(v_atttype); + + SELECT * INTO v_plain_schema, v_plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) + VALUES (parent_relid, attribute, 1); + + /* Create partitions and update pg_pathman configuration */ + FOR partnum IN 0..partitions_count-1 + LOOP + v_child_relname := format('%s.%s', + quote_ident(v_plain_schema), + quote_ident(v_plain_relname || '_' || partnum)); + + EXECUTE format( + 'CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) INHERITS (%2$s) TABLESPACE %s', + v_child_relname, + parent_relid::TEXT, + @extschema@.get_rel_tablespace_name(parent_relid)); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s + CHECK (@extschema@.get_hash_part_idx(%s(%s), %s) = %s)', + v_child_relname, + @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, + attribute), + v_hashfunc::TEXT, + attribute, + partitions_count, + partnum); + + PERFORM @extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + v_child_relname::REGCLASS, + v_init_callback); + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Copy data */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN partitions_count; +END +$$ 
LANGUAGE plpgsql +SET client_min_messages = WARNING; + +/* + * Creates an update trigger + */ +CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() + RETURNS TRIGGER AS + $body$ + DECLARE + old_idx INTEGER; /* partition indices */ + new_idx INTEGER; + + BEGIN + old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); + new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); + + IF old_idx = new_idx THEN + RETURN NEW; + END IF; + + EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) + USING %5$s; + + EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) + USING %7$s; + + RETURN NULL; + END $body$ + LANGUAGE plpgsql'; + + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE %s()'; + + att_names TEXT; + old_fields TEXT; + new_fields TEXT; + att_val_fmt TEXT; + att_fmt TEXT; + attr TEXT; + plain_schema TEXT; + plain_relname TEXT; + child_relname_format TEXT; + funcname TEXT; + triggername TEXT; + atttype REGTYPE; + partitions_count INTEGER; + +BEGIN + attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF attr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT string_agg(attname, ', '), + string_agg('OLD.' || attname, ', '), + string_agg('NEW.' 
|| attname, ', '), + string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || + attname || ' = $' || attnum || ' ' || + 'ELSE ' || + attname || ' IS NULL END', + ' AND '), + string_agg('$' || attnum, ', ') + FROM pg_catalog.pg_attribute + WHERE attrelid = parent_relid AND attnum > 0 + INTO att_names, + old_fields, + new_fields, + att_val_fmt, + att_fmt; + + partitions_count := COUNT(*) FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid::oid; + + /* Build trigger & trigger function's names */ + funcname := @extschema@.build_update_trigger_func_name(parent_relid); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Build partition name template */ + SELECT * INTO plain_schema, plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + child_relname_format := quote_ident(plain_schema) || '.' || + quote_ident(plain_relname || '_%s'); + + /* Fetch base hash function for atttype */ + atttype := @extschema@.get_attribute_type(parent_relid, attr); + + /* Format function definition and execute it */ + EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, + old_fields, att_fmt, new_fields, child_relname_format, + @extschema@.get_type_hash_func(atttype)::TEXT); + + /* Create trigger on every partition */ + FOR num IN 0..partitions_count-1 + LOOP + EXECUTE format(trigger, + triggername, + format(child_relname_format, num), + funcname); + END LOOP; + + return funcname; +END +$$ LANGUAGE plpgsql; + +/* + * Returns hash function OID for specified type + */ +CREATE OR REPLACE FUNCTION @extschema@.get_type_hash_func(REGTYPE) +RETURNS REGPROC AS 'pg_pathman', 'get_type_hash_func' +LANGUAGE C STRICT; + +/* + * Check RANGE partition boundaries. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS +$$ +DECLARE + v_min start_value%TYPE; + v_max start_value%TYPE; + v_count BIGINT; + +BEGIN + /* Get min and max values */ + EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + FROM %2$s WHERE NOT %1$s IS NULL', + attribute, parent_relid::TEXT) + INTO v_count, v_min, v_max; + + /* Check if column has NULL values */ + IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN + RAISE EXCEPTION '''%'' column contains NULL values', attribute; + END IF; + + /* Check lower boundary */ + IF start_value > v_min THEN + RAISE EXCEPTION 'start value is less than minimum value of ''%''', + attribute; + END IF; + + /* Check upper boundary */ + IF end_value <= v_max THEN + RAISE EXCEPTION 'not enough partitions to fit all values of ''%''', + attribute; + END IF; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_atttype REGTYPE; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + i INTEGER; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_count < 0 THEN + RAISE EXCEPTION '''p_count'' must not be less than 0'; + END IF; + + /* Try to 
determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', + parent_relid, + attribute, + start_value, + end_value, + v_atttype::TEXT); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(schema, relname) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create first partition */ + FOR i IN 1..p_count + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4)', + v_atttype::TEXT) + USING + parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_rel_tablespace_name(parent_relid); + + start_value := start_value + p_interval; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, 
false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on numerical attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + i INTEGER; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF v_max IS NULL THEN + RAISE EXCEPTION '''%'' column has NULL values', attribute; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; 
+ FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(schema, relname) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* create first partition */ + FOR i IN 1..p_count + LOOP + PERFORM @extschema@.create_single_range_partition( + parent_relid, + start_value, + start_value + p_interval, + tablespace := @extschema@.get_rel_tablespace_name(parent_relid)); + + start_value := start_value + p_interval; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified range + */ +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval ANYELEMENT, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM 
@extschema@.common_relation_checks(parent_relid, attribute); + + IF p_interval <= 0 THEN + RAISE EXCEPTION 'interval must be positive'; + END IF; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(schema, relname) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + WHILE start_value <= end_value + LOOP + PERFORM @extschema@.create_single_range_partition( + parent_relid, + start_value, + start_value + p_interval, + tablespace := @extschema@.get_rel_tablespace_name(parent_relid)); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified range based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval INTERVAL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on 
parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(schema, relname) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + WHILE start_value <= end_value + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', + @extschema@.get_base_type(pg_typeof(start_value))::TEXT) + USING + parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_rel_tablespace_name(parent_relid); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + +/* + * Creates new RANGE partition. Returns partition name. + * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_num INT; + v_child_relname TEXT; + v_plain_child_relname TEXT; + v_attname TEXT; + v_plain_schema TEXT; + v_plain_relname TEXT; + v_child_relname_exists BOOL; + v_seq_name TEXT; + v_init_callback REGPROCEDURE; + +BEGIN + v_attname := attname FROM @extschema@.pathman_config + WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT * INTO v_plain_schema, v_plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + v_seq_name := @extschema@.get_sequence_name(v_plain_schema, v_plain_relname); + + IF partition_name IS NULL THEN + /* Get next value from sequence */ + LOOP + v_part_num := nextval(v_seq_name); + v_plain_child_relname := format('%s_%s', v_plain_relname, v_part_num); + v_child_relname := format('%s.%s', + quote_ident(v_plain_schema), + quote_ident(v_plain_child_relname)); + + v_child_relname_exists := count(*) > 0 + FROM pg_class + WHERE relname = v_plain_child_relname AND + relnamespace = v_plain_schema::regnamespace + LIMIT 1; + + EXIT WHEN v_child_relname_exists = false; + END LOOP; + ELSE + v_child_relname := partition_name; + END IF; + + IF tablespace IS NULL THEN + tablespace := @extschema@.get_rel_tablespace_name(parent_relid); + END IF; + + EXECUTE format('CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) + INHERITS (%2$s) TABLESPACE %3$s', + v_child_relname, + parent_relid::TEXT, + tablespace); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + v_child_relname, + @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, + v_attname), + @extschema@.build_range_condition(v_attname, + start_value, + end_value)); + + PERFORM 
@extschema@.copy_foreign_keys(parent_relid, v_child_relname::REGCLASS); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + v_child_relname::REGCLASS, + v_init_callback, + start_value, + end_value); + + RETURN v_child_relname; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; + +/* + * Split RANGE partition + */ +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + OUT p_range ANYARRAY) +RETURNS ANYARRAY AS +$$ +DECLARE + v_parent REGCLASS; + v_attname TEXT; + v_atttype REGTYPE; + v_cond TEXT; + v_new_partition TEXT; + v_part_type INTEGER; + v_check_name TEXT; + +BEGIN + v_parent = @extschema@.get_parent_of_partition(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent); + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent + INTO v_attname, v_part_type; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', v_parent::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + END IF; + + v_atttype = @extschema@.get_attribute_type(v_parent, v_attname); + + /* Get partition values range */ + EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING partition + INTO p_range; + + IF p_range IS NULL THEN + RAISE EXCEPTION 'could not find specified partition'; + END 
IF; + + /* Check if value fit into the range */ + IF p_range[1] > split_value OR p_range[2] <= split_value + THEN + RAISE EXCEPTION 'specified value does not fit into the range [%, %)', + p_range[1], p_range[2]; + END IF; + + /* Create new partition */ + v_new_partition := @extschema@.create_single_range_partition(v_parent, + split_value, + p_range[2], + partition_name); + + /* Copy data */ + v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]); + EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition::TEXT, + v_cond, + v_new_partition); + + /* Alter original partition */ + v_cond := @extschema@.build_range_condition(v_attname, p_range[1], split_value); + v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); + + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition::TEXT, + v_check_name); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + v_check_name, + v_cond); + + /* Tell backend to reload configuration */ + PERFORM @extschema@.on_update_partitions(v_parent); +END +$$ +LANGUAGE plpgsql; + +/* + * Merge RANGE partitions + */ +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partition1 REGCLASS, + partition2 REGCLASS) +RETURNS VOID AS +$$ +DECLARE + v_parent1 REGCLASS; + v_parent2 REGCLASS; + v_attname TEXT; + v_part_type INTEGER; + v_atttype REGTYPE; + +BEGIN + IF partition1 = partition2 THEN + RAISE EXCEPTION 'cannot merge partition with itself'; + END IF; + + v_parent1 := @extschema@.get_parent_of_partition(partition1); + v_parent2 := @extschema@.get_parent_of_partition(partition2); + + /* Acquire data modification locks (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition1); + PERFORM @extschema@.prevent_relation_modification(partition2); + + IF v_parent1 != v_parent2 THEN + RAISE EXCEPTION 'cannot merge partitions with different 
parents'; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent1); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent1 + INTO v_attname, v_part_type; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', v_parent1::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION 'specified partitions aren''t RANGE partitions'; + END IF; + + v_atttype := @extschema@.get_attribute_type(partition1, v_attname); + + EXECUTE format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING v_parent1, partition1, partition2; + + /* Tell backend to reload configuration */ + PERFORM @extschema@.on_update_partitions(v_parent1); +END +$$ +LANGUAGE plpgsql; + +/* + * Merge two partitions. All data will be copied to the first one. Second + * partition will be destroyed. + * + * NOTE: dummy field is used to pass the element type to the function + * (it is necessary because of pseudo-types used in function). 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions_internal( + parent_relid REGCLASS, + partition1 REGCLASS, + partition2 REGCLASS, + dummy ANYELEMENT, + OUT p_range ANYARRAY) +RETURNS ANYARRAY AS +$$ +DECLARE + v_attname TEXT; + v_atttype REGTYPE; + v_check_name TEXT; + +BEGIN + SELECT attname FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_attname; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + v_atttype = @extschema@.get_attribute_type(parent_relid, v_attname); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%1$s) || + @extschema@.get_part_range($2, NULL::%1$s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING partition1, partition2 + INTO p_range; + + /* Check if ranges are adjacent */ + IF p_range[1] != p_range[4] AND p_range[2] != p_range[3] THEN + RAISE EXCEPTION 'merge failed, partitions must be adjacent'; + END IF; + + /* Drop constraint on first partition... */ + v_check_name := @extschema@.build_check_constraint_name(partition1, v_attname); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition1::TEXT, + v_check_name); + + /* and create a new one */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition1::TEXT, + v_check_name, + @extschema@.build_range_condition(v_attname, + least(p_range[1], p_range[3]), + greatest(p_range[2], p_range[4]))); + + /* Copy data from second partition to the first one */ + EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition2::TEXT, + partition1::TEXT); + + /* Remove second partition */ + EXECUTE format('DROP TABLE %s', partition2::TEXT); +END +$$ LANGUAGE plpgsql; + +/* + * Append new partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + v_atttype REGTYPE; + v_part_name TEXT; + v_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + SELECT attname, range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_attname, v_interval; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING + parent_relid, + v_atttype, + v_interval, + partition_name, + tablespace + INTO + v_part_name; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + +/* + * Spawn logic for append_partition(). We have to + * separate this in order to pass the 'p_range'. + * + * NOTE: we don't take a xact_handling lock here. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + +BEGIN + IF @extschema@.partitions_count(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot append to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF @extschema@.is_date_type(p_atttype) THEN + v_part_name := @extschema@.create_single_range_partition( + parent_relid, + p_range[2], + p_range[2] + p_interval::interval, + partition_name, + tablespace); + ELSE + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4, $5)', + v_atttype::TEXT) + USING + parent_relid, + p_range[2], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + END IF; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + +/* + * Prepend new partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + v_atttype REGTYPE; + v_part_name TEXT; + v_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + SELECT attname, range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_attname, v_interval; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + v_atttype := @extschema@.get_attribute_type(parent_relid, v_attname); + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING + parent_relid, + v_atttype, + v_interval, + partition_name, + tablespace + INTO + v_part_name; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + +/* + * Spawn logic for prepend_partition(). We have to + * separate this in order to pass the 'p_range'. + * + * NOTE: we don't take a xact_handling lock here. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + +BEGIN + IF @extschema@.partitions_count(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot prepend to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF @extschema@.is_date_type(p_atttype) THEN + v_part_name := @extschema@.create_single_range_partition( + parent_relid, + p_range[1] - p_interval::interval, + p_range[1], + partition_name, + tablespace); + ELSE + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4, $5)', + v_atttype::TEXT) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + END IF; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + +/* + * Add new partition + */ +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; + + /* check range overlap */ + IF @extschema@.partitions_count(parent_relid) > 0 + AND @extschema@.check_overlap(parent_relid, start_value, end_value) THEN + RAISE EXCEPTION 'specified range overlaps with existing partitions'; + 
END IF; + + /* Create new partition */ + v_part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + +/* + * Drop range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS +$$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + v_relkind CHAR; + v_rows BIGINT; + v_part_type INTEGER; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition); + part_name := partition::TEXT; /* save the name to be returned */ + + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_part_type; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', partition::TEXT); + END IF; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN part_name; +END +$$ +LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +/* + * Attach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition::TEXT; + END IF; + + IF @extschema@.check_overlap(parent_relid, start_value, end_value) THEN + RAISE EXCEPTION 'specified range overlaps with existing partitions'; + END IF; + + IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); + + v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + @extschema@.build_check_constraint_name(partition, v_attname), + 
@extschema@.build_range_condition(v_attname, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition, + v_init_callback, + start_value, + end_value); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition; +END +$$ +LANGUAGE plpgsql; + +/* + * Detach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + parent_relid REGCLASS; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + v_attname := attname + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition::TEXT, + @extschema@.build_check_constraint_name(partition, v_attname)); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition; +END +$$ +LANGUAGE plpgsql; + +/* + * Creates an update trigger + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_update_trigger( + IN parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() + RETURNS TRIGGER AS + $body$ + DECLARE + old_oid Oid; + new_oid Oid; + + BEGIN + old_oid := TG_RELID; + new_oid := @extschema@.find_or_create_range_partition( 
+ ''%2$s''::regclass, NEW.%3$s); + + IF old_oid = new_oid THEN + RETURN NEW; + END IF; + + EXECUTE format(''DELETE FROM %%s WHERE %5$s'', + old_oid::regclass::text) + USING %6$s; + + EXECUTE format(''INSERT INTO %%s VALUES (%7$s)'', + new_oid::regclass::text) + USING %8$s; + + RETURN NULL; + END $body$ + LANGUAGE plpgsql'; + + trigger TEXT := 'CREATE TRIGGER %s ' || + 'BEFORE UPDATE ON %s ' || + 'FOR EACH ROW EXECUTE PROCEDURE %s()'; + + triggername TEXT; + funcname TEXT; + att_names TEXT; + old_fields TEXT; + new_fields TEXT; + att_val_fmt TEXT; + att_fmt TEXT; + attr TEXT; + rec RECORD; + +BEGIN + attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF attr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT string_agg(attname, ', '), + string_agg('OLD.' || attname, ', '), + string_agg('NEW.' || attname, ', '), + string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || + attname || ' = $' || attnum || ' ' || + 'ELSE ' || + attname || ' IS NULL END', + ' AND '), + string_agg('$' || attnum, ', ') + FROM pg_attribute + WHERE attrelid::REGCLASS = parent_relid AND attnum > 0 + INTO att_names, + old_fields, + new_fields, + att_val_fmt, + att_fmt; + + /* Build trigger & trigger function's names */ + funcname := @extschema@.build_update_trigger_func_name(parent_relid); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Create function for trigger */ + EXECUTE format(func, funcname, parent_relid, attr, 0, att_val_fmt, + old_fields, att_fmt, new_fields); + + /* Create trigger on every partition */ + FOR rec in (SELECT * FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid) + LOOP + EXECUTE format(trigger, + triggername, + rec.inhrelid::REGCLASS::TEXT, + funcname); + END LOOP; + + RETURN funcname; +END +$$ LANGUAGE plpgsql; + +/* + * Construct CHECK constraint condition for a range partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( + p_attname TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS 'pg_pathman', 'build_range_condition' +LANGUAGE C; + +/* + * Returns N-th range (as an array of two elements). + */ +CREATE OR REPLACE FUNCTION @extschema@.get_part_range( + parent_relid REGCLASS, + partition_idx INTEGER, + dummy ANYELEMENT) +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_idx' +LANGUAGE C; + +/* + * Returns min and max values for specified RANGE partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_part_range( + partition_relid REGCLASS, + dummy ANYELEMENT) +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' +LANGUAGE C; + +/* + * Returns min and max values for specified RANGE partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_part_range( + partition_relid REGCLASS, + dummy ANYELEMENT) +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' +LANGUAGE C; + diff --git a/pg_pathman--1.1--1.2.sql b/pg_pathman--1.1--1.2.sql new file mode 100644 index 00000000..4af311b3 --- /dev/null +++ b/pg_pathman--1.1--1.2.sql @@ -0,0 +1,1305 @@ +/* ------------------------------------------------------------------------ + * + * pg_pathman--1.1--1.2.sql + * Migration scripts to version 1.2 + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + + +/* ------------------------------------------------------------------------ + * Drop irrelevant objects + * ----------------------------------------------------------------------*/ +DROP INDEX i_pathman_config_params; +DROP FUNCTION @extschema@.partitions_count(REGCLASS); +DROP FUNCTION @extschema@.set_init_callback(REGCLASS, REGPROC); +DROP FUNCTION @extschema@.validate_relname(REGCLASS); +DROP FUNCTION @extschema@.get_schema_qualified_name(REGCLASS, TEXT, TEXT); +DROP FUNCTION @extschema@.get_rel_tablespace_name(REGCLASS); +DROP FUNCTION 
@extschema@.validate_on_partition_created_callback(REGPROC); +DROP FUNCTION @extschema@.get_sequence_name(TEXT, TEXT); +DROP FUNCTION @extschema@.create_or_replace_sequence(TEXT, TEXT); +DROP FUNCTION @extschema@.create_single_range_partition(REGCLASS, ANYELEMENT, ANYELEMENT, TEXT, TEXT); +DROP FUNCTION @extschema@.check_overlap(REGCLASS, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, OUT ANYARRAY); +DROP FUNCTION @extschema@.invalidate_relcache(OID); + +/* drop trigger and its function (PATHMAN_CONFIG_PARAMS) */ +DROP TRIGGER pathman_config_params_trigger ON @extschema@.pathman_config_params; +DROP FUNCTION @extschema@.pathman_config_params_trigger_func(); + + +/* ------------------------------------------------------------------------ + * Alter functions' modifiers + * ----------------------------------------------------------------------*/ +ALTER FUNCTION @extschema@.pathman_set_param(REGCLASS, TEXT, ANYELEMENT) STRICT; +ALTER FUNCTION @extschema@.build_range_condition(TEXT, ANYELEMENT, ANYELEMENT) STRICT; + + +/* ------------------------------------------------------------------------ + * (Re)create functions + * ----------------------------------------------------------------------*/ +CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( + callback REGPROC, + raise_error BOOL DEFAULT TRUE) +RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( + relation REGCLASS, + callback REGPROC DEFAULT 0) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', callback); +END +$$ +LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.set_spawn_using_bgw( + relation REGCLASS, + value BOOLEAN) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.pathman_set_param(relation, 'spawn_using_bgw', value); +END +$$ +LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION 
@extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Delete rows from both config tables */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + /* Drop triggers on update */ + PERFORM @extschema@.drop_triggers(parent_relid); + + /* Notify backend about changes */ + PERFORM @extschema@.on_remove_partitions(parent_relid); +END +$$ +LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.common_relation_checks( + relation REGCLASS, + p_attribute TEXT) +RETURNS BOOLEAN AS +$$ +DECLARE + v_rec RECORD; + is_referenced BOOLEAN; + rel_persistence CHAR; + +BEGIN + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = relation INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', + relation::TEXT; + END IF; + + IF EXISTS (SELECT * FROM @extschema@.pathman_config + WHERE partrel = relation) THEN + RAISE EXCEPTION 'relation "%" has already been partitioned', relation; + END IF; + + IF @extschema@.is_attribute_nullable(relation, p_attribute) THEN + RAISE EXCEPTION 'partitioning key "%" must be NOT NULL', p_attribute; + END IF; + + /* Check if there are foreign keys that reference the relation */ + FOR v_rec IN (SELECT * FROM pg_catalog.pg_constraint + WHERE confrelid = relation::REGCLASS::OID) + LOOP + is_referenced := TRUE; + RAISE WARNING 'foreign key "%" references relation "%"', + v_rec.conname, relation; + END LOOP; + + IF is_referenced THEN + RAISE EXCEPTION 'relation "%" is referenced from other relations', relation; + END IF; + + RETURN TRUE; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +RETURNS event_trigger AS +$$ +DECLARE + obj record; + pg_class_oid oid; + relids regclass[]; +BEGIN + pg_class_oid = 
'pg_catalog.pg_class'::regclass; + + /* Find relids to remove from config */ + SELECT array_agg(cfg.partrel) INTO relids + FROM pg_event_trigger_dropped_objects() AS events + JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid + WHERE events.classid = pg_class_oid AND events.objsubid = 0; + + /* Cleanup pathman_config */ + DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); + + /* Cleanup params table too */ + DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS 'pg_pathman', 'get_number_of_partitions_pl' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.get_tablespace( + relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_tablespace_pl' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.validate_relname( + relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'validate_relname' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) + VALUES (parent_relid, attribute, 1); + + /* Create partitions */ + PERFORM @extschema@.create_hash_partitions_internal(parent_relid, + attribute, + partitions_count); + + /* Notify backend about changes */ + PERFORM 
@extschema@.on_create_partitions(parent_relid); + + /* Copy data */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN partitions_count; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; + + +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS +$$ +DECLARE + parent_relid REGCLASS; + part_attname TEXT; /* partitioned column */ + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(old_partition); + PERFORM @extschema@.prevent_relation_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.validate_relations_equality(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must 
have the exact same structure as parent'; + END IF; + + /* Get partitioning key */ + part_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + IF part_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS, + part_attname); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND conname = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS, + part_attname), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN new_partition; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() + RETURNS TRIGGER AS + $body$ + DECLARE + old_idx INTEGER; /* partition indices */ + new_idx INTEGER; + + 
BEGIN + old_idx := @extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); + new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); + + IF old_idx = new_idx THEN + RETURN NEW; + END IF; + + EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) + USING %5$s; + + EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) + USING %7$s; + + RETURN NULL; + END $body$ + LANGUAGE plpgsql'; + + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE %s()'; + + att_names TEXT; + old_fields TEXT; + new_fields TEXT; + att_val_fmt TEXT; + att_fmt TEXT; + attr TEXT; + plain_schema TEXT; + plain_relname TEXT; + child_relname_format TEXT; + funcname TEXT; + triggername TEXT; + atttype REGTYPE; + partitions_count INTEGER; + +BEGIN + attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF attr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT string_agg(attname, ', '), + string_agg('OLD.' || attname, ', '), + string_agg('NEW.' || attname, ', '), + string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || + attname || ' = $' || attnum || ' ' || + 'ELSE ' || + attname || ' IS NULL END', + ' AND '), + string_agg('$' || attnum, ', ') + FROM pg_catalog.pg_attribute + WHERE attrelid = parent_relid AND attnum > 0 + INTO att_names, + old_fields, + new_fields, + att_val_fmt, + att_fmt; + + partitions_count := @extschema@.get_number_of_partitions(parent_relid); + + /* Build trigger & trigger function's names */ + funcname := @extschema@.build_update_trigger_func_name(parent_relid); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Build partition name template */ + SELECT * INTO plain_schema, plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + child_relname_format := quote_ident(plain_schema) || '.' 
|| + quote_ident(plain_relname || '_%s'); + + /* Fetch base hash function for atttype */ + atttype := @extschema@.get_attribute_type(parent_relid, attr); + + /* Format function definition and execute it */ + EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, + old_fields, att_fmt, new_fields, child_relname_format, + @extschema@.get_type_hash_func(atttype)::TEXT); + + /* Create trigger on each partition */ + FOR num IN 0..partitions_count-1 + LOOP + EXECUTE format(trigger, + triggername, + format(child_relname_format, num), + funcname); + END LOOP; + + return funcname; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER) +RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( + attribute_type REGTYPE, + attribute TEXT, + partitions_count INT4, + partitions_index INT4) +RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( + parent_relid REGCLASS, + OUT seq_name TEXT) +AS $$ +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS +$$ +DECLARE + v_min start_value%TYPE; + v_max start_value%TYPE; + v_count BIGINT; + +BEGIN + /* Get min and max values */ + EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + FROM %2$s WHERE NOT %1$s IS NULL', + attribute, parent_relid::TEXT) + INTO v_count, v_min, v_max; + + /* Check if column has NULL values */ + IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN + RAISE EXCEPTION 
'column "%" contains NULL values', attribute; + END IF; + + /* Check lower boundary */ + IF start_value > v_min THEN + RAISE EXCEPTION 'start value is less than min value of "%"', attribute; + END IF; + + /* Check upper boundary */ + IF end_value <= v_max THEN + RAISE EXCEPTION 'not enough partitions to fit all values of "%"', attribute; + END IF; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_atttype REGTYPE; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + i INTEGER; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_count < 0 THEN + RAISE EXCEPTION '"p_count" must not be less than 0'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + v_atttype := @extschema@.get_base_type(pg_typeof(start_value)); + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* 
compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', + parent_relid, + attribute, + start_value, + end_value, + v_atttype::TEXT); + END IF; + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* Create first partition */ + FOR i IN 1..p_count + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4)', + v_atttype::TEXT) + USING + parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_tablespace(parent_relid); + + start_value := start_value + p_interval; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + v_rows_count BIGINT; + v_max start_value%TYPE; + v_cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + i INTEGER; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + 
/* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', attribute, parent_relid) + INTO v_rows_count, v_max; + + IF v_rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF v_max IS NULL THEN + RAISE EXCEPTION 'column "%" has NULL values', attribute; + END IF; + + p_count := 0; + WHILE v_cur_value <= v_max + LOOP + v_cur_value := v_cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + END IF; + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + /* create first partition */ + FOR i IN 1..p_count + LOOP + PERFORM @extschema@.create_single_range_partition( + parent_relid, + start_value, + start_value + p_interval, + tablespace := 
@extschema@.get_tablespace(parent_relid)); + + start_value := start_value + p_interval; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval ANYELEMENT, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + IF p_interval <= 0 THEN + RAISE EXCEPTION 'interval must be positive'; + END IF; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + WHILE start_value <= end_value + LOOP + PERFORM @extschema@.create_single_range_partition( + parent_relid, + start_value, + start_value + p_interval, + tablespace := 
@extschema@.get_tablespace(parent_relid)); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( + parent_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT, + p_interval INTERVAL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS +$$ +DECLARE + part_count INTEGER := 0; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + attribute, + start_value, + end_value); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) + VALUES (parent_relid, attribute, 2, p_interval::TEXT); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_or_replace_sequence(parent_relid) + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + WHILE start_value <= end_value + LOOP + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s, tablespace:=$4);', + @extschema@.get_base_type(pg_typeof(start_value))::TEXT) + USING + 
parent_relid, + start_value, + start_value + p_interval, + @extschema@.get_tablespace(parent_relid); + + start_value := start_value + p_interval; + part_count := part_count + 1; + END LOOP; + + /* Notify backend about changes */ + PERFORM @extschema@.on_create_partitions(parent_relid); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; /* number of created partitions */ +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partition1 REGCLASS, + partition2 REGCLASS) +RETURNS VOID AS +$$ +DECLARE + v_parent1 REGCLASS; + v_parent2 REGCLASS; + v_attname TEXT; + v_part_type INTEGER; + v_atttype REGTYPE; + +BEGIN + IF partition1 = partition2 THEN + RAISE EXCEPTION 'cannot merge partition with itself'; + END IF; + + v_parent1 := @extschema@.get_parent_of_partition(partition1); + v_parent2 := @extschema@.get_parent_of_partition(partition2); + + /* Acquire data modification locks (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition1); + PERFORM @extschema@.prevent_relation_modification(partition2); + + IF v_parent1 != v_parent2 THEN + RAISE EXCEPTION 'cannot merge partitions with different parents'; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent1); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent1 + INTO v_attname, v_part_type; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', v_parent1::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION 'specified partitions are not RANGE partitions'; + END IF; + + v_atttype := @extschema@.get_attribute_type(partition1, v_attname); + + EXECUTE 
format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING v_parent1, partition1, partition2; + + /* Tell backend to reload configuration */ + PERFORM @extschema@.on_update_partitions(v_parent1); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL, + OUT p_range ANYARRAY) +RETURNS ANYARRAY AS +$$ +DECLARE + v_parent REGCLASS; + v_attname TEXT; + v_atttype REGTYPE; + v_cond TEXT; + v_new_partition TEXT; + v_part_type INTEGER; + v_check_name TEXT; + +BEGIN + v_parent = @extschema@.get_parent_of_partition(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent); + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent + INTO v_attname, v_part_type; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', v_parent::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition::TEXT; + END IF; + + v_atttype = @extschema@.get_attribute_type(v_parent, v_attname); + + /* Get partition values range */ + EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING partition + INTO p_range; + + IF p_range IS NULL THEN + RAISE EXCEPTION 'could not find specified partition'; + END IF; + + /* Check if value fit into the range */ + IF p_range[1] > split_value OR p_range[2] <= split_value + THEN + RAISE EXCEPTION 'specified value does not fit into the range [%, %)', + p_range[1], p_range[2]; + END IF; + + /* Create new partition */ + v_new_partition := 
@extschema@.create_single_range_partition(v_parent, + split_value, + p_range[2], + partition_name, + tablespace); + + /* Copy data */ + v_cond := @extschema@.build_range_condition(v_attname, split_value, p_range[2]); + EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition::TEXT, + v_cond, + v_new_partition); + + /* Alter original partition */ + v_cond := @extschema@.build_range_condition(v_attname, p_range[1], split_value); + v_check_name := @extschema@.build_check_constraint_name(partition, v_attname); + + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition::TEXT, + v_check_name); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + v_check_name, + v_cond); + + /* Tell backend to reload configuration */ + PERFORM @extschema@.on_update_partitions(v_parent); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot append to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF @extschema@.is_date_type(p_atttype) THEN + v_part_name := @extschema@.create_single_range_partition( + parent_relid, + p_range[2], + p_range[2] + p_interval::interval, + partition_name, + tablespace); + ELSE + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4, $5)', + v_atttype::TEXT) + USING + parent_relid, + p_range[2], + p_interval, + 
partition_name, + tablespace + INTO + v_part_name; + END IF; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot prepend to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF @extschema@.is_date_type(p_atttype) THEN + v_part_name := @extschema@.create_single_range_partition( + parent_relid, + p_range[1] - p_interval::interval, + p_range[1], + partition_name, + tablespace); + ELSE + EXECUTE + format('SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4, $5)', + v_atttype::TEXT) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + END IF; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; + + /* check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM 
@extschema@.check_range_available(parent_relid, + start_value, + end_value); + END IF; + + /* Create new partition */ + v_part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition::TEXT; + END IF; + + /* check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.validate_relations_equality(parent_relid, partition) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition, parent_relid); + + v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition::TEXT, + @extschema@.build_check_constraint_name(partition, v_attname), + @extschema@.build_range_condition(v_attname, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH 
stub_callback(stub) as (values (0)) + SELECT coalesce(init_callback, 0::REGPROCEDURE) + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition, + v_init_callback, + start_value, + end_value); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'create_single_range_partition_pl' +LANGUAGE C +SET client_min_messages = WARNING; + + +CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.check_range_available( + parent_relid REGCLASS, + range_min ANYELEMENT, + range_max ANYELEMENT) +RETURNS VOID AS 'pg_pathman', 'check_range_available_pl' +LANGUAGE C; + + +/* Finally create function and trigger (PATHMAN_CONFIG_PARAMS) */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' +LANGUAGE C; + +CREATE TRIGGER pathman_config_params_trigger +BEFORE INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + + +CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() +RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' +LANGUAGE C STRICT; + + +/* ------------------------------------------------------------------------ + * Alter tables + * ----------------------------------------------------------------------*/ +ALTER TABLE 
@extschema@.pathman_config_params ADD COLUMN spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE; +ALTER TABLE @extschema@.pathman_config_params ADD CHECK (@extschema@.validate_part_callback(init_callback)); + + +/* ------------------------------------------------------------------------ + * Final words of wisdom + * ----------------------------------------------------------------------*/ +DO language plpgsql +$$ + BEGIN + RAISE WARNING 'Don''t forget to execute "SET pg_pathman.enable = t" to activate pg_pathman'; + END +$$; \ No newline at end of file diff --git a/pg_pathman--1.2--1.3.sql b/pg_pathman--1.2--1.3.sql new file mode 100644 index 00000000..86f6d36e --- /dev/null +++ b/pg_pathman--1.2--1.3.sql @@ -0,0 +1,1056 @@ +/* ------------------------------------------------------------------------ + * + * pg_pathman--1.2--1.3.sql + * Migration scripts to version 1.3 + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + + +/* ------------------------------------------------------------------------ + * Alter config tables + * ----------------------------------------------------------------------*/ +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + attname TEXT, + parttype INTEGER, + range_interval TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + +ALTER TABLE @extschema@.pathman_config +ADD CHECK (@extschema@.validate_interval_value(partrel, + attname, + parttype, + range_interval)); + +/* + * Drop check constraint to be able to update column type. 
We recreate it + * later and it will be slightly different + */ +DROP FUNCTION @extschema@.validate_part_callback(REGPROC, BOOL) CASCADE; + +/* Change type for init_callback attribute */ +ALTER TABLE @extschema@.pathman_config_params +ALTER COLUMN init_callback TYPE TEXT, +ALTER COLUMN init_callback DROP NOT NULL, +ALTER COLUMN init_callback SET DEFAULT NULL; + +/* Set init_callback to NULL where it used to be 0 */ +UPDATE @extschema@.pathman_config_params +SET init_callback = NULL +WHERE init_callback = '-'; + +CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( + callback REGPROCEDURE, + raise_error BOOL DEFAULT TRUE) +RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' +LANGUAGE C STRICT; + +ALTER TABLE @extschema@.pathman_config_params +ADD CHECK (@extschema@.validate_part_callback(CASE WHEN init_callback IS NULL + THEN 0::REGPROCEDURE + ELSE init_callback::REGPROCEDURE + END)); + +/* ------------------------------------------------------------------------ + * Drop irrelevant objects + * ----------------------------------------------------------------------*/ +DROP FUNCTION @extschema@.set_init_callback(REGCLASS, REGPROC); +DROP FUNCTION @extschema@.get_attribute_type(REGCLASS, TEXT); +DROP FUNCTION @extschema@.create_hash_partitions(REGCLASS, TEXT, INTEGER, BOOLEAN); +DROP FUNCTION @extschema@.create_hash_partitions_internal(REGCLASS, TEXT, INTEGER); +DROP FUNCTION @extschema@.build_range_condition(TEXT, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.split_range_partition(REGCLASS, ANYELEMENT, TEXT, TEXT, OUT ANYARRAY); +DROP FUNCTION @extschema@.drop_range_partition(REGCLASS, BOOLEAN); +DROP FUNCTION @extschema@.attach_range_partition(REGCLASS, REGCLASS, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.detach_range_partition(REGCLASS); +DROP FUNCTION @extschema@.merge_range_partitions_internal(REGCLASS, REGCLASS, REGCLASS, ANYELEMENT); +DROP FUNCTION @extschema@.copy_foreign_keys(REGCLASS, REGCLASS); +DROP FUNCTION 
@extschema@.invoke_on_partition_created_callback(REGCLASS, REGCLASS, REGPROCEDURE, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.invoke_on_partition_created_callback(REGCLASS, REGCLASS, REGPROCEDURE); + + +/* ------------------------------------------------------------------------ + * Alter functions' modifiers + * ----------------------------------------------------------------------*/ +ALTER FUNCTION @extschema@.pathman_set_param(REGCLASS, TEXT, ANYELEMENT) STRICT; + + +/* ------------------------------------------------------------------------ + * (Re)create functions + * ----------------------------------------------------------------------*/ + +/* + * Invoke init_callback on RANGE partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition_relid REGCLASS, + init_callback REGPROCEDURE, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; + + +/* + * Invoke init_callback on HASH partition. + */ +CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( + parent_relid REGCLASS, + partition_relid REGCLASS, + init_callback REGPROCEDURE) +RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' +LANGUAGE C; + + +/* + * Copy all of parent's foreign keys. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS +$$ +DECLARE + rec RECORD; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + FOR rec IN (SELECT oid as conid FROM pg_catalog.pg_constraint + WHERE conrelid = parent_relid AND contype = 'f') + LOOP + EXECUTE format('ALTER TABLE %s ADD %s', + partition_relid::TEXT, + pg_catalog.pg_get_constraintdef(rec.conid)); + END LOOP; +END +$$ LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( + relation REGCLASS, + callback REGPROCEDURE DEFAULT 0) +RETURNS VOID AS +$$ +DECLARE + regproc_text TEXT := NULL; + +BEGIN + + /* Fetch schema-qualified name of callback */ + IF callback != 0 THEN + SELECT quote_ident(nspname) || '.' || + quote_ident(proname) || '(' || + (SELECT string_agg(x.argtype::REGTYPE::TEXT, ',') + FROM unnest(proargtypes) AS x(argtype)) || + ')' + FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n + ON n.oid = p.pronamespace + WHERE p.oid = callback + INTO regproc_text; /* <= result */ + END IF; + + PERFORM @extschema@.pathman_set_param(relation, 'init_callback', regproc_text); +END +$$ +LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.set_interval( + relation REGCLASS, + value ANYELEMENT) +RETURNS VOID AS +$$ +DECLARE + affected INTEGER; +BEGIN + UPDATE @extschema@.pathman_config + SET range_interval = value::text + WHERE partrel = relation AND parttype = 2; + + /* Check number of affected rows */ + GET DIAGNOSTICS affected = ROW_COUNT; + + IF affected = 0 THEN + RAISE EXCEPTION 'table "%" is not partitioned by RANGE', relation; + END IF; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.alter_partition( + relation REGCLASS, + new_name TEXT, + new_schema REGNAMESPACE, + new_tablespace TEXT) +RETURNS VOID AS +$$ +DECLARE + orig_name TEXT; + orig_schema OID; + +BEGIN + SELECT 
relname, relnamespace FROM pg_class + WHERE oid = relation + INTO orig_name, orig_schema; + + /* Alter table name */ + IF new_name != orig_name THEN + EXECUTE format('ALTER TABLE %s RENAME TO %s', relation, new_name); + END IF; + + /* Alter table schema */ + IF new_schema != orig_schema THEN + EXECUTE format('ALTER TABLE %s SET SCHEMA %s', relation, new_schema); + END IF; + + /* Move to another tablespace */ + IF NOT new_tablespace IS NULL THEN + EXECUTE format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); + END IF; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( + relid REGCLASS) +RETURNS TEXT AS +$$ + SELECT attname FROM pathman_config WHERE partrel = relid; +$$ +LANGUAGE sql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( + relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER, + partition_data BOOLEAN DEFAULT TRUE, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) +RETURNS INTEGER AS +$$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + attribute := lower(attribute); + PERFORM @extschema@.common_relation_checks(parent_relid, attribute); + + /* Insert new entry to pathman config */ + INSERT INTO @extschema@.pathman_config (partrel, attname, parttype) + VALUES (parent_relid, attribute, 1); + + /* Create partitions */ + PERFORM @extschema@.create_hash_partitions_internal(parent_relid, + attribute, + partitions_count, + partition_names, + tablespaces); + + /* Notify backend about changes */ + PERFORM 
@extschema@.on_create_partitions(parent_relid); + + /* Copy data */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN partitions_count; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; + + +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS +$$ +DECLARE + parent_relid REGCLASS; + part_attname TEXT; /* partitioned column */ + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(old_partition); + PERFORM @extschema@.prevent_relation_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.validate_relations_equality(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must 
have the exact same structure as parent'; + END IF; + + /* Get partitioning key */ + part_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + IF part_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS, + part_attname); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND conname = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS, + part_attname), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN new_partition; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_update_trigger( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() + RETURNS TRIGGER AS + $body$ + DECLARE + old_idx INTEGER; /* partition indices */ + new_idx INTEGER; + + BEGIN + old_idx := 
@extschema@.get_hash_part_idx(%9$s(OLD.%2$s), %3$s); + new_idx := @extschema@.get_hash_part_idx(%9$s(NEW.%2$s), %3$s); + + IF old_idx = new_idx THEN + RETURN NEW; + END IF; + + EXECUTE format(''DELETE FROM %8$s WHERE %4$s'', old_idx) + USING %5$s; + + EXECUTE format(''INSERT INTO %8$s VALUES (%6$s)'', new_idx) + USING %7$s; + + RETURN NULL; + END $body$ + LANGUAGE plpgsql'; + + trigger TEXT := 'CREATE TRIGGER %s + BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE %s()'; + + att_names TEXT; + old_fields TEXT; + new_fields TEXT; + att_val_fmt TEXT; + att_fmt TEXT; + attr TEXT; + plain_schema TEXT; + plain_relname TEXT; + child_relname_format TEXT; + funcname TEXT; + triggername TEXT; + atttype REGTYPE; + partitions_count INTEGER; + +BEGIN + attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF attr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + SELECT string_agg(attname, ', '), + string_agg('OLD.' || attname, ', '), + string_agg('NEW.' || attname, ', '), + string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || + attname || ' = $' || attnum || ' ' || + 'ELSE ' || + attname || ' IS NULL END', + ' AND '), + string_agg('$' || attnum, ', ') + FROM pg_catalog.pg_attribute + WHERE attrelid = parent_relid AND attnum > 0 + INTO att_names, + old_fields, + new_fields, + att_val_fmt, + att_fmt; + + partitions_count := @extschema@.get_number_of_partitions(parent_relid); + + /* Build trigger & trigger function's names */ + funcname := @extschema@.build_update_trigger_func_name(parent_relid); + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Build partition name template */ + SELECT * INTO plain_schema, plain_relname + FROM @extschema@.get_plain_schema_and_relname(parent_relid); + + child_relname_format := quote_ident(plain_schema) || '.' 
|| + quote_ident(plain_relname || '_%s'); + + /* Fetch base hash function for atttype */ + atttype := @extschema@.get_partition_key_type(parent_relid); + + /* Format function definition and execute it */ + EXECUTE format(func, funcname, attr, partitions_count, att_val_fmt, + old_fields, att_fmt, new_fields, child_relname_format, + @extschema@.get_type_hash_func(atttype)::TEXT); + + /* Create trigger on each partition */ + FOR num IN 0..partitions_count-1 + LOOP + EXECUTE format(trigger, + triggername, + format(child_relname_format, num), + funcname); + END LOOP; + + return funcname; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( + parent_relid REGCLASS, + attribute TEXT, + partitions_count INTEGER, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) +RETURNS VOID AS 'pg_pathman', 'create_hash_partitions_internal' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL, + OUT p_range ANYARRAY) +RETURNS ANYARRAY AS +$$ +DECLARE + v_parent REGCLASS; + v_attname TEXT; + v_atttype REGTYPE; + v_cond TEXT; + v_new_partition TEXT; + v_part_type INTEGER; + v_check_name TEXT; + +BEGIN + v_parent = @extschema@.get_parent_of_partition(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(v_parent); + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_relation_modification(partition_relid); + + v_atttype = @extschema@.get_partition_key_type(v_parent); + + SELECT attname, parttype + FROM @extschema@.pathman_config + WHERE partrel = v_parent + INTO v_attname, v_part_type; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Get partition values range */ + 
EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING partition_relid + INTO p_range; + + IF p_range IS NULL THEN + RAISE EXCEPTION 'could not find specified partition'; + END IF; + + /* Check if value fit into the range */ + IF p_range[1] > split_value OR p_range[2] <= split_value + THEN + RAISE EXCEPTION 'specified value does not fit into the range [%, %)', + p_range[1], p_range[2]; + END IF; + + /* Create new partition */ + v_new_partition := @extschema@.create_single_range_partition(v_parent, + split_value, + p_range[2], + partition_name, + tablespace); + + /* Copy data */ + v_cond := @extschema@.build_range_condition(v_new_partition::regclass, + v_attname, split_value, p_range[2]); + EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition_relid::TEXT, + v_cond, + v_new_partition); + + /* Alter original partition */ + v_cond := @extschema@.build_range_condition(partition_relid::regclass, + v_attname, p_range[1], split_value); + v_check_name := @extschema@.build_check_constraint_name(partition_relid, v_attname); + + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + v_check_name); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + v_check_name, + v_cond); + + /* Tell backend to reload configuration */ + PERFORM @extschema@.on_update_partitions(v_parent); +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partitions REGCLASS[]) +RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + partition1 REGCLASS, + partition2 REGCLASS) +RETURNS VOID AS +$$ +BEGIN + PERFORM @extschema@.merge_range_partitions(array[partition1, partition2]::regclass[]); +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION 
@extschema@.append_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_atttype REGTYPE; + v_part_name TEXT; + v_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + v_atttype := @extschema@.get_partition_key_type(parent_relid); + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_interval; + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING + parent_relid, + v_atttype, + v_interval, + partition_name, + tablespace + INTO + v_part_name; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + v_args_format TEXT; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot append to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF p_range[2] IS NULL THEN + RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; + END IF; + + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', v_atttype::TEXT); + ELSE + v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', 
v_atttype::TEXT); + END IF; + + EXECUTE + format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[2], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_atttype REGTYPE; + v_part_name TEXT; + v_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + v_atttype := @extschema@.get_partition_key_type(parent_relid); + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_interval; + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(v_atttype)::TEXT) + USING + parent_relid, + v_atttype, + v_interval, + partition_name, + tablespace + INTO + v_part_name; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS +$$ +DECLARE + v_part_name TEXT; + v_atttype REGTYPE; + v_args_format TEXT; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot prepend to empty partitions set'; + END IF; + + v_atttype := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + v_atttype::TEXT) + USING parent_relid + INTO p_range; + + IF p_range[1] IS NULL 
THEN + RAISE EXCEPTION 'Cannot prepend partition because first partition''s range is half open'; + END IF; + + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', v_atttype::TEXT); + ELSE + v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', v_atttype::TEXT); + END IF; + + EXECUTE + format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + v_part_name; + + RETURN v_part_name; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition_relid INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition_relid::TEXT; + END IF; + + /* check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.validate_relations_equality(parent_relid, partition_relid) THEN + RAISE EXCEPTION 'partition must have the exact same structure as parent'; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + + v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Set check constraint */ + EXECUTE format('ALTER 
TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid, v_attname), + @extschema@.build_range_condition(partition_relid, + v_attname, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition_relid, + v_init_callback, + start_value, + end_value); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition_relid; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition_relid REGCLASS) +RETURNS TEXT AS +$$ +DECLARE + v_attname TEXT; + parent_relid REGCLASS; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_relation_modification(parent_relid); + + v_attname := attname + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; + + IF v_attname IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition_relid::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid, v_attname)); + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN partition_relid; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition_relid REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS +$$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + v_relkind CHAR; + v_rows 
BIGINT; + v_part_type INTEGER; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + part_name := partition_relid::TEXT; /* save the name to be returned */ + + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO v_part_type; + + /* Check if this is a RANGE partition */ + IF v_part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.lock_partitioned_relation(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition_relid::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition_relid + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. + */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + END IF; + + /* Invalidate cache */ + PERFORM @extschema@.on_update_partitions(parent_relid); + + RETURN part_name; +END +$$ +LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; + + +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( + partition_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( + p_relid REGCLASS, + attribute TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS 'pg_pathman', 'build_range_condition' +LANGUAGE C; + + +/* + * Old school way to distribute rows to partitions. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.partition_data( + parent_relid REGCLASS, + OUT p_total BIGINT) +AS +$$ +BEGIN + p_total := 0; + + /* Create partitions and copy rest of the data */ + EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) + INSERT INTO %1$s SELECT * FROM part_data', + parent_relid::TEXT); + + /* Get number of inserted rows */ + GET DIAGNOSTICS p_total = ROW_COUNT; + RETURN; +END +$$ +LANGUAGE plpgsql STRICT +SET pg_pathman.enable_partitionfilter = on; + +/* + * Add a row describing the optional parameter to pathman_config_params. + */ +CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( + relation REGCLASS, + param TEXT, + value ANYELEMENT) +RETURNS VOID AS +$$ +BEGIN + EXECUTE format('INSERT INTO @extschema@.pathman_config_params + (partrel, %1$s) VALUES ($1, $2) + ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) + USING relation, value; +END +$$ +LANGUAGE plpgsql; + + + +/* ------------------------------------------------------------------------ + * Final words of wisdom + * ----------------------------------------------------------------------*/ +DO language plpgsql +$$ + BEGIN + RAISE WARNING 'Don''t forget to execute "SET pg_pathman.enable = t" to activate pg_pathman'; + END +$$; diff --git a/pg_pathman--1.3--1.4.sql b/pg_pathman--1.3--1.4.sql new file mode 100644 index 00000000..f60634fa --- /dev/null +++ b/pg_pathman--1.3--1.4.sql @@ -0,0 +1,1504 @@ +/* ------------------------------------------------------------------------ + * + * pg_pathman--1.3--1.4.sql + * Migration scripts to version 1.4 + * + * Copyright (c) 2015-2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + + +/* ------------------------------------------------------------------------ + * Alter config tables + * ----------------------------------------------------------------------*/ +ALTER TABLE @extschema@.pathman_config RENAME COLUMN attname TO expr; +ALTER TABLE 
@extschema@.pathman_config ADD COLUMN cooked_expr TEXT; + +DROP TRIGGER pathman_config_params_trigger ON @extschema@.pathman_config_params; + +CREATE TRIGGER pathman_config_params_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + + +DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, TEXT) CASCADE; + +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + expr TEXT, + parttype INTEGER, + range_interval TEXT, + cooked_expr TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + +ALTER TABLE @extschema@.pathman_config +ADD CONSTRAINT pathman_config_interval_check + CHECK (@extschema@.validate_interval_value(partrel, + expr, + parttype, + range_interval, + cooked_expr)); + +DO $$ +DECLARE + v_rec RECORD; +BEGIN + FOR v_rec IN (SELECT conrelid::regclass AS t, conname, regexp_replace(conname, '\d+_check', 'check') as new_conname + FROM pg_constraint + WHERE conname ~ 'pathman_.*_\d+_\d+_check') + LOOP + EXECUTE format('ALTER TABLE %s RENAME CONSTRAINT %s TO %s', + v_rec.t, v_rec.conname, v_rec.new_conname); + END LOOP; +END +$$ LANGUAGE plpgsql; + + +DROP VIEW pathman_partition_list; + +DROP FUNCTION @extschema@.show_partition_list(); + +CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() +RETURNS TABLE ( + parent REGCLASS, + partition REGCLASS, + parttype INT4, + expr TEXT, + range_min TEXT, + range_max TEXT) +AS 'pg_pathman', 'show_partition_list_internal' +LANGUAGE C STRICT; + +CREATE OR REPLACE VIEW @extschema@.pathman_partition_list +AS SELECT * FROM @extschema@.show_partition_list(); + +GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; + + +/* ------------------------------------------------------------------------ + * Drop irrelevant objects + * ----------------------------------------------------------------------*/ +DROP FUNCTION 
@extschema@._partition_data_concurrent(REGCLASS, ANYELEMENT, ANYELEMENT, INT, OUT BIGINT); +DROP FUNCTION @extschema@.disable_pathman_for(REGCLASS); +DROP FUNCTION @extschema@.common_relation_checks(REGCLASS, TEXT); +DROP FUNCTION @extschema@.validate_relations_equality(OID, OID); +DROP FUNCTION @extschema@.drop_partitions(REGCLASS, BOOLEAN); +DROP FUNCTION @extschema@.on_create_partitions(REGCLASS); +DROP FUNCTION @extschema@.on_update_partitions(REGCLASS); +DROP FUNCTION @extschema@.on_remove_partitions(REGCLASS); +DROP FUNCTION @extschema@.is_attribute_nullable(REGCLASS, TEXT); +DROP FUNCTION @extschema@.build_check_constraint_name(REGCLASS, TEXT); +DROP FUNCTION @extschema@.build_check_constraint_name(REGCLASS, INT2); +DROP FUNCTION @extschema@.add_to_pathman_config(REGCLASS, TEXT, TEXT); +DROP FUNCTION @extschema@.lock_partitioned_relation(REGCLASS); +DROP FUNCTION @extschema@.prevent_relation_modification(REGCLASS); +DROP FUNCTION @extschema@.create_hash_partitions(REGCLASS, TEXT, INTEGER, BOOLEAN, TEXT[], TEXT[]); +DROP FUNCTION @extschema@.create_hash_update_trigger(REGCLASS); +DROP FUNCTION @extschema@.get_type_hash_func(REGTYPE); +DROP FUNCTION @extschema@.build_hash_condition(REGTYPE, TEXT, INT4, INT4); +DROP FUNCTION @extschema@.create_or_replace_sequence(REGCLASS, OUT TEXT); +DROP FUNCTION @extschema@.check_boundaries(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT); +DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, INTERVAL, INTEGER, BOOLEAN); +DROP FUNCTION @extschema@.create_range_partitions(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT, INTEGER, BOOLEAN); +DROP FUNCTION @extschema@.create_partitions_from_range(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT, ANYELEMENT, BOOLEAN); +DROP FUNCTION @extschema@.create_partitions_from_range(REGCLASS, TEXT, ANYELEMENT, ANYELEMENT, INTERVAL, BOOLEAN); +DROP FUNCTION @extschema@.create_range_update_trigger(REGCLASS); +DROP FUNCTION @extschema@.build_range_condition(REGCLASS, TEXT, ANYELEMENT, 
ANYELEMENT); +DROP FUNCTION @extschema@.find_or_create_range_partition(REGCLASS, ANYELEMENT); + + +/* ------------------------------------------------------------------------ + * Alter functions' modifiers + * ----------------------------------------------------------------------*/ +ALTER FUNCTION @extschema@.build_sequence_name(REGCLASS) STRICT; + + +/* ------------------------------------------------------------------------ + * (Re)create functions + * ----------------------------------------------------------------------*/ +CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() +RETURNS TABLE ( + context TEXT, + size INT8, + used INT8, + entries INT8) +AS 'pg_pathman', 'show_cache_stats_internal' +LANGUAGE C STRICT; + +CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats +AS SELECT * FROM @extschema@.show_cache_stats(); + + +CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( + relation REGCLASS, + p_min ANYELEMENT DEFAULT NULL::text, + p_max ANYELEMENT DEFAULT NULL::text, + p_limit INT DEFAULT NULL, + OUT p_total BIGINT) +AS $$ +DECLARE + part_expr TEXT; + v_limit_clause TEXT := ''; + v_where_clause TEXT := ''; + ctids TID[]; + +BEGIN + part_expr := @extschema@.get_partition_key(relation); + + p_total := 0; + + /* Format LIMIT clause if needed */ + IF NOT p_limit IS NULL THEN + v_limit_clause := format('LIMIT %s', p_limit); + END IF; + + /* Format WHERE clause if needed */ + IF NOT p_min IS NULL THEN + v_where_clause := format('%1$s >= $1', part_expr); + END IF; + + IF NOT p_max IS NULL THEN + IF NOT p_min IS NULL THEN + v_where_clause := v_where_clause || ' AND '; + END IF; + v_where_clause := v_where_clause || format('%1$s < $2', part_expr); + END IF; + + IF v_where_clause != '' THEN + v_where_clause := 'WHERE ' || v_where_clause; + END IF; + + /* Lock rows and copy data */ + RAISE NOTICE 'Copying data to partitions...'; + EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', + relation, v_where_clause, 
v_limit_clause) + USING p_min, p_max + INTO ctids; + + EXECUTE format('WITH data AS ( + DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) + INSERT INTO %1$s SELECT * FROM data', + relation) + USING ctids; + + /* Get number of inserted rows */ + GET DIAGNOSTICS p_total = ROW_COUNT; + RETURN; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is ON */ + + +CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS $$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Delete rows from both config tables */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + /* Drop triggers on update */ + PERFORM @extschema@.drop_triggers(parent_relid); +END +$$ LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( + parent_relid REGCLASS, + expression TEXT, + partition_data BOOLEAN) +RETURNS VOID AS $$ +DECLARE + constr_name TEXT; + is_referenced BOOLEAN; + rel_persistence CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_expression(parent_relid, expression); + + IF partition_data = true THEN + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + END IF; + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = parent_relid INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be partitioned', parent_relid; + END IF; + + IF EXISTS (SELECT * FROM @extschema@.pathman_config + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has already been partitioned', parent_relid; + END IF; + + /* Check if there are foreign keys that 
reference the relation */ + FOR constr_name IN (SELECT conname FROM pg_catalog.pg_constraint + WHERE confrelid = parent_relid::REGCLASS::OID) + LOOP + is_referenced := TRUE; + RAISE WARNING 'foreign key "%" references table "%"', constr_name, parent_relid; + END LOOP; + + IF is_referenced THEN + RAISE EXCEPTION 'table "%" is referenced from other tables', parent_relid; + END IF; + +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +RETURNS event_trigger AS $$ +DECLARE + obj RECORD; + pg_class_oid OID; + relids REGCLASS[]; + +BEGIN + pg_class_oid = 'pg_catalog.pg_class'::regclass; + + /* Find relids to remove from config */ + SELECT array_agg(cfg.partrel) INTO relids + FROM pg_event_trigger_dropped_objects() AS events + JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid + WHERE events.classid = pg_class_oid AND events.objsubid = 0; + + /* Cleanup pathman_config */ + DELETE FROM @extschema@.pathman_config WHERE partrel = ANY(relids); + + /* Cleanup params table too */ + DELETE FROM @extschema@.pathman_config_params WHERE partrel = ANY(relids); +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + + +CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + + +CREATE OR REPLACE FUNCTION 
@extschema@.drop_triggers( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + triggername TEXT; + relation OID; + +BEGIN + triggername := @extschema@.build_update_trigger_name(parent_relid); + + /* Drop trigger for each partition if exists */ + FOR relation IN (SELECT pg_catalog.pg_inherits.inhrelid + FROM pg_catalog.pg_inherits + JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid + WHERE inhparent = parent_relid AND tgname = triggername) + LOOP + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + relation::REGCLASS); + END LOOP; + + /* Drop trigger on parent */ + IF EXISTS (SELECT * FROM pg_catalog.pg_trigger + WHERE tgname = triggername AND tgrelid = parent_relid) + THEN + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + triggername, + parent_relid::TEXT); + END IF; +END +$$ LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( + parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +RETURNS INTEGER AS $$ +DECLARE + child REGCLASS; + rows_count BIGINT; + part_count INTEGER := 0; + rel_kind CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + IF NOT EXISTS (SELECT FROM @extschema@.pathman_config + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has no partitions', parent_relid::TEXT; + END IF; + + /* First, drop all triggers */ + PERFORM @extschema@.drop_triggers(parent_relid); + + /* Also drop naming sequence */ + PERFORM @extschema@.drop_naming_sequence(parent_relid); + + FOR child IN (SELECT inhrelid::REGCLASS + FROM pg_catalog.pg_inherits + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) + LOOP + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + child::TEXT); + GET DIAGNOSTICS rows_count = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', rows_count, child; + 
END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = child + INTO rel_kind; + + /* + * Determine the kind of child relation. It can be either a regular + * table (r) or a foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. + */ + IF rel_kind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', child); + ELSE + EXECUTE format('DROP TABLE %s', child); + END IF; + + part_count := part_count + 1; + END LOOP; + + /* Finally delete both config entries */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + RETURN part_count; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + + +CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + conid OID; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + FOR conid IN (SELECT oid FROM pg_catalog.pg_constraint + WHERE conrelid = parent_relid AND contype = 'f') + LOOP + EXECUTE format('ALTER TABLE %s ADD %s', + partition_relid::TEXT, + pg_catalog.pg_get_constraintdef(conid)); + END LOOP; +END +$$ LANGUAGE plpgsql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( + relid REGCLASS) +RETURNS TEXT AS +$$ + SELECT expr FROM @extschema@.pathman_config WHERE partrel = relid; +$$ +LANGUAGE sql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( + relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT parttype FROM @extschema@.pathman_config WHERE partrel = relid; +$$ +LANGUAGE sql STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.pathman_update_trigger_func() +RETURNS TRIGGER AS 'pg_pathman', 'pathman_update_trigger_func' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.create_update_triggers( + parent_relid REGCLASS) +RETURNS VOID 
AS 'pg_pathman', 'create_update_triggers' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.create_single_update_trigger( + parent_relid REGCLASS, + partition_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'create_single_update_trigger' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.has_update_trigger( + parent_relid REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'has_update_trigger' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.validate_expression( + relid REGCLASS, + expression TEXT) +RETURNS VOID AS 'pg_pathman', 'validate_expression' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( + type_oid REGTYPE, + opname TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.is_tuple_convertible( + relation1 REGCLASS, + relation2 REGCLASS) +RETURNS BOOL AS 'pg_pathman', 'is_tuple_convertible' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( + partition_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + expression TEXT, + range_interval TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + expression TEXT) +RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.prevent_part_modification( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'prevent_part_modification' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.prevent_data_modification( + parent_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'prevent_data_modification' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( + parent_relid REGCLASS, + expression TEXT, + 
partitions_count INT4, + partition_data BOOLEAN DEFAULT TRUE, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL) +RETURNS INTEGER AS $$ +BEGIN + expression := lower(expression); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression); + + /* Create partitions */ + PERFORM @extschema@.create_hash_partitions_internal(parent_relid, + expression, + partitions_count, + partition_names, + tablespaces); + + /* Copy data */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN partitions_count; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; + + +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS $$ +DECLARE + parent_relid REGCLASS; + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(old_partition); + PERFORM @extschema@.prevent_data_modification(new_partition); + 
+ /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + /* Check that table is partitioned */ + IF @extschema@.get_partition_key(parent_relid) IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND conname = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + RETURN new_partition; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE 
FUNCTION @extschema@.build_hash_condition( + attribute_type REGTYPE, + attribute TEXT, + partitions_count INT4, + partition_index INT4) +RETURNS TEXT AS 'pg_pathman', 'build_hash_condition' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS $$ +DECLARE + min_value start_value%TYPE; + max_value start_value%TYPE; + rows_count BIGINT; + +BEGIN + /* Get min and max values */ + EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + FROM %2$s WHERE NOT %1$s IS NULL', + expression, parent_relid::TEXT) + INTO rows_count, min_value, max_value; + + /* Check if column has NULL values */ + IF rows_count > 0 AND (min_value IS NULL OR max_value IS NULL) THEN + RAISE EXCEPTION 'expression "%" returns NULL values', expression; + END IF; + + /* Check lower boundary */ + IF start_value > min_value THEN + RAISE EXCEPTION 'start value is greater than min value of "%"', expression; + END IF; + + /* Check upper boundary */ + IF end_value <= max_value THEN + RAISE EXCEPTION 'not enough partitions to fit all values of "%"', expression; + END IF; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + value_type REGTYPE; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION '"p_count" must not be less than 0'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, 
parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + value_type := @extschema@.get_base_type(pg_typeof(start_value)); + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + EXECUTE + format('SELECT @extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', + parent_relid, + start_value, + end_value, + value_type::TEXT) + USING + expression; + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + max_value 
start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF max_value IS NULL THEN + RAISE EXCEPTION 'expression "%" can return NULL values', expression; + END IF; + + p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); 
+ ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + bounds ANYARRAY, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + part_count INTEGER := 0; + +BEGIN + IF array_ndims(bounds) > 1 THEN + RAISE EXCEPTION 'Bounds array must be a one dimensional array'; + END IF; + + IF array_length(bounds, 1) < 2 THEN + RAISE EXCEPTION 'Bounds array must have at least two values'; + END IF; + + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + bounds[0], + bounds[array_length(bounds, 1) - 1]); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); + + /* Create partitions */ + part_count := @extschema@.create_range_partitions_internal(parent_relid, + bounds, + partition_names, + tablespaces); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ +LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL, + OUT p_range ANYARRAY) +RETURNS ANYARRAY AS $$ +DECLARE + parent_relid REGCLASS; + part_type INTEGER; + part_expr TEXT; + part_expr_type REGTYPE; + check_name TEXT; + check_cond TEXT; + new_partition TEXT; + 
+BEGIN + parent_relid = @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(partition_relid); + + part_expr_type = @extschema@.get_partition_key_type(parent_relid); + part_expr := @extschema@.get_partition_key(parent_relid); + + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Get partition values range */ + EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING partition_relid + INTO p_range; + + IF p_range IS NULL THEN + RAISE EXCEPTION 'could not find specified partition'; + END IF; + + /* Check if value fit into the range */ + IF p_range[1] > split_value OR p_range[2] <= split_value + THEN + RAISE EXCEPTION 'specified value does not fit into the range [%, %)', + p_range[1], p_range[2]; + END IF; + + /* Create new partition */ + new_partition := @extschema@.create_single_range_partition(parent_relid, + split_value, + p_range[2], + partition_name, + tablespace); + + /* Copy data */ + check_cond := @extschema@.build_range_condition(new_partition::regclass, + part_expr, split_value, p_range[2]); + EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) + INSERT INTO %s SELECT * FROM part_data', + partition_relid::TEXT, + check_cond, + new_partition); + + /* Alter original partition */ + check_cond := @extschema@.build_range_condition(partition_relid::regclass, + part_expr, p_range[1], split_value); + check_name := @extschema@.build_check_constraint_name(partition_relid); + + EXECUTE 
format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + check_name); + + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + check_name, + check_cond); +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '+') THEN + RAISE EXCEPTION 'type % does not support ''+'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + v_args_format TEXT; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot append to empty partitions set'; + END IF; + + part_expr_type := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT 
@extschema@.get_part_range($1, -1, NULL::%s)', + part_expr_type::TEXT) + USING parent_relid + INTO p_range; + + IF p_range[2] IS NULL THEN + RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; + END IF; + + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); + ELSE + v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); + END IF; + + EXECUTE + format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[2], + p_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '-') THEN + RAISE EXCEPTION 'type % does not support ''-'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( + parent_relid REGCLASS, + p_atttype REGTYPE, + p_interval TEXT, + p_range 
ANYARRAY DEFAULT NULL, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + v_args_format TEXT; + +BEGIN + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot prepend to empty partitions set'; + END IF; + + part_expr_type := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + part_expr_type::TEXT) + USING parent_relid + INTO p_range; + + IF p_range[1] IS NULL THEN + RAISE EXCEPTION 'Cannot prepend partition because first partition''s range is half open'; + END IF; + + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); + ELSE + v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); + END IF; + + EXECUTE + format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_name TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; + + /* check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM @extschema@.check_range_available(parent_relid, + start_value, + end_value); + END IF; + + /* Create new partition */ + part_name := 
@extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition_relid REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + part_type INTEGER; + v_relkind CHAR; + v_rows BIGINT; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + part_name := partition_relid::TEXT; /* save the name to be returned */ + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition_relid::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition_relid + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + END IF; + + RETURN part_name; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + + +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS $$ +DECLARE + part_expr TEXT; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition_relid INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition_relid::TEXT; + END IF; + + /* check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + + part_expr := @extschema@.get_partition_key(parent_relid); + + IF part_expr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid), + @extschema@.build_range_condition(partition_relid, + part_expr, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + 
SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + /* If update trigger is enabled then create one for this partition */ + if @extschema@.has_update_trigger(parent_relid) THEN + PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); + END IF; + + /* Invoke an initialization callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition_relid, + v_init_callback, + start_value, + end_value); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_type INTEGER; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition_relid::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid)); + + /* Remove update trigger */ + EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', + @extschema@.build_update_trigger_name(parent_relid), + partition_relid::TEXT); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( + parent_relid REGCLASS, + bounds ANYARRAY, + partition_names TEXT[], + tablespaces TEXT[]) +RETURNS 
REGCLASS AS 'pg_pathman', 'create_range_partitions_internal' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( + partition_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS 'pg_pathman', 'build_range_condition' +LANGUAGE C; + + +CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( + p_start ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' +LANGUAGE C STRICT; + + +CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( + p_start ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' +LANGUAGE C STRICT; + + + +/* ------------------------------------------------------------------------ + * Final words of wisdom + * ----------------------------------------------------------------------*/ +DO language plpgsql +$$ + BEGIN + RAISE WARNING 'Don''t forget to execute "SET pg_pathman.enable = t" to activate pg_pathman'; + END +$$; diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql new file mode 100644 index 00000000..2aa02bf9 --- /dev/null +++ b/pg_pathman--1.4--1.5.sql @@ -0,0 +1,955 @@ +ALTER TABLE @extschema@.pathman_config DROP CONSTRAINT pathman_config_interval_check; + +DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, + TEXT, TEXT); +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + expr TEXT, + parttype INTEGER, + range_interval TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + +ALTER TABLE @extschema@.pathman_config DROP COLUMN cooked_expr; +/* + * Dropped columns are never actually purged, entry in pg_attribute remains. + * Since dealing with different number of attrs in C code is cumbersome, + * let's recreate table instead. 
+ */ +CREATE TABLE @extschema@.pathman_config_tmp (LIKE @extschema@.pathman_config INCLUDING ALL); +INSERT INTO @extschema@.pathman_config_tmp SELECT * FROM @extschema@.pathman_config; +ALTER EVENT TRIGGER pathman_ddl_trigger DISABLE; +DROP TABLE @extschema@.pathman_config; +ALTER TABLE @extschema@.pathman_config_tmp RENAME TO pathman_config; +ALTER EVENT TRIGGER pathman_ddl_trigger ENABLE; + +/* + * Get back stuff not preserved by CREATE TABLE LIKE: ACL, RLS and + * pg_extension_config_dump mark. + */ + +GRANT SELECT, INSERT, UPDATE, DELETE +ON @extschema@.pathman_config +TO public; + +/* + * Row security policy to restrict partitioning operations to owner and superusers only + */ +CREATE POLICY deny_modification ON @extschema@.pathman_config +FOR ALL USING (check_security_policy(partrel)); +CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); +ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; + +/* + * Enable dump of config tables with pg_dump. + */ +SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); + + +ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_check + CHECK (@extschema@.validate_interval_value(partrel, + expr, + parttype, + range_interval)); + +CREATE TRIGGER pathman_config_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + +/* + * Get parsed and analyzed expression. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_cooked_key( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' +LANGUAGE C STRICT; + +/* + * Add new partition + */ +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_name TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; + + /* Check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM @extschema@.check_range_available(parent_relid, + start_value, + end_value); + END IF; + + /* Create new partition */ + part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Append new partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '+') THEN + RAISE EXCEPTION 'type % does not support ''+'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Attach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS $$ +DECLARE + part_expr TEXT; + part_type INTEGER; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition_relid INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition_relid::TEXT; + END IF; + + /* Check range 
overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + part_expr := @extschema@.get_partition_key(parent_relid); + part_type := @extschema@.get_partition_type(parent_relid); + + IF part_expr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid), + @extschema@.build_range_condition(partition_relid, + part_expr, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + /* Invoke an initialization callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition_relid, + v_init_callback, + start_value, + end_value); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + +/* + * Create a naming sequence for partitioned table. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Creates RANGE partitions for specified relation based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION '"p_count" must not be less than 0'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + PERFORM 
@extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on numerical expression + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF max_value IS NULL THEN + RAISE EXCEPTION 'expression "%" can return NULL values', expression; + END IF; + + 
p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on bounds array + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + bounds ANYARRAY, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + part_count INTEGER := 0; + +BEGIN + IF array_ndims(bounds) > 1 THEN + RAISE EXCEPTION 'Bounds array must be a one dimensional array'; + END IF; + + IF array_length(bounds, 1) < 2 THEN + RAISE EXCEPTION 'Bounds array must have at least two values'; + END IF; + + 
PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + bounds[1], + bounds[array_length(bounds, 1)]); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); + + /* Create partitions */ + part_count := @extschema@.create_range_partitions_internal(parent_relid, + bounds, + partition_names, + tablespaces); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ +LANGUAGE plpgsql; + +/* + * Detach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_type INTEGER; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition_relid::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + 
@extschema@.build_check_constraint_name(partition_relid)); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + +/* + * Replace hash partition with another one. It could be useful in case when + * someone wants to attach foreign table as a partition. + * + * lock_parent - should we take an exclusive lock? + */ +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS $$ +DECLARE + parent_relid REGCLASS; + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(old_partition); + PERFORM @extschema@.prevent_data_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + /* Check that table is partitioned */ + IF @extschema@.get_partition_key(parent_relid) IS 
NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND quote_ident(conname) = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + RETURN new_partition; +END +$$ LANGUAGE plpgsql; + +/* + * Disable pathman partitioning for specified relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS $$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Delete rows from both config tables */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; +END +$$ LANGUAGE plpgsql STRICT; + +/* + * Drop a naming sequence for partitioned table. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Drop partitions. If delete_data set to TRUE, partitions + * will be dropped with all the data. + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( + parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +RETURNS INTEGER AS $$ +DECLARE + child REGCLASS; + rows_count BIGINT; + part_count INTEGER := 0; + rel_kind CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + IF NOT EXISTS (SELECT FROM @extschema@.pathman_config + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has no partitions', parent_relid::TEXT; + END IF; + + /* Also drop naming sequence */ + PERFORM @extschema@.drop_naming_sequence(parent_relid); + + FOR child IN (SELECT inhrelid::REGCLASS + FROM pg_catalog.pg_inherits + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) + LOOP + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + child::TEXT); + GET DIAGNOSTICS rows_count = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', rows_count, child; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = child + INTO rel_kind; + + /* + * Determine the kind of child relation. It can be either a regular + * table (r) or a foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF rel_kind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', child); + ELSE + EXECUTE format('DROP TABLE %s', child); + END IF; + + part_count := part_count + 1; + END LOOP; + + /* Finally delete both config entries */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + RETURN part_count; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +/* + * Drop range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition_relid REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + part_type INTEGER; + v_relkind CHAR; + v_rows BIGINT; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + part_name := partition_relid::TEXT; /* save the name to be returned */ + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition_relid::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition_relid + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + END IF; + + RETURN part_name; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +CREATE FUNCTION @extschema@.pathman_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' +LANGUAGE C STRICT; + +/* + * Get number of partitions managed by pg_pathman. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT count(*)::INT4 + FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Get partitioning key. + */ +DROP FUNCTION @extschema@.get_partition_key(REGCLASS); +CREATE FUNCTION @extschema@.get_partition_key( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ + SELECT expr + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Get partitioning key type. + */ +DROP FUNCTION @extschema@.get_partition_key_type(REGCLASS); +CREATE FUNCTION @extschema@.get_partition_key_type( + parent_relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' +LANGUAGE C STRICT; + +/* + * Get partitioning type. + */ +DROP FUNCTION @extschema@.get_partition_type(REGCLASS); +CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Merge RANGE partitions. + */ +DROP FUNCTION @extschema@.merge_range_partitions(regclass[]); +DROP FUNCTION @extschema@.merge_range_partitions(regclass, regclass); + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + variadic partitions REGCLASS[]) +RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + +/* + * Prepend new partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '-') THEN + RAISE EXCEPTION 'type % does not support ''-'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Show all existing concurrent partitioning tasks. + */ +DROP VIEW @extschema@.pathman_concurrent_part_tasks; +DROP FUNCTION @extschema@.show_concurrent_part_tasks(); +CREATE FUNCTION @extschema@.show_concurrent_part_tasks() +RETURNS TABLE ( + userid REGROLE, + pid INT, + dbid OID, + relid REGCLASS, + processed INT8, + status TEXT) +AS 'pg_pathman', 'show_concurrent_part_tasks_internal' +LANGUAGE C STRICT; + +CREATE VIEW @extschema@.pathman_concurrent_part_tasks +AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); +GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; + +/* + * Split RANGE partition in two using a pivot. 
+ */ +DROP FUNCTION @extschema@.split_range_partition(regclass, anyelement, text, text, OUT anyarray); +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' +LANGUAGE C; + +DROP FUNCTION @extschema@.build_update_trigger_func_name(regclass); +DROP FUNCTION @extschema@.build_update_trigger_name(regclass); +DROP FUNCTION @extschema@.create_single_update_trigger(regclass, regclass); +DROP FUNCTION @extschema@.create_update_triggers(regclass); +DROP FUNCTION @extschema@.drop_triggers(regclass); +DROP FUNCTION @extschema@.has_update_trigger(regclass); +DROP FUNCTION @extschema@.pathman_update_trigger_func() CASCADE; +DROP FUNCTION @extschema@.get_pathman_lib_version(); diff --git a/pg_pathman.control b/pg_pathman.control index ecc4ef64..138b26c6 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension -comment 'Partitioning tool ver. 
1.0' -default_version = '1.0' -module_pathname='$libdir/pg_pathman' +comment = 'Partitioning tool for PostgreSQL' +default_version = '1.5' +module_pathname = '$libdir/pg_pathman' diff --git a/range.sql b/range.sql index 8c1511c0..5af17014 100644 --- a/range.sql +++ b/range.sql @@ -1,76 +1,47 @@ /* ------------------------------------------------------------------------ * * range.sql - * RANGE partitioning functions + * RANGE partitioning functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ -CREATE OR REPLACE FUNCTION @extschema@.get_sequence_name( - plain_schema TEXT, - plain_relname TEXT) -RETURNS TEXT AS -$$ -BEGIN - RETURN format('%s.%s', - quote_ident(plain_schema), - quote_ident(format('%s_seq', plain_relname))); -END -$$ -LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION @extschema@.create_or_replace_sequence( - plain_schema TEXT, - plain_relname TEXT, - OUT seq_name TEXT) -AS $$ -BEGIN - seq_name := @extschema@.get_sequence_name(plain_schema, plain_relname); - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); -END -$$ -LANGUAGE plpgsql; - /* * Check RANGE partition boundaries. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( +CREATE FUNCTION @extschema@.check_boundaries( parent_relid REGCLASS, - p_attribute TEXT, - p_start_value ANYELEMENT, - p_end_value ANYELEMENT) -RETURNS VOID AS -$$ + expression TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS VOID AS $$ DECLARE - v_min p_start_value%TYPE; - v_max p_start_value%TYPE; - v_count BIGINT; + min_value start_value%TYPE; + max_value start_value%TYPE; + rows_count BIGINT; BEGIN /* Get min and max values */ - EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + EXECUTE pg_catalog.format('SELECT count(*), min(%1$s), max(%1$s) FROM %2$s WHERE NOT %1$s IS NULL', - p_attribute, parent_relid::TEXT) - INTO v_count, v_min, v_max; + expression, parent_relid::TEXT) + INTO rows_count, min_value, max_value; /* Check if column has NULL values */ - IF v_count > 0 AND (v_min IS NULL OR v_max IS NULL) THEN - RAISE EXCEPTION '''%'' column contains NULL values', p_attribute; + IF rows_count > 0 AND (min_value IS NULL OR max_value IS NULL) THEN + RAISE EXCEPTION 'expression "%" returns NULL values', expression; END IF; /* Check lower boundary */ - IF p_start_value > v_min THEN - RAISE EXCEPTION 'Start value is less than minimum value of ''%''', - p_attribute; + IF start_value > min_value THEN + RAISE EXCEPTION 'start value is greater than min value of "%"', expression; END IF; /* Check upper boundary */ - IF p_end_value <= v_max THEN - RAISE EXCEPTION 'Not enough partitions to fit all values of ''%''', - p_attribute; + IF end_value <= max_value THEN + RAISE EXCEPTION 'not enough partitions to fit all values of "%"', expression; END IF; END $$ LANGUAGE plpgsql; @@ -78,51 +49,44 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on datetime attribute */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, - p_attribute TEXT, - p_start_value ANYELEMENT, + 
expression TEXT, + start_value ANYELEMENT, p_interval INTERVAL, p_count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT true) -RETURNS INTEGER AS -$$ + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ DECLARE - v_rows_count INTEGER; - v_max p_start_value%TYPE; - v_cur_value p_start_value%TYPE := p_start_value; - i INTEGER; + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; BEGIN - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - PERFORM @extschema@.validate_relname(parent_relid); - p_attribute := lower(p_attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, p_attribute); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); IF p_count < 0 THEN - RAISE EXCEPTION 'Partitions count must not be less than zero'; + RAISE EXCEPTION '"p_count" must not be less than 0'; END IF; /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', p_attribute, parent_relid) - INTO v_rows_count, v_max; + EXECUTE pg_catalog.format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; - IF v_rows_count = 0 THEN - RAISE EXCEPTION 'Cannot determine partitions count for empty table'; + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; END IF; p_count := 0; - WHILE v_cur_value <= v_max + WHILE cur_value <= max_value LOOP - v_cur_value := v_cur_value + p_interval; + cur_value := cur_value + p_interval; p_count := p_count + 1; END LOOP; END IF; @@ -132,100 +96,94 @@ BEGIN * and specifies partition count as 0 then do not check boundaries */ IF p_count != 0 THEN + 
/* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + /* Check boundaries */ - EXECUTE format('SELECT @extschema@.check_boundaries(''%s'', ''%s'', ''%s'', ''%s''::%s)', - parent_relid, - p_attribute, - p_start_value, - p_start_value + p_interval * p_count, - pg_typeof(p_start_value)); + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); END IF; /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + PERFORM @extschema@.create_naming_sequence(parent_relid); /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, p_attribute, 2, p_interval::TEXT); - - /* Create first partition */ - FOR i IN 1..p_count - LOOP - EXECUTE format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s)', - pg_typeof(p_start_value)) - USING parent_relid, p_start_value, p_start_value + p_interval; - - p_start_value := p_start_value + p_interval; - END LOOP; + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; /* Relocate data if asked to */ IF partition_data = true THEN - PERFORM @extschema@.disable_parent(parent_relid); + PERFORM @extschema@.set_enable_parent(parent_relid, false); PERFORM @extschema@.partition_data(parent_relid); ELSE - PERFORM @extschema@.enable_parent(parent_relid); + PERFORM @extschema@.set_enable_parent(parent_relid, true); END IF; - RETURN p_count; + RETURN 
part_count; END $$ LANGUAGE plpgsql; /* - * Creates RANGE partitions for specified relation based on numerical attribute + * Creates RANGE partitions for specified relation based on numerical expression */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, - p_attribute TEXT, - p_start_value ANYELEMENT, + expression TEXT, + start_value ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER DEFAULT NULL, - partition_data BOOLEAN DEFAULT true) -RETURNS INTEGER AS -$$ + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ DECLARE - v_rows_count INTEGER; - v_max p_start_value%TYPE; - v_cur_value p_start_value%TYPE := p_start_value; - i INTEGER; + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; BEGIN - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - PERFORM @extschema@.validate_relname(parent_relid); - p_attribute := lower(p_attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, p_attribute); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); IF p_count < 0 THEN - RAISE EXCEPTION 'Partitions count must not be less than zero'; + RAISE EXCEPTION 'partitions count must not be less than zero'; END IF; /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', p_attribute, parent_relid) - INTO v_rows_count, v_max; + EXECUTE pg_catalog.format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; - IF v_rows_count = 0 THEN - RAISE EXCEPTION 'Cannot determine partitions count for empty table'; + IF rows_count 
= 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; END IF; - IF v_max IS NULL THEN - RAISE EXCEPTION '''%'' column has NULL values', p_attribute; + IF max_value IS NULL THEN + RAISE EXCEPTION 'expression "%" can return NULL values', expression; END IF; p_count := 0; - WHILE v_cur_value <= v_max + WHILE cur_value <= max_value LOOP - v_cur_value := v_cur_value + p_interval; + cur_value := cur_value + p_interval; p_count := p_count + 1; END LOOP; END IF; @@ -235,39 +193,43 @@ BEGIN * and specifies partition count as 0 then do not check boundaries */ IF p_count != 0 THEN - /* check boundaries */ + /* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, - p_attribute, - p_start_value, - p_start_value + p_interval * p_count); + expression, + start_value, + end_value); END IF; /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + PERFORM @extschema@.create_naming_sequence(parent_relid); /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, p_attribute, 2, p_interval::TEXT); + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); - /* create first partition */ - FOR i IN 1..p_count - LOOP - PERFORM @extschema@.create_single_range_partition(parent_relid, - p_start_value, - p_start_value + p_interval); - p_start_value := p_start_value + p_interval; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + 
p_interval, + p_count), + NULL, + NULL); + END IF; /* Relocate data if asked to */ IF partition_data = true THEN - PERFORM @extschema@.disable_parent(parent_relid); + PERFORM @extschema@.set_enable_parent(parent_relid, false); PERFORM @extschema@.partition_data(parent_relid); ELSE - PERFORM @extschema@.enable_parent(parent_relid); + PERFORM @extschema@.set_enable_parent(parent_relid, true); END IF; RETURN p_count; @@ -275,479 +237,109 @@ END $$ LANGUAGE plpgsql; /* - * Creates RANGE partitions for specified range + * Creates RANGE partitions for specified relation based on bounds array */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, - p_attribute TEXT, - p_start_value ANYELEMENT, - p_end_value ANYELEMENT, - p_interval ANYELEMENT, - partition_data BOOLEAN DEFAULT true) -RETURNS INTEGER AS -$$ + expression TEXT, + bounds ANYARRAY, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ DECLARE part_count INTEGER := 0; BEGIN - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - PERFORM @extschema@.validate_relname(parent_relid); - p_attribute := lower(p_attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, p_attribute); - - IF p_interval <= 0 THEN - RAISE EXCEPTION 'Interval must be positive'; + IF array_ndims(bounds) > 1 THEN + RAISE EXCEPTION 'Bounds array must be a one dimensional array'; END IF; - /* Check boundaries */ - PERFORM @extschema@.check_boundaries(parent_relid, - p_attribute, - p_start_value, - p_end_value); - - /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM 
@extschema@.get_plain_schema_and_relname(parent_relid); - - /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, p_attribute, 2, p_interval::TEXT); - - WHILE p_start_value <= p_end_value - LOOP - PERFORM @extschema@.create_single_range_partition(parent_relid, - p_start_value, - p_start_value + p_interval); - p_start_value := p_start_value + p_interval; - part_count := part_count + 1; - END LOOP; - - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); - - /* Relocate data if asked to */ - IF partition_data = true THEN - PERFORM @extschema@.disable_parent(parent_relid); - PERFORM @extschema@.partition_data(parent_relid); - ELSE - PERFORM @extschema@.enable_parent(parent_relid); + IF array_length(bounds, 1) < 2 THEN + RAISE EXCEPTION 'Bounds array must have at least two values'; END IF; - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - -/* - * Creates RANGE partitions for specified range based on datetime attribute - */ -CREATE OR REPLACE FUNCTION @extschema@.create_partitions_from_range( - parent_relid REGCLASS, - p_attribute TEXT, - p_start_value ANYELEMENT, - p_end_value ANYELEMENT, - p_interval INTERVAL, - partition_data BOOLEAN DEFAULT true) -RETURNS INTEGER AS -$$ -DECLARE - part_count INTEGER := 0; - -BEGIN - IF partition_data = true THEN - /* Acquire data modification lock */ - PERFORM @extschema@.prevent_relation_modification(parent_relid); - ELSE - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - END IF; - - PERFORM @extschema@.validate_relname(parent_relid); - p_attribute := lower(p_attribute); - PERFORM @extschema@.common_relation_checks(parent_relid, p_attribute); + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, - 
p_attribute, - p_start_value, - p_end_value); + expression, + bounds[1], + bounds[array_length(bounds, 1)]); /* Create sequence for child partitions names */ - PERFORM @extschema@.create_or_replace_sequence(schema, relname) - FROM @extschema@.get_plain_schema_and_relname(parent_relid); + PERFORM @extschema@.create_naming_sequence(parent_relid); /* Insert new entry to pathman config */ - INSERT INTO @extschema@.pathman_config (partrel, attname, parttype, range_interval) - VALUES (parent_relid, p_attribute, 2, p_interval::TEXT); - - WHILE p_start_value <= p_end_value - LOOP - EXECUTE format('SELECT @extschema@.create_single_range_partition($1, $2, $3::%s);', - pg_typeof(p_start_value)) - USING parent_relid, p_start_value, p_start_value + p_interval; - - p_start_value := p_start_value + p_interval; - part_count := part_count + 1; - END LOOP; + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); - /* Notify backend about changes */ - PERFORM @extschema@.on_create_partitions(parent_relid); + /* Create partitions */ + part_count := @extschema@.create_range_partitions_internal(parent_relid, + bounds, + partition_names, + tablespaces); /* Relocate data if asked to */ IF partition_data = true THEN - PERFORM @extschema@.disable_parent(parent_relid); + PERFORM @extschema@.set_enable_parent(parent_relid, false); PERFORM @extschema@.partition_data(parent_relid); ELSE - PERFORM @extschema@.enable_parent(parent_relid); - END IF; - - RETURN part_count; /* number of created partitions */ -END -$$ LANGUAGE plpgsql; - -/* - * Creates new RANGE partition. Returns partition name. - * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
- */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( - parent_relid REGCLASS, - p_start_value ANYELEMENT, - p_end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_part_num INT; - v_child_relname TEXT; - v_plain_child_relname TEXT; - v_attname TEXT; - v_plain_schema TEXT; - v_plain_relname TEXT; - v_child_relname_exists BOOL; - v_seq_name TEXT; - -BEGIN - v_attname := attname FROM @extschema@.pathman_config - WHERE partrel = parent_relid; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', parent_relid::TEXT; - END IF; - - SELECT * INTO v_plain_schema, v_plain_relname - FROM @extschema@.get_plain_schema_and_relname(parent_relid); - - v_seq_name := @extschema@.get_sequence_name(v_plain_schema, v_plain_relname); - - IF partition_name IS NULL THEN - /* Get next value from sequence */ - LOOP - v_part_num := nextval(v_seq_name); - v_plain_child_relname := format('%s_%s', v_plain_relname, v_part_num); - v_child_relname := format('%s.%s', - quote_ident(v_plain_schema), - quote_ident(v_plain_child_relname)); - - v_child_relname_exists := count(*) > 0 - FROM pg_class - WHERE relname = v_plain_child_relname AND - relnamespace = v_plain_schema::regnamespace - LIMIT 1; - - EXIT WHEN v_child_relname_exists = false; - END LOOP; - ELSE - v_child_relname := partition_name; + PERFORM @extschema@.set_enable_parent(parent_relid, true); END IF; - EXECUTE format('CREATE TABLE %1$s (LIKE %2$s INCLUDING ALL) INHERITS (%2$s)', - v_child_relname, - parent_relid::TEXT); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - v_child_relname, - @extschema@.build_check_constraint_name(v_child_relname::REGCLASS, - v_attname), - @extschema@.build_range_condition(v_attname, - p_start_value, - p_end_value)); - - RETURN v_child_relname; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; - -/* - * Split RANGE partition - */ -CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( - 
p_partition REGCLASS, - p_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS -$$ -DECLARE - v_parent_relid REGCLASS; - v_attname TEXT; - v_cond TEXT; - v_new_partition TEXT; - v_part_type INTEGER; - v_part_relname TEXT; - v_check_name TEXT; - -BEGIN - v_part_relname := @extschema@.validate_relname(p_partition); - v_parent_relid = @extschema@.get_parent_of_partition(p_partition); - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent_relid); - - /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(p_partition); - - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent_relid - INTO v_attname, v_part_type; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', v_parent_relid::TEXT; - END IF; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION 'Specified partition isn''t RANGE partition'; - END IF; - - /* Get partition values range */ - p_range := @extschema@.get_range_by_part_oid(v_parent_relid, p_partition, 0); - IF p_range IS NULL THEN - RAISE EXCEPTION 'Could not find specified partition'; - END IF; - - /* Check if value fit into the range */ - IF p_range[1] > p_value OR p_range[2] <= p_value - THEN - RAISE EXCEPTION 'Specified value does not fit into the range [%, %)', - p_range[1], p_range[2]; - END IF; - - /* Create new partition */ - v_new_partition := @extschema@.create_single_range_partition(v_parent_relid, - p_value, - p_range[2], - partition_name); - - /* Copy data */ - v_cond := @extschema@.build_range_condition(v_attname, p_value, p_range[2]); - EXECUTE format('WITH part_data AS (DELETE FROM %s WHERE %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - p_partition::TEXT, - v_cond, - v_new_partition); - - /* Alter original partition */ - v_cond := @extschema@.build_range_condition(v_attname, p_range[1], 
p_value); - v_check_name := @extschema@.build_check_constraint_name(p_partition, v_attname); - - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - p_partition::TEXT, - v_check_name); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - p_partition::TEXT, - v_check_name, - v_cond); - - /* Tell backend to reload configuration */ - PERFORM @extschema@.on_update_partitions(v_parent_relid); + RETURN part_count; END $$ LANGUAGE plpgsql; - /* - * Merge RANGE partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partition1 REGCLASS, - partition2 REGCLASS) -RETURNS VOID AS -$$ -DECLARE - v_parent_relid1 REGCLASS; - v_parent_relid2 REGCLASS; - v_attname TEXT; - v_part_type INTEGER; - v_atttype TEXT; - -BEGIN - IF partition1 = partition2 THEN - RAISE EXCEPTION 'Cannot merge partition with itself'; - END IF; - - v_parent_relid1 := @extschema@.get_parent_of_partition(partition1); - v_parent_relid2 := @extschema@.get_parent_of_partition(partition2); - - /* Acquire data modification locks (prevent further modifications) */ - PERFORM @extschema@.prevent_relation_modification(partition1); - PERFORM @extschema@.prevent_relation_modification(partition2); - - IF v_parent_relid1 != v_parent_relid2 THEN - RAISE EXCEPTION 'Cannot merge partitions with different parents'; - END IF; - - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(v_parent_relid1); - - SELECT attname, parttype - FROM @extschema@.pathman_config - WHERE partrel = v_parent_relid1 - INTO v_attname, v_part_type; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', v_parent_relid1::TEXT; - END IF; - - /* Check if this is a RANGE partition */ - IF v_part_type != 2 THEN - RAISE EXCEPTION 'Specified partitions aren''t RANGE partitions'; - END IF; - - v_atttype := @extschema@.get_attribute_type_name(partition1, v_attname); - - EXECUTE format('SELECT @extschema@.merge_range_partitions_internal($1, $2, $3, NULL::%s)', - v_atttype) 
- USING v_parent_relid1, partition1, partition2; - - /* Tell backend to reload configuration */ - PERFORM @extschema@.on_update_partitions(v_parent_relid1); -END -$$ -LANGUAGE plpgsql; - - -/* - * Merge two partitions. All data will be copied to the first one. Second - * partition will be destroyed. - * - * NOTE: dummy field is used to pass the element type to the function - * (it is necessary because of pseudo-types used in function). + * Append new partition. */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions_internal( +CREATE FUNCTION @extschema@.append_range_partition( parent_relid REGCLASS, - partition1 REGCLASS, - partition2 REGCLASS, - dummy ANYELEMENT, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS -$$ + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ DECLARE - v_attname TEXT; - v_check_name TEXT; + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; BEGIN - SELECT attname FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname; + PERFORM @extschema@.validate_relname(parent_relid); - IF v_attname IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', parent_relid::TEXT; - END IF; + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); - /* - * Get ranges - * first and second elements of array are MIN and MAX of partition1 - * third and forth elements are MIN and MAX of partition2 - */ - p_range := @extschema@.get_range_by_part_oid(parent_relid, partition1, 0) || - @extschema@.get_range_by_part_oid(parent_relid, partition2, 0); + part_expr_type := @extschema@.get_partition_key_type(parent_relid); - /* Check if ranges are adjacent */ - IF p_range[1] != p_range[4] AND p_range[2] != p_range[3] THEN - RAISE EXCEPTION 'Merge failed. 
Partitions must be adjacent'; + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '+') THEN + RAISE EXCEPTION 'type % does not support ''+'' operator', part_expr_type::REGTYPE; END IF; - /* Drop constraint on first partition... */ - v_check_name := @extschema@.build_check_constraint_name(partition1, v_attname); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition1::TEXT, - v_check_name); - - /* and create a new one */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition1::TEXT, - v_check_name, - @extschema@.build_range_condition(v_attname, - least(p_range[1], p_range[3]), - greatest(p_range[2], p_range[4]))); - - /* Copy data from second partition to the first one */ - EXECUTE format('WITH part_data AS (DELETE FROM %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition2::TEXT, - partition1::TEXT); - - /* Remove second partition */ - EXECUTE format('DROP TABLE %s', partition2::TEXT); -END -$$ LANGUAGE plpgsql; - - -/* - * Append new partition. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( - parent_relid REGCLASS, - partition_name TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ -DECLARE - v_attname TEXT; - v_atttype TEXT; - v_part_name TEXT; - v_interval TEXT; - -BEGIN - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); - - SELECT attname, range_interval + SELECT range_interval FROM @extschema@.pathman_config WHERE partrel = parent_relid - INTO v_attname, v_interval; - - IF v_attname IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', parent_relid::TEXT; - END IF; - - v_atttype := @extschema@.get_attribute_type_name(parent_relid, v_attname); + INTO part_interval; EXECUTE - format( - 'SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4)', - v_atttype) + pg_catalog.format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, - v_atttype, - v_interval, - partition_name + part_expr_type, + part_interval, + partition_name, + tablespace INTO - v_part_name; + part_name; - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - RETURN v_part_name; + RETURN part_name; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Spawn logic for append_partition(). We have to @@ -755,94 +347,103 @@ LANGUAGE plpgsql; * * NOTE: we don't take a xact_handling lock here. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( +CREATE FUNCTION @extschema@.append_partition_internal( parent_relid REGCLASS, - p_atttype TEXT, + p_atttype REGTYPE, p_interval TEXT, p_range ANYARRAY DEFAULT NULL, - partition_name TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ DECLARE - v_part_name TEXT; + part_expr_type REGTYPE; + part_name TEXT; + v_args_format TEXT; BEGIN - IF @extschema@.partitions_count(parent_relid) = 0 THEN - RAISE EXCEPTION 'Cannot append to empty partitions set'; + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot append to empty partitions set'; END IF; - p_range := @extschema@.get_range_by_idx(parent_relid, -1, 0); + part_expr_type := @extschema@.get_base_type(p_atttype); - IF @extschema@.is_date_type(p_atttype::regtype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[2], - p_range[2] + p_interval::interval, - partition_name); + /* We have to pass fake NULL casted to column's type */ + EXECUTE pg_catalog.format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + part_expr_type::TEXT) + USING parent_relid + INTO p_range; + + IF p_range[2] IS NULL THEN + RAISE EXCEPTION 'Cannot append partition because last partition''s range is half open'; + END IF; + + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := pg_catalog.format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE - EXECUTE - format( - 'SELECT @extschema@.create_single_range_partition($1, $2, $2 + $3::%s, $4)', - p_atttype) - USING - parent_relid, - p_range[2], - p_interval, - partition_name - INTO - v_part_name; + v_args_format := pg_catalog.format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); END IF; - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; + EXECUTE + pg_catalog.format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + 
USING + parent_relid, + p_range[2], + p_interval, + partition_name, + tablespace + INTO + part_name; + RETURN part_name; +END +$$ LANGUAGE plpgsql; /* * Prepend new partition. */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( +CREATE FUNCTION @extschema@.prepend_range_partition( parent_relid REGCLASS, - partition_name TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ DECLARE - v_attname TEXT; - v_atttype TEXT; - v_part_name TEXT; - v_interval TEXT; + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; BEGIN - SELECT attname, range_interval - FROM @extschema@.pathman_config - WHERE partrel = parent_relid - INTO v_attname, v_interval; + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); - IF v_attname IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', parent_relid::TEXT; + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '-') THEN + RAISE EXCEPTION 'type % does not support ''-'' operator', part_expr_type::REGTYPE; END IF; - v_atttype := @extschema@.get_attribute_type_name(parent_relid, v_attname); + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; EXECUTE - format( - 'SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4)', - v_atttype) + pg_catalog.format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, - v_atttype, - v_interval, - partition_name + part_expr_type, + part_interval, + partition_name, + tablespace INTO - v_part_name; + part_name; - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); - 
RETURN v_part_name; + RETURN part_name; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Spawn logic for prepend_partition(). We have to @@ -850,386 +451,439 @@ LANGUAGE plpgsql; * * NOTE: we don't take a xact_handling lock here. */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( +CREATE FUNCTION @extschema@.prepend_partition_internal( parent_relid REGCLASS, - p_atttype TEXT, + p_atttype REGTYPE, p_interval TEXT, p_range ANYARRAY DEFAULT NULL, - partition_name TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ DECLARE - v_part_name TEXT; + part_expr_type REGTYPE; + part_name TEXT; + v_args_format TEXT; BEGIN - IF @extschema@.partitions_count(parent_relid) = 0 THEN - RAISE EXCEPTION 'Cannot prepend to empty partitions set'; + IF @extschema@.get_number_of_partitions(parent_relid) = 0 THEN + RAISE EXCEPTION 'cannot prepend to empty partitions set'; END IF; - p_range := @extschema@.get_range_by_idx(parent_relid, 0, 0); + part_expr_type := @extschema@.get_base_type(p_atttype); + + /* We have to pass fake NULL casted to column's type */ + EXECUTE pg_catalog.format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + part_expr_type::TEXT) + USING parent_relid + INTO p_range; + + IF p_range[1] IS NULL THEN + RAISE EXCEPTION 'Cannot prepend partition because first partition''s range is half open'; + END IF; - IF @extschema@.is_date_type(p_atttype::regtype) THEN - v_part_name := @extschema@.create_single_range_partition( - parent_relid, - p_range[1] - p_interval::interval, - p_range[1], - partition_name); + IF @extschema@.is_date_type(p_atttype) THEN + v_args_format := pg_catalog.format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); ELSE - EXECUTE - format( - 'SELECT @extschema@.create_single_range_partition($1, $2 - $3::%s, $2, $4)', - p_atttype) - USING - parent_relid, - p_range[1], - p_interval, - partition_name - INTO - v_part_name; + v_args_format 
:= pg_catalog.format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); END IF; - RETURN v_part_name; -END -$$ -LANGUAGE plpgsql; + EXECUTE + pg_catalog.format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + USING + parent_relid, + p_range[1], + p_interval, + partition_name, + tablespace + INTO + part_name; + RETURN part_name; +END +$$ LANGUAGE plpgsql; /* * Add new partition */ -CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( +CREATE FUNCTION @extschema@.add_range_partition( parent_relid REGCLASS, - p_start_value ANYELEMENT, - p_end_value ANYELEMENT, - partition_name TEXT DEFAULT NULL) -RETURNS TEXT AS -$$ + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ DECLARE - v_part_name TEXT; + part_name TEXT; BEGIN - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); - IF p_start_value >= p_end_value THEN - RAISE EXCEPTION 'Failed to create partition: p_start_value is greater than p_end_value'; + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; END IF; - /* check range overlap */ - IF @extschema@.partitions_count(parent_relid) > 0 - AND @extschema@.check_overlap(parent_relid, p_start_value, p_end_value) THEN - RAISE EXCEPTION 'Specified range overlaps with existing partitions'; + /* Check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM @extschema@.check_range_available(parent_relid, + start_value, + end_value); END IF; /* Create new partition */ - v_part_name := @extschema@.create_single_range_partition(parent_relid, - p_start_value, - p_end_value, - partition_name); - PERFORM @extschema@.on_update_partitions(parent_relid); + part_name := 
@extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); - RETURN v_part_name; + RETURN part_name; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* * Drop range partition */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( - p_partition REGCLASS) -RETURNS TEXT AS -$$ +CREATE FUNCTION @extschema@.drop_range_partition( + partition_relid REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS $$ DECLARE parent_relid REGCLASS; part_name TEXT; + part_type INTEGER; + v_relkind CHAR; + v_rows BIGINT; BEGIN - parent_relid := @extschema@.get_parent_of_partition(p_partition); - part_name := p_partition::TEXT; /* save the name to be returned */ + parent_relid := @extschema@.get_parent_of_partition(partition_relid); - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + part_name := partition_relid::TEXT; /* save the name to be returned */ + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF NOT delete_data THEN + EXECUTE pg_catalog.format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition_relid::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + END IF; - /* Drop table */ - EXECUTE format('DROP TABLE %s', part_name); + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition_relid + INTO v_relkind; - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); + /* + * Determine the kind of child relation. 
It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. + */ + IF v_relkind = 'f' THEN + EXECUTE pg_catalog.format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + ELSE + EXECUTE pg_catalog.format('DROP TABLE %s', partition_relid::TEXT); + END IF; RETURN part_name; END -$$ -LANGUAGE plpgsql; - +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ /* * Attach range partition */ -CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( +CREATE FUNCTION @extschema@.attach_range_partition( parent_relid REGCLASS, - p_partition REGCLASS, - p_start_value ANYELEMENT, - p_end_value ANYELEMENT) -RETURNS TEXT AS -$$ + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS $$ DECLARE - v_attname TEXT; + part_expr TEXT; + part_type INTEGER; rel_persistence CHAR; + v_init_callback REGPROCEDURE; BEGIN - /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); /* Ignore temporary tables */ SELECT relpersistence FROM pg_catalog.pg_class - WHERE oid = p_partition INTO rel_persistence; + WHERE oid = partition_relid INTO rel_persistence; IF rel_persistence = 't'::CHAR THEN - RAISE EXCEPTION 'Temporary table "%" cannot be used as a partition', - p_partition::TEXT; - END IF; - - IF @extschema@.check_overlap(parent_relid, p_start_value, p_end_value) THEN - RAISE EXCEPTION 'Specified range overlaps with existing partitions'; + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition_relid::TEXT; END IF; - IF NOT @extschema@.validate_relations_equality(parent_relid, p_partition) THEN - RAISE EXCEPTION 'Partition must have the exact same structure as 
parent'; - END IF; + /* Check range overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', p_partition, parent_relid); + BEGIN + PERFORM @extschema@.is_tuple_convertible(parent_relid, partition_relid); + EXCEPTION WHEN OTHERS THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END; - v_attname := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + part_expr := @extschema@.get_partition_key(parent_relid); + part_type := @extschema@.get_partition_type(parent_relid); - IF v_attname IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', parent_relid::TEXT; + IF part_expr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; END IF; - /* Set check constraint */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - p_partition::TEXT, - @extschema@.build_check_constraint_name(p_partition, v_attname), - @extschema@.build_range_condition(v_attname, - p_start_value, - p_end_value)); + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); + /* Set inheritance */ + EXECUTE pg_catalog.format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); - RETURN p_partition; + /* Set check constraint */ + EXECUTE pg_catalog.format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid), + @extschema@.build_range_condition(partition_relid, + part_expr, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + /* 
Invoke an initialization callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition_relid, + v_init_callback, + start_value, + end_value); + + RETURN partition_relid; END -$$ -LANGUAGE plpgsql; - +$$ LANGUAGE plpgsql; /* * Detach range partition */ -CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( - p_partition REGCLASS) -RETURNS TEXT AS -$$ +CREATE FUNCTION @extschema@.detach_range_partition( + partition_relid REGCLASS) +RETURNS TEXT AS $$ DECLARE - v_attname TEXT; parent_relid REGCLASS; + part_type INTEGER; BEGIN - parent_relid = @extschema@.get_parent_of_partition(p_partition); + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); /* Acquire lock on parent */ - PERFORM @extschema@.lock_partitioned_relation(parent_relid); + PERFORM @extschema@.prevent_data_modification(parent_relid); - v_attname := attname - FROM @extschema@.pathman_config - WHERE partrel = parent_relid; + part_type := @extschema@.get_partition_type(parent_relid); - IF v_attname IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', parent_relid::TEXT; + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; /* Remove inheritance */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', - p_partition::TEXT, + EXECUTE pg_catalog.format('ALTER TABLE %s NO INHERIT %s', + partition_relid::TEXT, parent_relid::TEXT); /* Remove check constraint */ - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - p_partition::TEXT, - @extschema@.build_check_constraint_name(p_partition, v_attname)); - - /* Invalidate cache */ - PERFORM @extschema@.on_update_partitions(parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s DROP 
CONSTRAINT %s', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid)); - RETURN p_partition; + RETURN partition_relid; END -$$ -LANGUAGE plpgsql; +$$ LANGUAGE plpgsql; /* - * Creates an update trigger + * Create a naming sequence for partitioned table. */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_update_trigger( - IN parent_relid REGCLASS) -RETURNS TEXT AS -$$ +CREATE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ DECLARE - func TEXT := 'CREATE OR REPLACE FUNCTION %1$s() - RETURNS TRIGGER AS - $body$ - DECLARE - old_oid Oid; - new_oid Oid; - - BEGIN - old_oid := TG_RELID; - new_oid := @extschema@.find_or_create_range_partition( - ''%2$s''::regclass, NEW.%3$s); - - IF old_oid = new_oid THEN - RETURN NEW; - END IF; - - EXECUTE format(''DELETE FROM %%s WHERE %5$s'', - old_oid::regclass::text) - USING %6$s; - - EXECUTE format(''INSERT INTO %%s VALUES (%7$s)'', - new_oid::regclass::text) - USING %8$s; - - RETURN NULL; - END $body$ - LANGUAGE plpgsql'; - - trigger TEXT := 'CREATE TRIGGER %s ' || - 'BEFORE UPDATE ON %s ' || - 'FOR EACH ROW EXECUTE PROCEDURE %s()'; - - triggername TEXT; - funcname TEXT; - att_names TEXT; - old_fields TEXT; - new_fields TEXT; - att_val_fmt TEXT; - att_fmt TEXT; - attr TEXT; - rec RECORD; + seq_name TEXT; BEGIN - attr := attname FROM @extschema@.pathman_config WHERE partrel = parent_relid; + seq_name := @extschema@.build_sequence_name(parent_relid); - IF attr IS NULL THEN - RAISE EXCEPTION 'Table "%" is not partitioned', parent_relid::TEXT; - END IF; + EXECUTE pg_catalog.format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE pg_catalog.format('CREATE SEQUENCE %s START 1', seq_name); - SELECT string_agg(attname, ', '), - string_agg('OLD.' || attname, ', '), - string_agg('NEW.' 
|| attname, ', '), - string_agg('CASE WHEN NOT $' || attnum || ' IS NULL THEN ' || - attname || ' = $' || attnum || ' ' || - 'ELSE ' || - attname || ' IS NULL END', - ' AND '), - string_agg('$' || attnum, ', ') - FROM pg_attribute - WHERE attrelid::REGCLASS = parent_relid AND attnum > 0 - INTO att_names, - old_fields, - new_fields, - att_val_fmt, - att_fmt; - - /* Build trigger & trigger function's names */ - funcname := @extschema@.build_update_trigger_func_name(parent_relid); - triggername := @extschema@.build_update_trigger_name(parent_relid); - - /* Create function for trigger */ - EXECUTE format(func, funcname, parent_relid, attr, 0, att_val_fmt, - old_fields, att_fmt, new_fields); - - /* Create trigger on every partition */ - FOR rec in (SELECT * FROM pg_catalog.pg_inherits - WHERE inhparent = parent_relid) - LOOP - EXECUTE format(trigger, - triggername, - rec.inhrelid::REGCLASS::TEXT, - funcname); - END LOOP; - - RETURN funcname; + RETURN seq_name; END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Drop a naming sequence for partitioned table. + */ +CREATE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE pg_catalog.format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ /* - * Construct CHECK constraint condition for a range partition. + * Split RANGE partition in two using a pivot. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( - p_attname TEXT, - p_start_value ANYELEMENT, - p_end_value ANYELEMENT) -RETURNS TEXT AS 'pg_pathman', 'build_range_condition' +CREATE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' +LANGUAGE C; + +/* + * Merge RANGE partitions. + */ +CREATE FUNCTION @extschema@.merge_range_partitions( + variadic partitions REGCLASS[]) +RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' LANGUAGE C STRICT; /* - * Returns N-th range (as an array of two elements). + * Drops partition and expands the next partition so that it cover dropped one + * + * This function was written in order to support Oracle-like ALTER TABLE ... + * DROP PARTITION. In Oracle partitions only have upper bound and when + * partition is dropped the next one automatically covers freed range */ -CREATE OR REPLACE FUNCTION @extschema@.get_range_by_idx( - parent_relid REGCLASS, - idx INTEGER, - dummy ANYELEMENT) -RETURNS ANYARRAY AS 'pg_pathman', 'get_range_by_idx' +CREATE FUNCTION @extschema@.drop_range_partition_expand_next( + partition_relid REGCLASS) +RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' LANGUAGE C STRICT; +CREATE FUNCTION @extschema@.create_range_partitions_internal( + parent_relid REGCLASS, + bounds ANYARRAY, + partition_names TEXT[], + tablespaces TEXT[]) +RETURNS REGCLASS AS 'pg_pathman', 'create_range_partitions_internal' +LANGUAGE C; + /* - * Returns min and max values for specified RANGE partition. + * Creates new RANGE partition. Returns partition name. + * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). 
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_range_by_part_oid( +CREATE FUNCTION @extschema@.create_single_range_partition( parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'create_single_range_partition_pl' +LANGUAGE C; + +/* + * Construct CHECK constraint condition for a range partition. + */ +CREATE FUNCTION @extschema@.build_range_condition( partition_relid REGCLASS, - dummy ANYELEMENT) -RETURNS ANYARRAY AS 'pg_pathman', 'get_range_by_part_oid' + expression TEXT, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS 'pg_pathman', 'build_range_condition' +LANGUAGE C; + +/* + * Generate a name for naming sequence. + */ +CREATE FUNCTION @extschema@.build_sequence_name( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' LANGUAGE C STRICT; /* - * Returns min value of the first partition's RangeEntry. + * Returns N-th range (as an array of two elements). */ -CREATE OR REPLACE FUNCTION @extschema@.get_min_range_value( +CREATE FUNCTION @extschema@.get_part_range( parent_relid REGCLASS, + partition_idx INTEGER, dummy ANYELEMENT) -RETURNS ANYELEMENT AS 'pg_pathman', 'get_min_range_value' -LANGUAGE C STRICT; +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_idx' +LANGUAGE C; /* - * Returns max value of the last partition's RangeEntry. + * Returns min and max values for specified RANGE partition. */ -CREATE OR REPLACE FUNCTION @extschema@.get_max_range_value( - parent_relid REGCLASS, +CREATE FUNCTION @extschema@.get_part_range( + partition_relid REGCLASS, dummy ANYELEMENT) -RETURNS ANYELEMENT AS 'pg_pathman', 'get_max_range_value' -LANGUAGE C STRICT; +RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' +LANGUAGE C; /* * Checks if range overlaps with existing partitions. * Returns TRUE if overlaps and FALSE otherwise. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.check_overlap( +CREATE FUNCTION @extschema@.check_range_available( parent_relid REGCLASS, range_min ANYELEMENT, range_max ANYELEMENT) -RETURNS BOOLEAN AS 'pg_pathman', 'check_overlap' -LANGUAGE C STRICT; +RETURNS VOID AS 'pg_pathman', 'check_range_available_pl' +LANGUAGE C; /* - * Needed for an UPDATE trigger. + * Generate range bounds starting with 'p_start' using 'p_interval'. */ -CREATE OR REPLACE FUNCTION @extschema@.find_or_create_range_partition( - parent_relid REGCLASS, - value ANYELEMENT) -RETURNS REGCLASS AS 'pg_pathman', 'find_or_create_range_partition' +CREATE FUNCTION @extschema@.generate_range_bounds( + p_start ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' +LANGUAGE C STRICT; + +CREATE FUNCTION @extschema@.generate_range_bounds( + p_start ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER) +RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' LANGUAGE C STRICT; diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 00000000..2e2edc6f --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,194 @@ +#!/usr/bin/env bash + +# +# Copyright (c) 2018, Postgres Professional +# +# supported levels: +# * standard +# * scan-build +# * hardcore +# * nightmare +# + +set -ux +status=0 + +# global exports +export PGPORT=55435 +export VIRTUAL_ENV_DISABLE_PROMPT=1 + +PATHMAN_DIR=$PWD + +# indicator of using cassert + valgrind support +USE_ASSERT_VALGRIND=false +if [ "$LEVEL" = "hardcore" ] || \ + [ "$LEVEL" = "nightmare" ]; then + USE_ASSERT_VALGRIND=true +fi + +# indicator of using special patch for vanilla +if [ "$(printf '%s\n' "14" "$PG_VERSION" | sort -V | head -n1)" = "$PG_VERSION" ]; then + USE_PATH=false +else + #patch version 14 and newer + USE_PATH=true +fi + +# rebuild PostgreSQL with cassert + valgrind support +if [ "$USE_ASSERT_VALGRIND" = true ] || \ + [ "$USE_PATH" = true ]; then + + set -e + + CUSTOM_PG_BIN=$PWD/pg_bin + 
CUSTOM_PG_SRC=$PWD/postgresql + + # here PG_VERSION is provided by postgres:X-alpine docker image + curl "https://ftp.postgresql.org/pub/source/v$PG_VERSION/postgresql-$PG_VERSION.tar.bz2" -o postgresql.tar.bz2 + echo "$PG_SHA256 *postgresql.tar.bz2" | sha256sum -c - + + mkdir $CUSTOM_PG_SRC + + tar \ + --extract \ + --file postgresql.tar.bz2 \ + --directory $CUSTOM_PG_SRC \ + --strip-components 1 + + cd $CUSTOM_PG_SRC + + if [ "$USE_PATH" = true ]; then + # apply the patch + patch -p1 < $PATHMAN_DIR/patches/REL_${PG_VERSION%.*}_STABLE-pg_pathman-core.diff + fi + + if [ "$USE_ASSERT_VALGRIND" = true ]; then + # enable Valgrind support + sed -i.bak "s/\/* #define USE_VALGRIND *\//#define USE_VALGRIND/g" src/include/pg_config_manual.h + + # enable additional options + ./configure \ + CFLAGS='-Og -ggdb3 -fno-omit-frame-pointer' \ + --enable-cassert \ + --prefix=$CUSTOM_PG_BIN \ + --quiet + else + # without additional options + ./configure \ + --enable-cassert \ + --prefix=$CUSTOM_PG_BIN \ + --quiet + fi + + # build & install PG + time make -s -j$(nproc) && make -s install + + # build & install FDW + time make -s -C contrib/postgres_fdw -j$(nproc) && \ + make -s -C contrib/postgres_fdw install + + # override default PostgreSQL instance + export PATH=$CUSTOM_PG_BIN/bin:$PATH + export LD_LIBRARY_PATH=$CUSTOM_PG_BIN/lib + + # show pg_config path (just in case) + which pg_config + + cd - + + set +e +fi + +# show pg_config just in case +pg_config + +# perform code checks if asked to +if [ "$LEVEL" = "scan-build" ] || \ + [ "$LEVEL" = "hardcore" ] || \ + [ "$LEVEL" = "nightmare" ]; then + + # perform static analyzis + scan-build --status-bugs make USE_PGXS=1 || status=$? + + # something's wrong, exit now! 
+ if [ $status -ne 0 ]; then exit 1; fi + + # don't forget to "make clean" + make USE_PGXS=1 clean +fi + + +# build and install extension (using PG_CPPFLAGS and SHLIB_LINK for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" +make USE_PGXS=1 install + +# initialize database +initdb -D $PGDATA + +# change PG's config +echo "port = $PGPORT" >> $PGDATA/postgresql.conf +cat conf.add >> $PGDATA/postgresql.conf + +# restart cluster 'test' +if [ "$LEVEL" = "nightmare" ]; then + ls $CUSTOM_PG_BIN/bin + + valgrind \ + --tool=memcheck \ + --leak-check=no \ + --time-stamp=yes \ + --track-origins=yes \ + --trace-children=yes \ + --gen-suppressions=all \ + --suppressions=$CUSTOM_PG_SRC/src/tools/valgrind.supp \ + --log-file=/tmp/valgrind-%p.log \ + pg_ctl start -l /tmp/postgres.log -w || status=$? +else + pg_ctl start -l /tmp/postgres.log -w || status=$? +fi + +# something's wrong, exit now! +if [ $status -ne 0 ]; then cat /tmp/postgres.log; exit 1; fi + +# run regression tests +export PG_REGRESS_DIFF_OPTS="-w -U3" # for alpine's diff (BusyBox) +make USE_PGXS=1 installcheck || status=$? + +# show diff if it exists +if [ -f regression.diffs ]; then cat regression.diffs; fi + +# run python tests +set +x +virtualenv /tmp/env && source /tmp/env/bin/activate && pip install testgres +make USE_PGXS=1 python_tests || status=$? +deactivate +set -x + +if [ $status -ne 0 ]; then tail -n 2000 tests/python/tests.log; fi + +# show Valgrind logs if necessary +if [ "$LEVEL" = "nightmare" ]; then + for f in $(find /tmp -name valgrind-*.log); do + if grep -q 'Command: [^ ]*/postgres' $f && grep -q 'ERROR SUMMARY: [1-9]' $f; then + echo "========= Contents of $f" + cat $f + status=1 + fi + done +fi + +# run cmocka tests (using CFLAGS_SL for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? + +# something's wrong, exit now! 
+if [ $status -ne 0 ]; then exit 1; fi + +# generate *.gcov files +gcov *.c *.h + + +set +ux + + +# send coverage stats to Codecov +bash <(curl -s https://codecov.io/bash) diff --git a/specs/for_update.spec b/specs/for_update.spec index 55ea24af..c18cd4f8 100644 --- a/specs/for_update.spec +++ b/specs/for_update.spec @@ -13,16 +13,14 @@ teardown } session "s1" -step "s1_b" { begin; } -step "s1_c" { commit; } -step "s1_r" { rollback; } -step "s1_update" { update test_tbl set id = 2 where id = 1; } +step "s1_b" { begin; } +step "s1_c" { commit; } +step "s1_r" { rollback; } +step "s1_update" { update test_tbl set id = 2 where id = 1; } session "s2" -step "s2_b" { begin; } -step "s2_c" { commit; } -step "s2_select_locked" { select * from test_tbl where id = 1 for share; } -step "s2_select" { select * from test_tbl where id = 1; } +step "s2_select_locked" { select * from test_tbl where id = 1 for share; } +step "s2_select" { select * from test_tbl where id = 1; } permutation "s1_b" "s1_update" "s2_select" "s1_r" diff --git a/specs/insert_nodes.spec b/specs/insert_nodes.spec index 93df4102..a5d0c7f9 100644 --- a/specs/insert_nodes.spec +++ b/specs/insert_nodes.spec @@ -3,6 +3,7 @@ setup CREATE EXTENSION pg_pathman; CREATE TABLE range_rel(id serial primary key); SELECT create_range_partitions('range_rel', 'id', 1, 100, 1); + SELECT set_spawn_using_bgw('range_rel', true); } teardown @@ -13,20 +14,25 @@ teardown } session "s1" -step "s1b" { BEGIN; } -step "s1_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } -step "s1_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } -step "s1_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; } -step "s1r" { ROLLBACK; } -step "s1c" { COMMIT; } +step "s1b" { BEGIN; } +step "s1_insert_150" { INSERT INTO range_rel SELECT 
generate_series(1, 150); } +step "s1_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } +step "s1_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' + ORDER BY c.oid; } +step "s1r" { ROLLBACK; } session "s2" -step "s2b" { BEGIN; } -step "s2_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } -step "s2_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } -step "s2_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid AND c.consrc IS NOT NULL WHERE i.inhparent = 'range_rel'::regclass::oid ORDER BY c.oid; } -step "s2r" { ROLLBACK; } -step "s2c" { COMMIT; } +step "s2b" { BEGIN; } +step "s2_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } +step "s2_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } +step "s2_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c + ON c.conrelid = i.inhrelid + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' + ORDER BY c.oid; } +step "s2r" { ROLLBACK; } +step "s2c" { COMMIT; } # Rollback first transactions permutation "s1b" "s1_insert_150" "s1r" "s1_show_partitions" "s2b" "s2_insert_150" "s2c" "s2_show_partitions" diff --git a/specs/rollback_on_create_partitions.spec b/specs/rollback_on_create_partitions.spec index 41fc48d1..806e6072 100644 --- a/specs/rollback_on_create_partitions.spec +++ b/specs/rollback_on_create_partitions.spec @@ -11,18 +11,18 @@ teardown } session "s1" -step "begin" { BEGIN; } -step "rollback" { ROLLBACK; } -step "commit" { COMMIT; } -step "insert_data" { INSERT INTO range_rel SELECT generate_series(1, 10000); } -step "create_partitions" { SELECT create_range_partitions('range_rel', 'id', 1, 1000); } -step "drop_partitions" { SELECT drop_partitions('range_rel'); } -step 
"savepoint_a" { SAVEPOINT a; } -step "rollback_a" { ROLLBACK TO SAVEPOINT a; } -step "savepoint_b" { SAVEPOINT b; } -step "rollback_b" { ROLLBACK TO SAVEPOINT b; } -step "savepoint_c" { SAVEPOINT c; } -step "show_rel" { EXPLAIN (COSTS OFF) SELECT * FROM range_rel; } +step "begin" { BEGIN; } +step "rollback" { ROLLBACK; } +step "commit" { COMMIT; } +step "insert_data" { INSERT INTO range_rel SELECT generate_series(1, 10000); } +step "create_partitions" { SELECT create_range_partitions('range_rel', 'id', 1, 1000); } +step "drop_partitions" { SELECT drop_partitions('range_rel'); } +step "savepoint_a" { SAVEPOINT a; } +step "rollback_a" { ROLLBACK TO SAVEPOINT a; } +step "savepoint_b" { SAVEPOINT b; } +step "rollback_b" { ROLLBACK TO SAVEPOINT b; } +step "savepoint_c" { SAVEPOINT c; } +step "show_rel" { SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; } permutation "begin" "insert_data" "create_partitions" "show_rel" "rollback" "show_rel" diff --git a/sql/pathman_CVE-2020-14350.sql b/sql/pathman_CVE-2020-14350.sql new file mode 100644 index 00000000..07daa617 --- /dev/null +++ b/sql/pathman_CVE-2020-14350.sql @@ -0,0 +1,78 @@ +/* + * Check fix for CVE-2020-14350. + * See also 7eeb1d986 postgresql commit. 
+ */ + +SET client_min_messages = 'warning'; +DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); +DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE IF EXISTS test1 CASCADE; +DROP TABLE IF EXISTS test2 CASCADE; +DROP ROLE IF EXISTS pathman_regress_hacker; +SET client_min_messages = 'notice'; +GRANT CREATE ON SCHEMA public TO PUBLIC; + +CREATE EXTENSION pg_pathman; +CREATE ROLE pathman_regress_hacker LOGIN; + +-- Test 1 +RESET ROLE; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; + +SET ROLE pathman_regress_hacker; +SHOW is_superuser; +CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) +RETURNS bigint +AS $$ +BEGIN + ALTER ROLE pathman_regress_hacker SUPERUSER; + SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; +END +$$ LANGUAGE plpgsql; + +CREATE TABLE test1(i INT4 NOT NULL); +INSERT INTO test1 SELECT generate_series(1, 500); +SELECT create_hash_partitions('test1', 'i', 5, false); + +RESET ROLE; +SELECT partition_table_concurrently('test1', 10, 1); +SELECT pg_sleep(1); + +-- Test result (must be 'off') +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + +-- Test 2 +RESET ROLE; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; + +SET ROLE pathman_regress_hacker; +SHOW is_superuser; +CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) +RETURNS REGCLASS +AS $$ +BEGIN + ALTER ROLE pathman_regress_hacker SUPERUSER; + RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); +END +$$ LANGUAGE plpgsql; + +RESET ROLE; +CREATE TABLE test2(i INT4 NOT NULL); +INSERT INTO test2 VALUES(0); +SELECT create_range_partitions('test2', 'i', 0, 1); +INSERT INTO test2 values(1); + +-- Test result (must be 'off') +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + +-- Cleanup +RESET ROLE; +DROP FUNCTION 
_partition_data_concurrent(oid,integer); +DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE test1 CASCADE; +DROP TABLE test2 CASCADE; +DROP ROLE pathman_regress_hacker; +DROP EXTENSION pg_pathman; + diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql new file mode 100644 index 00000000..9f1b0c1e --- /dev/null +++ b/sql/pathman_array_qual.sql @@ -0,0 +1,432 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; + + + +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); + +ANALYZE; + +/* + * Test expr op ANY (...) 
+ */ + +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); + +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + + + +DROP TABLE array_qual.test CASCADE; + + + +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); + +ANALYZE; + +/* + * Test expr IN (...) + */ + +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + +/* b IN (...) 
- pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + + +/* + * Test expr = ANY (...) + */ + +/* a = ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + + +/* + * Test expr = ALL (...) + */ + +/* a = ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + + +/* + * Test expr < ANY (...) + */ + +/* a < ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + +SET pg_pathman.enable = f; +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); +SET pg_pathman.enable = t; +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + + +/* + * Test expr < ALL (...) + */ + +/* a < ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + +SET pg_pathman.enable = f; +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); +SET pg_pathman.enable = t; +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + + +/* + * Test expr > ANY (...) + */ + +/* a > ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + +SET pg_pathman.enable = f; +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); +SET pg_pathman.enable = t; +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + + +/* + * Test expr > ALL (...) + */ + +/* a > ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + +SET pg_pathman.enable = f; +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); +SET pg_pathman.enable = t; +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + + +/* + * Test expr > ANY (... $1 ...) + */ + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXECUTE 
q(NULL); +DEALLOCATE q; + + +/* + * Test expr > ALL (... $1 ...) + */ + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, $1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXPLAIN (COSTS OFF) EXECUTE q(500); +EXECUTE q(NULL); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) 
EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(999); +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +DEALLOCATE q; + +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + 
EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ +EXECUTE q(1000); +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +DEALLOCATE q; + + +/* + * Test expr = ALL (... $1 ...) 
+ */ + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +DEALLOCATE q; + +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXPLAIN (COSTS OFF) EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(100); +DEALLOCATE q; + + + +DROP TABLE array_qual.test CASCADE; +DROP SCHEMA array_qual; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql new file mode 100644 index 00000000..478935c5 --- /dev/null +++ b/sql/pathman_basic.sql @@ -0,0 +1,579 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER); +INSERT INTO test.hash_rel VALUES (1, 1); +INSERT INTO test.hash_rel VALUES (2, 2); +INSERT INTO test.hash_rel VALUES (3, 3); + +\set VERBOSITY default +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +\set VERBOSITY terse + +ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; +SELECT * FROM test.hash_rel; +SELECT pathman.set_enable_parent('test.hash_rel', false); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; +SELECT * FROM test.hash_rel; +SELECT pathman.set_enable_parent('test.hash_rel', true); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; +SELECT * FROM test.hash_rel; +SELECT pathman.drop_partitions('test.hash_rel'); +SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); +SELECT COUNT(*) FROM test.hash_rel; +SELECT COUNT(*) FROM ONLY test.hash_rel; +INSERT INTO test.hash_rel VALUES (4, 4); +INSERT INTO test.hash_rel VALUES (5, 5); +INSERT INTO test.hash_rel VALUES (6, 6); +SELECT COUNT(*) FROM test.hash_rel; +SELECT COUNT(*) FROM ONLY test.hash_rel; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; + +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +\set VERBOSITY terse + +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; + +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +SELECT 
pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); +SELECT COUNT(*) FROM test.range_rel; +SELECT COUNT(*) FROM ONLY test.range_rel; + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); +SELECT COUNT(*) FROM test.num_range_rel; +SELECT COUNT(*) FROM ONLY test.num_range_rel; +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT COUNT(*) FROM test.num_range_rel; +SELECT COUNT(*) FROM ONLY test.num_range_rel; + + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + +DROP TABLE test.improved_dummy CASCADE; + + +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); + +SELECT 
pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + +SELECT pathman.drop_partitions('test.improved_dummy'); + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + +SELECT pathman.drop_partitions('test.improved_dummy'); + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + +DROP TABLE test.improved_dummy CASCADE; + + +/* Test pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... 
*/ + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; + + +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; + +VACUUM; + +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SET enable_seqscan = ON; + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE 
dt >= '2015-02-15' AND dt < '2015-03-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + + +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SET enable_seqscan = OFF; + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt 
DESC; + +/* + * Sorting + */ +SET enable_indexscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; + +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); + +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); +SELECT * FROM test.hash_varchar WHERE val = 'a'; +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + +DROP TABLE test.hash_varchar CASCADE; + + +/* + * Test split and merge + */ + +/* Split first partition in half */ +SELECT pathman.split_range_partition('test.num_range_rel_1', 500); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + +SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); + +/* Merge two partitions into one */ +SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || 
currval('test.num_range_rel_seq')); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + +SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); + +/* Append and prepend partitions */ +SELECT pathman.append_range_partition('test.num_range_rel'); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; +SELECT pathman.prepend_range_partition('test.num_range_rel'); +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; +SELECT pathman.drop_range_partition('test.num_range_rel_7'); + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + +SELECT pathman.append_range_partition('test.range_rel'); +SELECT pathman.prepend_range_partition('test.range_rel'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; +SELECT pathman.drop_range_partition('test.range_rel_7'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; +CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' 
AND '2015-01-15'; +SELECT pathman.detach_range_partition('test.range_rel_archive'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; +CREATE TABLE test.range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +CREATE TABLE test.range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); + +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); +SELECT pathman.append_range_partition('test.range_rel'); +SELECT pathman.prepend_range_partition('test.range_rel'); +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + +/* + * Zero partitions count and adding partitions with specified name + */ +CREATE TABLE test.zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); +SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); +SELECT pathman.prepend_range_partition('test.zero', 
'test.zero_1'); +SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); +SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); +SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); +DROP TABLE test.zero CASCADE; + +/* + * Check that altering table columns doesn't break trigger + */ +ALTER TABLE test.hash_rel ADD COLUMN abc int; +INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); +SELECT * FROM test.hash_rel WHERE id = 123; + +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + +/* + * Clean up + */ +SELECT pathman.drop_partitions('test.hash_rel'); +SELECT COUNT(*) FROM ONLY test.hash_rel; +SELECT 
pathman.create_hash_partitions('test.hash_rel', 'value', 3); +SELECT pathman.drop_partitions('test.hash_rel', TRUE); +SELECT COUNT(*) FROM ONLY test.hash_rel; +DROP TABLE test.hash_rel CASCADE; + +SELECT pathman.drop_partitions('test.num_range_rel'); +DROP TABLE test.num_range_rel CASCADE; + +DROP TABLE test.range_rel CASCADE; + +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; +DROP TABLE test.range_rel CASCADE; + +/* Test automatic partition creation */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); +INSERT INTO test.range_rel (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); + +INSERT INTO test.range_rel (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; +SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; +SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + +SELECT pathman.set_auto('test.range_rel', false); +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +SELECT pathman.set_auto('test.range_rel', true); +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); + +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel 
DROP COLUMN data; +SELECT * FROM pathman.pathman_config; +DROP TABLE test.range_rel CASCADE; +SELECT * FROM pathman.pathman_config; + +/* Check overlaps */ +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); + +/* CaMeL cAsE table names and attributes */ +CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); +SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); +SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); +INSERT INTO test."TeSt" VALUES (1, 1); +INSERT INTO test."TeSt" VALUES (2, 2); +INSERT INTO test."TeSt" VALUES (3, 3); +SELECT * FROM test."TeSt"; +DROP TABLE test."TeSt" CASCADE; + +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test."RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); +SELECT pathman.append_range_partition('test."RangeRel"'); +SELECT pathman.prepend_range_partition('test."RangeRel"'); +SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); +SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); +DROP TABLE test."RangeRel" CASCADE; +SELECT * FROM 
pathman.pathman_config; +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); +DROP TABLE test."RangeRel" CASCADE; + +DROP EXTENSION pg_pathman; + + +/* Test that everything works fine without schemas */ +CREATE EXTENSION pg_pathman; + +/* Hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); +SELECT append_range_partition('test.range_rel'); +SELECT prepend_range_partition('test.range_rel'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; + +/* Create range partitions from whole range */ +SELECT drop_partitions('test.range_rel'); + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM bool_test; +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ +DROP TABLE bool_test CASCADE; + +/* Special test case 
(quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); +SELECT set_enable_parent('test.index_on_childs', true); +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT 
NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + +DROP TABLE test.provided_part_names CASCADE; + +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; +SELECT * FROM test.mixinh_parent; + +DROP TABLE test.hash_rel CASCADE; +DROP TABLE test.index_on_childs CASCADE; +DROP TABLE test.mixinh_child1 CASCADE; +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql new file mode 100644 index 00000000..74239e99 --- /dev/null +++ b/sql/pathman_bgw.sql @@ -0,0 +1,149 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_bgw; + + + +/* + * Tests for SpawnPartitionsWorker + */ + +/* int4, size of Datum == 4 */ +CREATE TABLE test_bgw.test_1(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_1', 'val', 1, 5, 2); + +SELECT set_spawn_using_bgw('test_bgw.test_1', true); +INSERT INTO test_bgw.test_1 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP TABLE test_bgw.test_1 CASCADE; + + +/* 
int8, size of Datum == 8 */ +CREATE TABLE test_bgw.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('test_bgw.test_2', 'val', 1, 5, 2); + +SELECT set_spawn_using_bgw('test_bgw.test_2', true); +INSERT INTO test_bgw.test_2 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP TABLE test_bgw.test_2 CASCADE; + + +/* numeric, size of Datum == var */ +CREATE TABLE test_bgw.test_3(val NUMERIC NOT NULL); +SELECT create_range_partitions('test_bgw.test_3', 'val', 1, 5, 2); + +SELECT set_spawn_using_bgw('test_bgw.test_3', true); +INSERT INTO test_bgw.test_3 VALUES (11); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP TABLE test_bgw.test_3 CASCADE; + + +/* date, size of Datum == var */ +CREATE TABLE test_bgw.test_4(val DATE NOT NULL); +SELECT create_range_partitions('test_bgw.test_4', 'val', '20170213'::date, '1 day'::interval, 2); + +SELECT set_spawn_using_bgw('test_bgw.test_4', true); +INSERT INTO test_bgw.test_4 VALUES ('20170215'); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP TABLE test_bgw.test_4 CASCADE; + + +/* test error handling in BGW */ +CREATE TABLE test_bgw.test_5(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_5', 'val', 1, 10, 2); + +CREATE OR REPLACE FUNCTION test_bgw.abort_xact(args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE EXCEPTION 'aborting xact!'; +END +$$ language plpgsql; + +SELECT set_spawn_using_bgw('test_bgw.test_5', true); +SELECT set_init_callback('test_bgw.test_5', 'test_bgw.abort_xact(jsonb)'); +INSERT INTO test_bgw.test_5 VALUES (-100); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP FUNCTION test_bgw.abort_xact(args JSONB); +DROP TABLE test_bgw.test_5 CASCADE; + + + +/* + * Tests for ConcurrentPartWorker + */ + +CREATE TABLE test_bgw.conc_part(id INT4 NOT NULL); +INSERT INTO test_bgw.conc_part SELECT 
generate_series(1, 500); +SELECT create_hash_partitions('test_bgw.conc_part', 'id', 5, false); + +BEGIN; +/* Also test FOR SHARE/UPDATE conflicts in BGW */ +SELECT * FROM test_bgw.conc_part ORDER BY id LIMIT 1 FOR SHARE; +/* Run partitioning bgworker */ +SELECT partition_table_concurrently('test_bgw.conc_part', 10, 1); +/* Wait until bgworker starts */ +SELECT pg_sleep(1); +ROLLBACK; + +/* Wait until it finises */ +DO $$ +DECLARE + ops int8; + rows int8; + rows_old int8 := 0; + i int4 := 0; -- protect from endless loop +BEGIN + LOOP + -- get total number of processed rows + SELECT processed + FROM pathman_concurrent_part_tasks + WHERE relid = 'test_bgw.conc_part'::regclass + INTO rows; + + -- get number of partitioning tasks + GET DIAGNOSTICS ops = ROW_COUNT; + + IF ops > 0 THEN + PERFORM pg_sleep(0.2); + + ASSERT rows IS NOT NULL; + + IF rows_old = rows THEN + i = i + 1; + ELSIF rows < rows_old THEN + RAISE EXCEPTION 'rows is decreasing: new %, old %', rows, rows_old; + ELSIF rows > 500 THEN + RAISE EXCEPTION 'processed % rows', rows; + END IF; + ELSE + EXIT; -- exit loop + END IF; + + IF i > 500 THEN + RAISE WARNING 'looks like partitioning bgw is stuck!'; + EXIT; -- exit loop + END IF; + + rows_old = rows; + END LOOP; +END +$$ LANGUAGE plpgsql; + +/* Check amount of tasks and rows in parent and partitions */ +SELECT count(*) FROM pathman_concurrent_part_tasks; +SELECT count(*) FROM ONLY test_bgw.conc_part; +SELECT count(*) FROM test_bgw.conc_part; + +DROP TABLE test_bgw.conc_part CASCADE; + + + +DROP SCHEMA test_bgw; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql new file mode 100644 index 00000000..e3fe00d9 --- /dev/null +++ b/sql/pathman_cache_pranks.sql @@ -0,0 +1,122 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? 
+ +SET search_path = 'public'; + +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +SET pg_pathman.enable = true; + +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; + +-- create it for further tests +CREATE EXTENSION pg_pathman; + +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); +SELECT set_interval('part_test', 100); +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +SELECT disable_pathman_for('part_test'); + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +SELECT add_to_pathman_config('part_test', 'val', '10'); +SELECT add_to_pathman_config('part_test', 'val'); + +DROP TABLE part_test CASCADE; +-- + +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); +SELECT append_range_partition('part_test'); +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ + +DROP TABLE part_test CASCADE; +-- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. 
+-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; + +EXECUTE q(1); + +DEALLOCATE q; +DROP TABLE part_test CASCADE; + +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); + +SET pg_pathman.enable = f; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; + +EXECUTE q(1); + +DEALLOCATE q; +DROP TABLE part_test CASCADE; + +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 2); + +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); + +BEGIN; + BEGIN AUTONOMOUS; + DROP EXTENSION pg_pathman; + CREATE EXTENSION pg_pathman; + COMMIT; +COMMIT; + +EXECUTE q(1); + +DEALLOCATE q; +DROP TABLE part_test CASCADE; + +-- finalize +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql new file mode 100644 index 00000000..ecc2c30f --- /dev/null +++ b/sql/pathman_calamity.sql @@ -0,0 +1,476 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for 
inherited tables. + */ + +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; + + +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); +SELECT pathman_version(); +set client_min_messages = NOTICE; + + +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); + + +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT drop_partitions('calamity.part_test'); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT drop_partitions('calamity.part_test'); + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT append_range_partition('calamity.part_test'); +SELECT drop_partitions('calamity.part_test'); + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT append_range_partition('calamity.part_test'); +SELECT drop_partitions('calamity.part_test'); + +SELECT count(*) FROM calamity.part_test; + +DELETE FROM calamity.part_test; + + +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ + +SELECT add_to_pathman_config('calamity.part_test', 'val'); +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; + + +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ + +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ + +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ + +SELECT 
create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ + +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ + +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ + +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ + +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ + +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ + +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ + +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ + + +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); +select add_range_partition(' calamity.no_naming_seq', 10, 20); +DROP TABLE calamity.no_naming_seq CASCADE; + + +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +DROP TABLE calamity.double_inf CASCADE; + + +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); +EXPLAIN (COSTS OFF) SELECT * 
FROM calamity.part_test; +SELECT drop_partitions('calamity.part_test', true); +DELETE FROM calamity.part_test; + + +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ + +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); +SELECT set_interval('calamity.part_test', 100); /* ok */ +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +SELECT drop_partitions('calamity.part_test', true); +DELETE FROM calamity.part_test; + +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); +SELECT build_hash_condition('text', 'val', 10, 1); +SELECT build_hash_condition('int4', 'val', 1, 1); +SELECT build_hash_condition('int4', 'val', 10, 20); +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); + +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +SELECT 
validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ + +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); +SELECT validate_relname(1::REGCLASS); +SELECT validate_relname(NULL); + +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); +SELECT get_number_of_partitions(NULL) IS NULL; + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +SELECT get_parent_of_partition(NULL) IS NULL; + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); +SELECT get_base_type('calamity.test_domain'::regtype); +SELECT get_base_type(NULL) IS NULL; + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +SELECT get_partition_key_type(0::regclass); +SELECT get_partition_key_type(NULL) IS NULL; + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +SELECT build_check_constraint_name(NULL) IS NULL; + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +SELECT build_sequence_name(NULL) IS NULL; + +/* 
check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +SELECT partition_table_concurrently('pg_class'); /* not ok */ + +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ + +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +SELECT drop_range_partition_expand_next(NULL) IS NULL; + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; +SELECT generate_range_bounds(0, 100, NULL) IS NULL; +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; + +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + +/* HASH */ 
+SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); + +DROP FUNCTION calamity.dummy_cb(arg jsonb); + + +/* check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ +SELECT disable_pathman_for('calamity.part_test'); +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ +SELECT disable_pathman_for('calamity.part_test'); + + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ + +SELECT add_to_pathman_config('calamity.part_test', 'val'); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + +ALTER TABLE 
calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; + +/* check GUC variable */ +SHOW pg_pathman.enable; + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); + +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +SELECT 
get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + +DROP TABLE calamity.test_range_idx CASCADE; + + +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + +DROP TABLE calamity.test_range_oid CASCADE; + + +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ + +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); + +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ + +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; + +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +DROP TABLE calamity.part_ok CASCADE; +DROP TABLE calamity.hash_two_times CASCADE; +DROP TABLE calamity.to_be_disabled CASCADE; +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; + + + +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ + +CREATE EXTENSION pg_pathman; + +SET pg_pathman.enable = false; +SET 
pg_pathman.enable = true; +SET pg_pathman.enable = false; +RESET pg_pathman.enable; +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; + +DROP EXTENSION pg_pathman; + + + +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ + +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; + +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; + +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; + +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; 
+SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; + + + +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ + +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; + + +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +SELECT * FROM pathman_partition_list; /* not ok */ +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + +SET pg_pathman.enable = t; /* LOAD CONFIG */ + 
+SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ +SELECT * FROM pathman_partition_list; /* OK */ +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + +DROP TABLE calamity.survivor CASCADE; + + +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql new file mode 100644 index 00000000..096a55ad --- /dev/null +++ b/sql/pathman_callbacks.sql @@ -0,0 +1,151 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA callbacks; + + + +/* callback #1 */ +CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE WARNING 'callback arg: %', args::TEXT; +END +$$ language plpgsql; + +/* callback #2 */ +CREATE OR REPLACE FUNCTION public.dummy_cb(args JSONB) +RETURNS VOID AS $$ +BEGIN +END +$$ language plpgsql; + + + +CREATE TABLE callbacks.abc(a serial, b int); +SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); + +SELECT set_init_callback('callbacks.abc', 'public.dummy_cb(jsonb)'); + +/* check that callback is schema-qualified */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + +/* reset callback */ +SELECT set_init_callback('callbacks.abc'); + +/* should return NULL */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + +SELECT set_init_callback('callbacks.abc', + 'callbacks.abc_on_part_created_callback(jsonb)'); + +/* check that callback is schema-qualified */ +SELECT init_callback FROM pathman_config_params +WHERE partrel = 'callbacks.abc'::REGCLASS; + +DROP TABLE callbacks.abc CASCADE; + + +/* set callback to be called on RANGE partitions */ +CREATE TABLE callbacks.abc(a serial, b int); +SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); + +SELECT set_init_callback('callbacks.abc', + 
'callbacks.abc_on_part_created_callback(jsonb)'); + +INSERT INTO callbacks.abc VALUES (123, 1); +INSERT INTO callbacks.abc VALUES (223, 1); /* show warning */ + +SELECT set_spawn_using_bgw('callbacks.abc', true); +SELECT get_number_of_partitions('callbacks.abc'); +INSERT INTO callbacks.abc VALUES (323, 1); +SELECT get_number_of_partitions('callbacks.abc'); /* +1 partition (created by BGW) */ +SELECT set_spawn_using_bgw('callbacks.abc', false); + + +SELECT append_range_partition('callbacks.abc'); +SELECT prepend_range_partition('callbacks.abc'); +SELECT add_range_partition('callbacks.abc', 501, 602); + +SELECT drop_partitions('callbacks.abc'); + + +/* set callback to be called on HASH partitions */ +SELECT set_init_callback('callbacks.abc', + 'callbacks.abc_on_part_created_callback(jsonb)'); +SELECT create_hash_partitions('callbacks.abc', 'a', 5); + +DROP TABLE callbacks.abc CASCADE; + + +/* test the temprary deletion of callback function */ +CREATE TABLE callbacks.abc(a serial, b int); +SELECT set_init_callback('callbacks.abc', + 'callbacks.abc_on_part_created_callback(jsonb)'); +SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); + +INSERT INTO callbacks.abc VALUES (201, 0); /* +1 new partition */ + +BEGIN; +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +INSERT INTO callbacks.abc VALUES (301, 0); /* +0 new partitions (ERROR) */ +ROLLBACK; + +INSERT INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ + +DROP TABLE callbacks.abc CASCADE; + + +/* more complex test using rotation of tables */ +CREATE TABLE callbacks.abc(a INT4 NOT NULL); +INSERT INTO callbacks.abc + SELECT a FROM generate_series(1, 100) a; +SELECT create_range_partitions('callbacks.abc', 'a', 1, 10, 10); + +CREATE OR REPLACE FUNCTION callbacks.rotation_callback(params jsonb) +RETURNS VOID AS +$$ +DECLARE + relation regclass; + parent_rel regclass; +BEGIN + parent_rel := concat(params->>'partition_schema', '.', params->>'parent')::regclass; + + -- drop "old" 
partitions + FOR relation IN (SELECT partition FROM + (SELECT partition, range_min::INT4 FROM pathman_partition_list + WHERE parent = parent_rel + ORDER BY range_min::INT4 DESC + OFFSET 4) t -- remain 4 last partitions + ORDER BY range_min) + LOOP + RAISE NOTICE 'dropping partition %', relation; + PERFORM drop_range_partition(relation); + END LOOP; +END +$$ LANGUAGE plpgsql; + +SELECT * FROM pathman_partition_list +WHERE parent = 'callbacks.abc'::REGCLASS +ORDER BY range_min::INT4; + +SELECT set_init_callback('callbacks.abc', + 'callbacks.rotation_callback(jsonb)'); + +INSERT INTO callbacks.abc VALUES (1000); +INSERT INTO callbacks.abc VALUES (1500); + +SELECT * FROM pathman_partition_list +WHERE parent = 'callbacks.abc'::REGCLASS +ORDER BY range_min::INT4; + + + +DROP TABLE callbacks.abc CASCADE; +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +DROP FUNCTION public.dummy_cb(jsonb); +DROP FUNCTION callbacks.rotation_callback(jsonb); +DROP SCHEMA callbacks; +DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql new file mode 100644 index 00000000..d3f16107 --- /dev/null +++ b/sql/pathman_column_type.sql @@ -0,0 +1,98 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; + + +/* + * RANGE partitioning. + */ + +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; + +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); + +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + +SELECT drop_partitions('test_column_type.test'); +DROP TABLE test_column_type.test CASCADE; + + +/* + * HASH partitioning. 
+ */ + +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; + +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; + +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + +SELECT drop_partitions('test_column_type.test'); +DROP TABLE test_column_type.test CASCADE; + + +DROP SCHEMA test_column_type; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_cte.sql b/sql/pathman_cte.sql new file mode 100644 index 00000000..594c6db7 --- /dev/null +++ b/sql/pathman_cte.sql @@ -0,0 +1,162 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; + +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); + +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; + +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + +DROP TABLE test_cte.range_rel CASCADE; + + +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + +DROP TABLE test_cte.hash_rel CASCADE; + + + +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); + +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; + +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); + +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ + +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM 
test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + +/* parent disabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; + +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; + + +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT 
create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); + +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + + + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql new file mode 100644 index 00000000..eb12c295 --- /dev/null +++ b/sql/pathman_declarative.sql @@ -0,0 +1,50 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); + +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + +SELECT * FROM pathman.pathman_partition_list; +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM 
('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; +\d+ test.r2; +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman.pathman_partition_list; +\d+ test.r2; + +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + +/* Note: PG-10 doesn't support ATTACH PARTITION ... DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; + +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_domains.sql b/sql/pathman_domains.sql new file mode 100644 index 00000000..105b2399 --- /dev/null +++ b/sql/pathman_domains.sql @@ -0,0 +1,47 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA domains; + +CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); + +CREATE TABLE domains.dom_table(val domains.dom_test NOT NULL); +INSERT INTO domains.dom_table SELECT generate_series(1, 999); + +SELECT create_range_partitions('domains.dom_table', 'val', 1, 100); + +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 250; + +INSERT INTO domains.dom_table VALUES(1500); +INSERT INTO domains.dom_table VALUES(-10); + +SELECT append_range_partition('domains.dom_table'); +SELECT prepend_range_partition('domains.dom_table'); +SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); +SELECT split_range_partition('domains.dom_table_1', 50); + +INSERT INTO domains.dom_table VALUES(1101); + +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 450; + + +SELECT * FROM 
pathman_partition_list +ORDER BY range_min::INT, range_max::INT; + + +SELECT drop_partitions('domains.dom_table'); +SELECT create_hash_partitions('domains.dom_table', 'val', 5); + +SELECT * FROM pathman_partition_list +ORDER BY "partition"::TEXT; + + +DROP TABLE domains.dom_table CASCADE; +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; +DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql new file mode 100644 index 00000000..2a128df2 --- /dev/null +++ b/sql/pathman_dropped_cols.sql @@ -0,0 +1,104 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA dropped_cols; + + +/* + * we should be able to manage tables with dropped columns + */ + +create table test_range(a int, b int, key int not null); + +alter table test_range drop column a; +select create_range_partitions('test_range', 'key', 1, 10, 2); + +alter table test_range drop column b; +select prepend_range_partition('test_range'); + +select * from pathman_partition_list order by parent, partition; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_1_check'; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_3_check'; + +drop table test_range cascade; + + +create table test_hash(a int, b int, key int not null); + +alter table test_hash drop column a; +select create_hash_partitions('test_hash', 'key', 3); + +alter table test_hash drop column b; +create table test_dummy (like test_hash); +select replace_hash_partition('test_hash_2', 'test_dummy', true); + +select * from pathman_partition_list order by parent, partition; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_hash_1_check'; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; +drop table test_hash cascade; + +-- Yury Smirnov case +CREATE TABLE root_dict ( + id BIGSERIAL 
PRIMARY KEY NOT NULL, + root_id BIGINT NOT NULL, + start_date DATE, + num TEXT, + main TEXT, + dict_code TEXT, + dict_name TEXT, + edit_num TEXT, + edit_date DATE, + sign CHAR(4) +); + +CREATE INDEX "root_dict_root_id_idx" ON "root_dict" ("root_id"); + +DO +$$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM generate_series(1, 3) r + LOOP + FOR d IN 1..2 LOOP + INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES + (r.r, '2010-10-10'::date, 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + END LOOP; + END LOOP; +END +$$; + +ALTER TABLE root_dict ADD COLUMN dict_id BIGINT DEFAULT 3; +ALTER TABLE root_dict DROP COLUMN dict_code, + DROP COLUMN dict_name, + DROP COLUMN sign; + +SELECT create_hash_partitions('root_dict' :: REGCLASS, + 'root_id', + 3, + true); +VACUUM FULL ANALYZE "root_dict"; +SELECT set_enable_parent('root_dict' :: REGCLASS, FALSE); + +PREPARE getbyroot AS +SELECT + id, root_id, start_date, num, main, edit_num, edit_date, dict_id +FROM root_dict +WHERE root_id = $1; + +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); + +-- errors usually start here +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); + +DEALLOCATE getbyroot; +DROP TABLE root_dict CASCADE; +DROP SCHEMA dropped_cols; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql new file mode 100644 index 00000000..bf29f896 --- /dev/null +++ b/sql/pathman_expressions.sql @@ -0,0 +1,186 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; 
pathman_expressions_2.out is the updated version. + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; + + + +/* + * Test partitioning expression canonicalization process + */ + +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); +SELECT expr FROM pathman_config; /* check expression */ +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; +DROP TABLE test_exprs.canon CASCADE; + + +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); +SELECT expr FROM pathman_config; /* check expression */ +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "C" < ALL (array['b', 'c']); +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); +DROP TABLE test_exprs.canon CASCADE; + + + +/* + * Test composite key. 
+ */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); +SELECT expr FROM pathman_config; /* check expression */ +INSERT INTO test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); +DROP TABLE test_exprs.composite CASCADE; + + +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + + + +/* + * Test HASH + */ + +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; + +SELECT COUNT(*) FROM test_exprs.hash_rel; + + + +\set VERBOSITY default + +/* Try using constant expression */ +SELECT 
create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); + +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); + +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); + +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); + +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); + +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); + +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + +\set VERBOSITY terse + + +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; +SELECT COUNT(*) FROM test_exprs.hash_rel; + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; +SELECT COUNT(*) FROM test_exprs.hash_rel; + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + + + +/* + * Test RANGE + */ + +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); + +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; + + + +\set VERBOSITY default + +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + +/* Check 
that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + +\set VERBOSITY terse + + +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + +DROP TABLE test_exprs.canary CASCADE; +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +DROP TABLE test_exprs.hash_rel CASCADE; +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_foreign_keys.sql b/sql/pathman_foreign_keys.sql new file mode 100644 index 00000000..74dee25f --- /dev/null +++ b/sql/pathman_foreign_keys.sql @@ -0,0 +1,58 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA fkeys; + + + +/* Check primary keys generation */ +CREATE TABLE fkeys.test_ref(comment TEXT UNIQUE); +INSERT INTO fkeys.test_ref VALUES('test'); + +CREATE TABLE fkeys.test_fkey( + id INT NOT NULL, + comment TEXT, + FOREIGN KEY (comment) REFERENCES fkeys.test_ref(comment)); + +INSERT INTO fkeys.test_fkey SELECT generate_series(1, 1000), 'test'; + +SELECT create_range_partitions('fkeys.test_fkey', 'id', 1, 100); +INSERT INTO fkeys.test_fkey VALUES(1, 'wrong'); +INSERT INTO fkeys.test_fkey VALUES(1, 'test'); +SELECT drop_partitions('fkeys.test_fkey'); + +SELECT create_hash_partitions('fkeys.test_fkey', 'id', 10); +INSERT INTO fkeys.test_fkey VALUES(1, 'wrong'); +INSERT INTO fkeys.test_fkey VALUES(1, 'test'); +SELECT drop_partitions('fkeys.test_fkey'); + + +/* Try to 
partition table that's being referenced */ +CREATE TABLE fkeys.messages( + id SERIAL PRIMARY KEY, + msg TEXT); + +CREATE TABLE fkeys.replies( + id SERIAL PRIMARY KEY, + message_id INTEGER REFERENCES fkeys.messages(id), + msg TEXT); + +INSERT INTO fkeys.messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; +INSERT INTO fkeys.replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; + +SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* not ok */ + +ALTER TABLE fkeys.replies DROP CONSTRAINT replies_message_id_fkey; + +SELECT create_range_partitions('fkeys.messages', 'id', 1, 100, 2); /* ok */ +EXPLAIN (COSTS OFF) SELECT * FROM fkeys.messages; + +DROP TABLE fkeys.messages, fkeys.replies CASCADE; + + + +DROP TABLE fkeys.test_fkey CASCADE; +DROP TABLE fkeys.test_ref CASCADE; +DROP SCHEMA fkeys; +DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_gaps.sql b/sql/pathman_gaps.sql new file mode 100644 index 00000000..129b210c --- /dev/null +++ b/sql/pathman_gaps.sql @@ -0,0 +1,145 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; + + + +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); +DROP TABLE gaps.test_1_2; + +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); +DROP TABLE gaps.test_2_3; + +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); +DROP TABLE gaps.test_3_4; + +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; + + + +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; 
+ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; 
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; 
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + + + +DROP TABLE gaps.test_1 CASCADE; +DROP TABLE gaps.test_2 CASCADE; +DROP TABLE gaps.test_3 CASCADE; +DROP TABLE gaps.test_4 CASCADE; +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql new file mode 100644 index 00000000..620dee5f --- /dev/null +++ b/sql/pathman_hashjoin.sql @@ -0,0 +1,56 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; + +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; + +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on 
j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql new file mode 100644 index 00000000..aa5b6c1c --- /dev/null +++ b/sql/pathman_inserts.sql @@ -0,0 +1,231 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; + + +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + +/* attach before and after insertion triggers to partitioned table */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. 
INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; + +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); + +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); + +/* set partition init callback that will add triggers to partitions */ +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + + +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; + + +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +SELECT * FROM test_inserts.storage_11; + +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +SELECT * FROM test_inserts.storage_11; + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +SELECT * FROM test_inserts.storage_11; + +/* cause an unique index conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; + + +/* 
drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; + + +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +SELECT * FROM test_inserts.storage_12; /* direct access */ +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +SELECT * FROM test_inserts.storage_13; /* direct access */ +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; + + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; + + +/* will have 2 columns (b, d) */ +SELECT append_range_partition('test_inserts.storage'); +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +SELECT * FROM test_inserts.storage_14; /* direct access */ +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; + + + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT get_partition_key('test_inserts.storage')); + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; 
+ + + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + +/* drop data */ +TRUNCATE test_inserts.storage; + + +/* one more time! */ +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + +/* drop data */ +TRUNCATE test_inserts.storage; + + +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; + + +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + +/* drop data */ +TRUNCATE test_inserts.storage; + + +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; + + +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + + +/* test gap case 
(missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +DROP TABLE test_inserts.test_gap CASCADE; + + +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; +DROP TABLE test_inserts.special_1; + +/* insert into ... select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; +DROP TABLE test_inserts.special_2; + +DROP TABLE test_inserts.test_special_only CASCADE; + + +DROP TABLE test_inserts.storage CASCADE; +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; +DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql new file mode 100644 index 00000000..3a457e7a --- /dev/null +++ b/sql/pathman_interval.sql @@ -0,0 +1,172 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_interval; + + + +/* Range partitions for INT2 type */ +CREATE TABLE test_interval.abc (id INT2 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); +SELECT 
set_interval('test_interval.abc', NULL::INT2); + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); + +/* Set a negative interval */ +SELECT set_interval('test_interval.abc', -100); + +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; + +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 1000); +INSERT INTO test_interval.abc VALUES (250); +SELECT partrel, range_interval FROM pathman_config; + +DROP TABLE test_interval.abc CASCADE; + + +/* Range partitions for INT4 type */ +CREATE TABLE test_interval.abc (id INT4 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::INT4); + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); + +/* Set a negative interval */ +SELECT set_interval('test_interval.abc', -100); + +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; + +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 1000); +INSERT INTO test_interval.abc VALUES (250); +SELECT partrel, range_interval FROM pathman_config; + +DROP TABLE test_interval.abc CASCADE; + + +/* Range partitions for INT8 type */ +CREATE TABLE test_interval.abc (id INT8 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'id', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::INT8); + +/* pg_pathman shouldn't be able to create a new partition */ +INSERT INTO test_interval.abc VALUES (250); + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); + +/* Set a 
negative interval */ +SELECT set_interval('test_interval.abc', -100); + +/* We also shouldn't be able to set a trivial interval directly */ +UPDATE pathman_config SET range_interval = '0' +WHERE partrel = 'test_interval.abc'::REGCLASS; + +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 1000); +INSERT INTO test_interval.abc VALUES (250); +SELECT partrel, range_interval FROM pathman_config; + +DROP TABLE test_interval.abc CASCADE; + + +/* Range partitions for DATE type */ +CREATE TABLE test_interval.abc (dt DATE NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'dt', + '2016-01-01'::DATE, '1 day'::INTERVAL, 2); +SELECT set_interval('test_interval.abc', NULL::INTERVAL); + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', '1 second'::INTERVAL); + +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', '1 month'::INTERVAL); + +SELECT partrel, range_interval FROM pathman_config; + +DROP TABLE test_interval.abc CASCADE; + + +/* Range partitions for FLOAT4 type */ +CREATE TABLE test_interval.abc (x FLOAT4 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::FLOAT4); + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); + +/* Set NaN float as interval */ +SELECT set_interval('test_interval.abc', 'NaN'::FLOAT4); + +/* Set INF float as interval */ +SELECT set_interval('test_interval.abc', 'Infinity'::FLOAT4); + +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 100); + +DROP TABLE test_interval.abc CASCADE; + + +/* Range partitions for FLOAT8 type */ +CREATE TABLE test_interval.abc (x FLOAT8 NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::FLOAT8); + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); + +/* Set NaN float as interval */ +SELECT set_interval('test_interval.abc', 
'NaN'::FLOAT8); + +/* Set INF float as interval */ +SELECT set_interval('test_interval.abc', 'Infinity'::FLOAT8); + +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 100); + +DROP TABLE test_interval.abc CASCADE; + + +/* Range partitions for NUMERIC type */ +CREATE TABLE test_interval.abc (x NUMERIC NOT NULL); +SELECT create_range_partitions('test_interval.abc', 'x', 0, 100, 2); +SELECT set_interval('test_interval.abc', NULL::NUMERIC); + +/* Set a trivial interval */ +SELECT set_interval('test_interval.abc', 0); + +/* Set NaN numeric as interval */ +SELECT set_interval('test_interval.abc', 'NaN'::NUMERIC); + +/* Set a normal interval */ +SELECT set_interval('test_interval.abc', 100); + +DROP TABLE test_interval.abc CASCADE; + + +/* Hash partitioned table shouldn't accept any interval value */ +CREATE TABLE test_interval.abc (id SERIAL); +SELECT create_hash_partitions('test_interval.abc', 'id', 3); +SELECT set_interval('test_interval.abc', 100); +SELECT set_interval('test_interval.abc', NULL::INTEGER); + +DROP TABLE test_interval.abc CASCADE; + + + +DROP SCHEMA test_interval; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql new file mode 100644 index 00000000..aa30b0b8 --- /dev/null +++ b/sql/pathman_join_clause.sql @@ -0,0 +1,116 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + + + +/* + * Test push down a join clause into child nodes of append + */ + +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); + +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); + +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; + + +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + + + +/* + * Test case by @dimarick + */ + +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); + +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); + +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); + +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); + +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; + + +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM 
test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + + + +DROP TABLE test.child CASCADE; +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_lateral.sql b/sql/pathman_lateral.sql new file mode 100644 index 00000000..d5def38c --- /dev/null +++ b/sql/pathman_lateral.sql @@ -0,0 +1,50 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ + + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; + + +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); +insert into test_lateral.data select generate_series(1, 10000); + + +VACUUM ANALYZE; + + +set enable_hashjoin = off; +set enable_mergejoin = off; + + +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + + +set enable_hashjoin = on; +set enable_mergejoin = on; + + + +DROP TABLE test_lateral.data CASCADE; +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql new file mode 100644 index 00000000..d1084375 --- /dev/null +++ b/sql/pathman_mergejoin.sql @@ -0,0 +1,68 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); + +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; + +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); + +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; + +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql new file mode 100644 index 00000000..68dc4ca1 --- /dev/null +++ b/sql/pathman_only.sql @@ -0,0 +1,97 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * 
--------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; + + + +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + +VACUUM ANALYZE; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + + + +DROP TABLE test_only.from_only_test CASCADE; +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_param_upd_del.sql b/sql/pathman_param_upd_del.sql new file mode 100644 index 00000000..0f3030e7 --- 
/dev/null +++ b/sql/pathman_param_upd_del.sql @@ -0,0 +1,50 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA param_upd_del; + + +CREATE TABLE param_upd_del.test(key INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('param_upd_del.test', 'key', 10); +INSERT INTO param_upd_del.test SELECT i, i FROM generate_series(1, 1000) i; + +ANALYZE; + + +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = $1; +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(10); +EXPLAIN (COSTS OFF) EXECUTE upd(11); +DEALLOCATE upd; + + +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = ($1 + 3) * 2; +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(6); +DEALLOCATE upd; + + +PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1; +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(10); +EXPLAIN (COSTS OFF) EXECUTE del(11); +DEALLOCATE del; + + +DROP TABLE param_upd_del.test CASCADE; +DROP SCHEMA param_upd_del; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql new file mode 100644 index 00000000..3e2cf92a --- /dev/null +++ b/sql/pathman_permissions.sql @@ -0,0 +1,178 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA permissions; + +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; + +GRANT USAGE, CREATE ON SCHEMA 
permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; + + +/* Switch to #1 */ +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; + +/* Should fail (can't SELECT) */ +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; + +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; + +/* Should fail (don't own parent) */ +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; + +/* Should be ok */ +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); + +/* Should be able to see */ +SET ROLE pathman_user2; +SELECT * FROM pathman_config; +SELECT * FROM pathman_config_params; + +/* Should fail */ +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +SELECT set_auto('permissions.pathman_user1_table', false); + +/* Should fail */ +SET ROLE pathman_user2; +DELETE FROM pathman_config +WHERE partrel = 'permissions.pathman_user1_table'::regclass; + +/* No rights to insert, should fail */ +SET ROLE pathman_user2; +DO $$ +BEGIN + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; + +/* No rights to create partitions (need INSERT privilege) */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); + +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT 
INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ + +/* Should be able to prepend a partition */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); +SELECT attname, attacl FROM pg_attribute +WHERE attrelid = (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS + ORDER BY range_min::int ASC /* prepend */ + LIMIT 1) +ORDER BY attname; /* check ACL for each column */ + +/* Have rights, should be ok (parent's ACL is shared by new children) */ +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; +SELECT relname, relacl FROM pg_class +WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS + ORDER BY range_max::int DESC /* append */ + LIMIT 3) +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ + +/* Try to drop partition, should fail */ +DO $$ +BEGIN + SELECT drop_range_partition('permissions.pathman_user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; + +/* Disable automatic partition creation */ +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); + +/* Partition creation, should fail */ +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; + +/* Finally drop partitions */ +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); + + +/* Switch to #2 */ +SET ROLE pathman_user2; +/* Test ddl event trigger */ +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT 
drop_partitions('permissions.pathman_user2_table'); + + +/* Switch to #1 */ +SET ROLE pathman_user1; +CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); +INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; + +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; + +SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column */ + +ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ +SELECT append_range_partition('permissions.dropped_column'); + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + +ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ +SELECT append_range_partition('permissions.dropped_column'); + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + +DROP TABLE permissions.dropped_column CASCADE; + + +/* Finally reset user */ +RESET ROLE; + +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; + + +DROP SCHEMA permissions; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_deletes.sql b/sql/pathman_rebuild_deletes.sql new file mode 100644 index 00000000..1af6b61a --- /dev/null +++ 
b/sql/pathman_rebuild_deletes.sql @@ -0,0 +1,65 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; + + +/* + * Test DELETEs on a partition with different TupleDescriptor. + */ + +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; + +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); + + +VACUUM ANALYZE; + + +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + +CREATE TABLE test_deletes.test_dummy (val INT4); + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + +DROP TABLE test_deletes.test_dummy; + + + +DROP TABLE test_deletes.test CASCADE; +DROP SCHEMA test_deletes; +DROP EXTENSION pg_pathman; diff 
--git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql new file mode 100644 index 00000000..fbbbcbba --- /dev/null +++ b/sql/pathman_rebuild_updates.sql @@ -0,0 +1,104 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_updates; + + +/* + * Test UPDATEs on a partition with different TupleDescriptor. + */ + +/* create partitioned table */ +CREATE TABLE test_updates.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_updates.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_updates.test', 'val', 1, 10); + +/* drop column 'a' */ +ALTER TABLE test_updates.test DROP COLUMN a; + +/* append new partition */ +SELECT append_range_partition('test_updates.test'); +INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); + + +VACUUM ANALYZE; + + +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; +UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; + + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; +UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; + +CREATE TABLE test_updates.test_dummy (val INT4); + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) 
+RETURNING *, tableoid::REGCLASS; + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + +DROP TABLE test_updates.test_dummy; + + +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; + +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + + +/* basic check for 'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; +drop table test_updates.test_5113 cascade; + +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; +drop table test_updates.test_5113 cascade; + + +DROP TABLE test_updates.test CASCADE; +DROP SCHEMA test_updates; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql new file mode 100644 index 
00000000..8847b80c --- /dev/null +++ b/sql/pathman_rowmarks.sql @@ -0,0 +1,158 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; + + + +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); + +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); + + +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + + +VACUUM ANALYZE; + + +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET 
enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; +SET enable_hashjoin = t; +SET enable_mergejoin = t; + +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); +SET enable_hashjoin = t; +SET enable_mergejoin = t; + + + +DROP TABLE rowmarks.first CASCADE; +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql new file mode 100644 index 00000000..bf917d88 --- /dev/null +++ b/sql/pathman_runtime_nodes.sql @@ -0,0 +1,372 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +/* + * Test RuntimeAppend + */ + +create 
or replace function test.pathman_assert(smt bool, error_msg text) returns text as $$ +begin + if not smt then + raise exception '%', error_msg; + end if; + + return 'ok'; +end; +$$ language plpgsql; + +create or replace function test.pathman_equal(a text, b text, error_msg text) returns text as $$ +begin + if a != b then + raise exception '''%'' is not equal to ''%'', %', a, b, error_msg; + end if; + + return 'equal'; +end; +$$ language plpgsql; + +create or replace function test.pathman_test(query text) returns jsonb as $$ +declare + plan jsonb; +begin + execute 'explain (analyze, format json)' || query into plan; + + return plan; +end; +$$ language plpgsql; + +create or replace function test.pathman_test_1() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = (select * from test.run_values limit 1)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Relation Name')::text, + format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(1), 6)), + 'wrong partition'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans') into num; + perform test.pathman_equal(num::text, '2', 'expected 2 child plans for custom scan'); + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; + +create or replace function test.pathman_test_2() returns text as $$ +declare + plan jsonb; + num int; + c text; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from test.run_values limit 4)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform 
test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + execute 'select string_agg(y.z, '','') from + (select (x->''Relation Name'')::text as z from + jsonb_array_elements($1->0->''Plan''->''Plans''->1->''Plans'') x + order by x->''Relation Name'') y' + into c using plan; + perform test.pathman_equal(c, '"runtime_test_1_2","runtime_test_1_3","runtime_test_1_4","runtime_test_1_5"', + 'wrong partitions'); + + for i in 0..3 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_equal(num::text, '1', 'expected 1 loop'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; + +create or replace function test.pathman_test_3() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 a join test.run_values b on a.id = b.val'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '6', 'expected 6 child plans for custom scan'); + + for i in 0..5 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num > 0 and num <= 1718, 'expected no more than 
1718 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; + +create or replace function test.pathman_test_4() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.category c, lateral' || + '(select * from test.runtime_test_2 g where g.category_id = c.id order by rating limit 4) as tg'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + /* Limit -> Custom Scan */ + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Custom Plan Provider')::text, + '"RuntimeMergeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + for i in 0..3 loop + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Relation Name')::text, + format('"runtime_test_2_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), + 'wrong partition'); + + num = plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num = 1, 'expected no more than 1 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; + +create or replace function test.pathman_test_5() returns text as $$ +declare + res record; +begin + select + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test empty tlist */ + + + select id * 2, id, 17 + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test computations */ + + + select test.vals.* from test.vals, lateral (select from 
test.runtime_test_3 + where id = test.vals.val) as q + into res; /* test lateral */ + + + select id, generate_series(1, 2) gen, val + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + order by id, gen, val + offset 1 limit 1 + into res; /* without IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '1', 'id is incorrect (t2)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t2)'); + perform test.pathman_equal(res.val::text, 'k = 1', 'val is incorrect (t2)'); + + + select id + from test.runtime_test_3 + where id = any (select * from test.vals order by val limit 5) + order by id + offset 3 limit 1 + into res; /* with IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '4', 'id is incorrect (t3)'); + + + select v.val v1, generate_series(2, 2) gen, t.val v2 + from test.runtime_test_3 t join test.vals v on id = v.val + order by v1, gen, v2 + limit 1 + into res; + + perform test.pathman_equal(res.v1::text, '1', 'v1 is incorrect (t4)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t4)'); + perform test.pathman_equal(res.v2::text, 'k = 1', 'v2 is incorrect (t4)'); + + return 'ok'; +end; +$$ language plpgsql +set enable_hashjoin = off +set enable_mergejoin = off; + + + +create table test.run_values as select generate_series(1, 10000) val; +create table test.runtime_test_1(id serial primary key, val real); +insert into test.runtime_test_1 select generate_series(1, 10000), random(); +select pathman.create_hash_partitions('test.runtime_test_1', 'id', 6); + +create table test.category as (select id, 'cat' || id::text as name from generate_series(1, 4) id); +create table test.runtime_test_2 (id serial, category_id int not null, name text, rating real); +insert into test.runtime_test_2 (select id, (id % 6) + 1 as category_id, 'good' || id::text as name, random() as rating from generate_series(1, 100000) id); +create index on test.runtime_test_2 (category_id, rating); +select 
pathman.create_hash_partitions('test.runtime_test_2', 'category_id', 6); + +create table test.vals as (select generate_series(1, 10000) as val); +create table test.runtime_test_3(val text, id serial not null); +insert into test.runtime_test_3(id, val) select * from generate_series(1, 10000) k, format('k = %s', k); +select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); +create index on test.runtime_test_3 (id); +create index on test.runtime_test_3_0 (id); + +create table test.runtime_test_4(val text, id int not null); +insert into test.runtime_test_4(id, val) select * from generate_series(1, 10000) k, md5(k::text); +select pathman.create_range_partitions('test.runtime_test_4', 'id', 1, 2000); + + +VACUUM ANALYZE; + + +set pg_pathman.enable_runtimeappend = on; +set pg_pathman.enable_runtimemergeappend = on; + +select test.pathman_test_1(); /* RuntimeAppend (select ... where id = (subquery)) */ +select test.pathman_test_2(); /* RuntimeAppend (select ... where id = any(subquery)) */ +select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ +select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ +select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ + + +/* RuntimeAppend (join, enabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', true); + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + +/* RuntimeAppend (join, disabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', false); + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + +/* RuntimeAppend (join, additional projections) */ 
+select generate_series(1, 2) from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + +/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; + +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! */ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; +drop table test.dropped_cols cascade; + +set enable_hashjoin = off; +set enable_mergejoin = off; + +select from test.runtime_test_4 +where id = any (select generate_series(-10, -1)); /* should be empty */ + +set enable_hashjoin = on; +set enable_mergejoin = on; + + +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +DROP TABLE test.runtime_test_2 CASCADE; +DROP TABLE test.runtime_test_3 CASCADE; +DROP TABLE test.runtime_test_4 CASCADE; +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); +DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. 
+-- +CREATE TABLE part_test (val int NOT NULL); +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +DROP FUNCTION part_test_trigger(); + +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql new file mode 100644 index 00000000..5515874c --- /dev/null +++ b/sql/pathman_subpartitions.sql @@ -0,0 +1,169 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. 
+ */ + +\set VERBOSITY terse + +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; + + + +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); +SELECT * FROM pathman_partition_list; +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); +DROP FUNCTION check_multilevel_queries(); + +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' 
+) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; + +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); +SELECT subpartitions.partitions_tree('subpartitions.abc'); +DROP TABLE subpartitions.abc CASCADE; + + +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; + +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ +SELECT subpartitions.partitions_tree('subpartitions.abc'); + + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO 
subpartitions.abc VALUES (150, 0); + +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ +INSERT INTO subpartitions.abc VALUES (250, 50); + +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + + +DROP TABLE subpartitions.abc CASCADE; + + +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + +DROP TABLE subpartitions.abc CASCADE; + + +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], 
array['subpartitions.a2_1020_3040']); +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); + +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + +DROP TABLE subpartitions.a2 CASCADE; +DROP TABLE subpartitions.a1; + + +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql new file mode 100644 index 00000000..c99b9666 --- /dev/null +++ b/sql/pathman_upd_del.sql @@ -0,0 +1,285 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differently on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. 
+ */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + + + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; + + +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); + +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); + +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; + +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + + +VACUUM ANALYZE; + + +/* + * Test UPDATE and DELETE + */ + +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; +ROLLBACK; + + +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; +ROLLBACK; + + +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; +ROLLBACK; + + +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * 
FROM test.range_rel WHERE dt < '1990-01-01'; +ROLLBACK; + + +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; + + +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; + + +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; + + +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; + + +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ROLLBACK; + + +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; + + +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + +BEGIN; 
+DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; + + +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; + +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ROLLBACK; + + +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; + + +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; + + +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; + + +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ 
+EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; + + + +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql new file mode 100644 index 00000000..e70f60f4 --- /dev/null +++ b/sql/pathman_update_node.sql @@ -0,0 +1,220 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_node; + + +SET pg_pathman.enable_partitionrouter = ON; + + +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +CREATE INDEX val_idx ON test_update_node.test_range (val); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + +/* Moving from 2st to 1st partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; + +/* Keep same partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val < 10 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_node.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + +SELECT count(*) FROM 
test_update_node.test_range; + + +/* Move single row */ +UPDATE test_update_node.test_range SET val = 90 WHERE val = 80; + +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 90 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Move single row (create new partition) */ +UPDATE test_update_node.test_range SET val = -1 WHERE val = 50; + +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = -1 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Update non-key column */ +UPDATE test_update_node.test_range SET comment = 'test!' WHERE val = 100; + +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 100 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_node.test_range_4; +UPDATE test_update_node.test_range SET val = 35 WHERE val = 70; + +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 70 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Test trivial move (same key) */ +UPDATE test_update_node.test_range SET val = 65 WHERE val = 65; + +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 65 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_node.test_range', + 'test_update_node.test_range_inv', + 101::NUMERIC, 111::NUMERIC); +UPDATE test_update_node.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 105; + +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 105 
+ORDER BY comment; + +UPDATE test_update_node.test_range SET val = 60 WHERE val = 105; +SELECT count(*) FROM test_update_node.test_range; + +/* Test RETURNING */ +UPDATE test_update_node.test_range SET val = 71 WHERE val = 41 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 71 WHERE val = 71 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 106 WHERE val = 61 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 106 WHERE val = 106 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 61 WHERE val = 106 RETURNING val, comment; + +/* Just in case, check we don't duplicate anything */ +SELECT count(*) FROM test_update_node.test_range; + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_node.test_range'); +UPDATE test_update_node.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; + +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 115; + +UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; +SELECT count(*) FROM test_update_node.test_range; + +DROP TABLE test_update_node.test_range CASCADE; + +/* recreate table and mass move */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; +SELECT count(*) FROM test_update_node.test_range; + +/* move everything to next partition */ +UPDATE test_update_node.test_range SET val = val + 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + 
+/* move everything to previous partition */ +UPDATE test_update_node.test_range SET val = val - 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; +SELECT count(*) FROM test_update_node.test_range; + +/* Partition table by HASH (INT4) */ +CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); + + +/* Shuffle rows a few times */ +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; + +/* Check values #0 */ +SELECT tableoid::regclass, * FROM test_update_node.test_hash ORDER BY val; + + +/* Move all rows into single partition */ +UPDATE test_update_node.test_hash SET val = 1; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_hash +WHERE val = 1 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_hash; + + +/* Don't move any rows */ +UPDATE test_update_node.test_hash SET val = 3 WHERE val = 2; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_hash +WHERE val = 3 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_hash; + + + +DROP TABLE test_update_node.test_hash CASCADE; +DROP TABLE test_update_node.test_range CASCADE; +DROP SCHEMA test_update_node; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_triggers.sql b/sql/pathman_update_triggers.sql new file mode 100644 index 00000000..646afe65 
--- /dev/null +++ b/sql/pathman_update_triggers.sql @@ -0,0 +1,146 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; + + + +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; + + +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; + + +/* + * Statement level triggers + */ + +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); + + +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); + + +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); + +create trigger aus after update ON 
test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); + + +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); + +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); + + +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); + +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; + +select count(distinct val) from test_update_triggers.test; + + +truncate test_update_triggers.test; + + +/* + * Row level triggers + */ + +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); + +create trigger 
au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); + + +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); + +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); + + +/* single value */ +insert into test_update_triggers.test values (1); + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; + +select count(distinct val) from test_update_triggers.test; + + +DROP TABLE test_update_triggers.test CASCADE; +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; +DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_utility_stmt.sql 
b/sql/pathman_utility_stmt.sql new file mode 100644 index 00000000..08992835 --- /dev/null +++ b/sql/pathman_utility_stmt.sql @@ -0,0 +1,309 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; + + + +/* + * Test COPY + */ +CREATE SCHEMA copy_stmt_hooking; + +CREATE TABLE copy_stmt_hooking.test( + val int not null, + comment text, + c3 int, + c4 int); +INSERT INTO copy_stmt_hooking.test SELECT generate_series(1, 20), 'comment'; +CREATE INDEX ON copy_stmt_hooking.test(val); + + +/* test for RANGE partitioning */ +SELECT create_range_partitions('copy_stmt_hooking.test', 'val', 1, 5); + +/* perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test; +VACUUM FULL copy_stmt_hooking.test_1; +VACUUM FULL copy_stmt_hooking.test_2; +VACUUM FULL copy_stmt_hooking.test_3; +VACUUM FULL copy_stmt_hooking.test_4; + +/* DELETE ROWS, COPY FROM */ +DELETE FROM copy_stmt_hooking.test; +COPY copy_stmt_hooking.test FROM stdin; +1 test_1 0 0 +6 test_2 0 0 +7 test_2 0 0 +11 test_3 0 0 +16 test_4 0 0 +\. +SELECT count(*) FROM ONLY copy_stmt_hooking.test; +SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; + +/* perform VACUUM */ +VACUUM FULL copy_stmt_hooking.test; +VACUUM FULL copy_stmt_hooking.test_1; +VACUUM FULL copy_stmt_hooking.test_2; +VACUUM FULL copy_stmt_hooking.test_3; +VACUUM FULL copy_stmt_hooking.test_4; + +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; /* not ok */ +COPY copy_stmt_hooking.test (val) TO stdout; /* not ok */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout (FORMAT CSV); +\copy (SELECT * FROM copy_stmt_hooking.test) TO stdout + +/* COPY FROM (partition does not exist, NOT allowed to create partitions) */ +SET pg_pathman.enable_auto_partition = OFF; +COPY copy_stmt_hooking.test FROM stdin; +21 test_no_part 0 0 +\. 
+SELECT * FROM copy_stmt_hooking.test WHERE val > 20; + +/* COPY FROM (partition does not exist, allowed to create partitions) */ +SET pg_pathman.enable_auto_partition = ON; +COPY copy_stmt_hooking.test FROM stdin; +21 test_no_part 0 0 +\. +SELECT * FROM copy_stmt_hooking.test WHERE val > 20; + +/* COPY FROM (partitioned column is not specified) */ +COPY copy_stmt_hooking.test(comment) FROM stdin; +test_no_part +\. + +/* COPY FROM (we don't support FREEZE) */ +COPY copy_stmt_hooking.test FROM stdin WITH (FREEZE); + + +/* Drop column (make use of 'tuple_map') */ +ALTER TABLE copy_stmt_hooking.test DROP COLUMN comment; + + +/* create new partition */ +SELECT get_number_of_partitions('copy_stmt_hooking.test'); +INSERT INTO copy_stmt_hooking.test (val, c3, c4) VALUES (26, 1, 2); +SELECT get_number_of_partitions('copy_stmt_hooking.test'); + +/* check number of columns in 'test' */ +SELECT count(*) FROM pg_attribute +WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test'::REGCLASS; + +/* check number of columns in 'test_6' */ +SELECT count(*) FROM pg_attribute +WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test_6'::REGCLASS; + +/* test transformed tuples */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; + + +/* COPY FROM (insert into table with dropped column) */ +COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; +2 1 2 +\. + +/* COPY FROM (insert into table without dropped column) */ +COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; +27 1 2 +\. 
+ +/* check tuples from last partition (without dropped column) */ +SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; + + +/* drop modified table */ +DROP TABLE copy_stmt_hooking.test CASCADE; + + +/* create table again */ +CREATE TABLE copy_stmt_hooking.test( + val int not null, + comment text, + c3 int, + c4 int); +CREATE INDEX ON copy_stmt_hooking.test(val); + + +/* test for HASH partitioning */ +SELECT create_hash_partitions('copy_stmt_hooking.test', 'val', 5); + +/* DELETE ROWS, COPY FROM */ +DELETE FROM copy_stmt_hooking.test; +COPY copy_stmt_hooking.test FROM stdin; +1 hash_1 0 0 +6 hash_2 0 0 +\. +SELECT count(*) FROM ONLY copy_stmt_hooking.test; +SELECT * FROM copy_stmt_hooking.test ORDER BY val; + +/* Check dropped colums before partitioning */ +CREATE TABLE copy_stmt_hooking.test2 ( + a varchar(50), + b varchar(50), + t timestamp without time zone not null +); +ALTER TABLE copy_stmt_hooking.test2 DROP COLUMN a; +SELECT create_range_partitions('copy_stmt_hooking.test2', + 't', + '2017-01-01 00:00:00'::timestamp, + interval '1 hour', 5, false +); +COPY copy_stmt_hooking.test2(t) FROM stdin; +2017-02-02 20:00:00 +\. 
+SELECT COUNT(*) FROM copy_stmt_hooking.test2; + +DROP TABLE copy_stmt_hooking.test CASCADE; +DROP TABLE copy_stmt_hooking.test2 CASCADE; +DROP SCHEMA copy_stmt_hooking; + + + +/* + * Test auto check constraint renaming + */ +CREATE SCHEMA rename; + + +/* + * Check that auto naming sequence is renamed + */ +CREATE TABLE rename.parent(id int not null); +SELECT create_range_partitions('rename.parent', 'id', 1, 2, 2); +SELECT 'rename.parent'::regclass; /* parent is OK */ +SELECT 'rename.parent_seq'::regclass; /* sequence is OK */ +ALTER TABLE rename.parent RENAME TO parent_renamed; +SELECT 'rename.parent_renamed'::regclass; /* parent is OK */ +SELECT 'rename.parent_renamed_seq'::regclass; /* sequence is OK */ +SELECT append_range_partition('rename.parent_renamed'); /* can append */ +DROP SEQUENCE rename.parent_renamed_seq; +ALTER TABLE rename.parent_renamed RENAME TO parent; +SELECT 'rename.parent'::regclass; /* parent is OK */ + +/* + * Check that partitioning constraints are renamed + */ +CREATE TABLE rename.test(a serial, b int); +SELECT create_hash_partitions('rename.test', 'a', 3); +ALTER TABLE rename.test_0 RENAME TO test_one; +/* We expect to find check constraint renamed as well */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.test_one'::regclass AND r.contype = 'c'; + +/* Generates check constraint for relation */ +CREATE OR REPLACE FUNCTION add_constraint(rel regclass) +RETURNS VOID AS $$ +declare + constraint_name text := build_check_constraint_name(rel); +BEGIN + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (a < 100);', + rel, constraint_name); +END +$$ +LANGUAGE plpgsql; + +/* + * Check that it doesn't affect regular inherited + * tables that aren't managed by pg_pathman + */ +CREATE TABLE rename.test_inh (LIKE rename.test INCLUDING ALL); +CREATE TABLE rename.test_inh_1 (LIKE rename.test INCLUDING ALL); +ALTER TABLE rename.test_inh_1 INHERIT rename.test_inh; +SELECT 
add_constraint('rename.test_inh_1'); +ALTER TABLE rename.test_inh_1 RENAME TO test_inh_one; +/* Show check constraints of rename.test_inh_one */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.test_inh_one'::regclass AND r.contype = 'c'; + +/* + * Check that plain tables are not affected too + */ +CREATE TABLE rename.plain_test(a serial, b int); +ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; +SELECT add_constraint('rename.plain_test_renamed'); +/* Show check constraints of rename.plain_test_renamed */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.plain_test_renamed'::regclass AND r.contype = 'c'; + +ALTER TABLE rename.plain_test_renamed RENAME TO plain_test; +/* ... and check constraints of rename.plain_test */ +SELECT r.conname, pg_get_constraintdef(r.oid, true) +FROM pg_constraint r +WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; + + +DROP TABLE rename.plain_test CASCADE; +DROP TABLE rename.test_inh CASCADE; +DROP TABLE rename.parent CASCADE; +DROP TABLE rename.test CASCADE; +DROP FUNCTION add_constraint(regclass); +DROP SCHEMA rename; + + + +/* + * Test DROP INDEX CONCURRENTLY (test snapshots) + */ +CREATE SCHEMA drop_index; + +CREATE TABLE drop_index.test (val INT4 NOT NULL); +CREATE INDEX ON drop_index.test (val); +SELECT create_hash_partitions('drop_index.test', 'val', 2); +DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; + +DROP TABLE drop_index.test CASCADE; +DROP SCHEMA drop_index; + +/* + * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla + */ +CREATE SCHEMA test_nonexistance; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; +/* renaming existent tables already tested earlier (see rename.plain_test) */ + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN j INT4; +CREATE TABLE 
test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN j INT4; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table DROP COLUMN IF EXISTS i; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS i; +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS nonexistent_column; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME COLUMN i TO j; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME COLUMN i TO j; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME CONSTRAINT baz TO bar; +CREATE TABLE test_nonexistance.existent_table(i INT4 CONSTRAINT existent_table_i_check CHECK (i < 100)); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME CONSTRAINT existent_table_i_check TO existent_table_i_other_check; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET SCHEMA nonexistent_schema; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_schema; +CREATE SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; +DROP TABLE test_nonexistance2.existent_table; +DROP SCHEMA test_nonexistance2; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table 
SET TABLESPACE nonexistent_tablespace; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; +DROP TABLE test_nonexistance.existent_table; + +DROP SCHEMA test_nonexistance; + + +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql new file mode 100644 index 00000000..36baa5c5 --- /dev/null +++ b/sql/pathman_views.sql @@ -0,0 +1,86 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; + + + +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); +insert into views._abc select generate_series(1, 100); + +/* create a dummy table */ +create table views._abc_add (like views._abc); + + +vacuum analyze; + + +/* create a facade view */ +create view views.abc as select * from views._abc; + +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; + +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); + + +/* Test SELECT */ +explain (costs off) select * from views.abc; +explain (costs off) select * from views.abc where id = 1; +explain (costs off) select * from views.abc where id = 1 for update; +select * from views.abc where id = 1 for update; +select count (*) from views.abc; + + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); +insert into views.abc values (1); + + +/* Test UPDATE */ +explain 
(costs off) update views.abc set id = 2 where id = 1 or id = 2; +update views.abc set id = 2 where id = 1 or id = 2; + + +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; +delete from views.abc where id = 1 or id = 2; + + +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; +explain (costs off) select * from views.abc_union where id = 5; +explain (costs off) table views.abc_union_all; +explain (costs off) select * from views.abc_union_all where id = 5; + + + +DROP TABLE views._abc CASCADE; +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/sql/pg_pathman.sql b/sql/pg_pathman.sql deleted file mode 100644 index 1cf5118d..00000000 --- a/sql/pg_pathman.sql +++ /dev/null @@ -1,646 +0,0 @@ -\set VERBOSITY terse - -CREATE SCHEMA pathman; -CREATE EXTENSION pg_pathman SCHEMA pathman; -CREATE SCHEMA test; - -CREATE TABLE test.hash_rel ( - id SERIAL PRIMARY KEY, - value INTEGER); -INSERT INTO test.hash_rel VALUES (1, 1); -INSERT INTO test.hash_rel VALUES (2, 2); -INSERT INTO test.hash_rel VALUES (3, 3); -SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); -ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; -SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -SELECT * FROM test.hash_rel; -SELECT pathman.disable_parent('test.hash_rel'); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -SELECT * FROM test.hash_rel; -SELECT pathman.enable_parent('test.hash_rel'); -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -SELECT * FROM test.hash_rel; -SELECT pathman.drop_partitions('test.hash_rel'); -SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); 
-SELECT COUNT(*) FROM test.hash_rel; -SELECT COUNT(*) FROM ONLY test.hash_rel; -INSERT INTO test.hash_rel VALUES (4, 4); -INSERT INTO test.hash_rel VALUES (5, 5); -INSERT INTO test.hash_rel VALUES (6, 6); -SELECT COUNT(*) FROM test.hash_rel; -SELECT COUNT(*) FROM ONLY test.hash_rel; - -CREATE TABLE test.range_rel ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP, - txt TEXT); -CREATE INDEX ON test.range_rel (dt); -INSERT INTO test.range_rel (dt, txt) -SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); -ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); -SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); -SELECT COUNT(*) FROM test.range_rel; -SELECT COUNT(*) FROM ONLY test.range_rel; - -CREATE TABLE test.num_range_rel ( - id SERIAL PRIMARY KEY, - txt TEXT); -SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); -SELECT COUNT(*) FROM test.num_range_rel; -SELECT COUNT(*) FROM ONLY test.num_range_rel; -INSERT INTO test.num_range_rel - SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; -SELECT COUNT(*) FROM test.num_range_rel; -SELECT COUNT(*) FROM ONLY test.num_range_rel; - -SELECT * FROM ONLY test.range_rel UNION SELECT * FROM test.range_rel; - -SET pg_pathman.enable_runtimeappend = OFF; -SET pg_pathman.enable_runtimemergeappend = OFF; - -VACUUM; - -/* update triggers test */ -SELECT pathman.create_hash_update_trigger('test.hash_rel'); -UPDATE test.hash_rel SET value = 7 WHERE value = 6; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 7; -SELECT * FROM test.hash_rel WHERE value = 7; - -SELECT pathman.create_range_update_trigger('test.num_range_rel'); -UPDATE test.num_range_rel SET id = 3001 WHERE id = 1; 
-EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id = 3001; -SELECT * FROM test.num_range_rel WHERE id = 3001; - -SET enable_indexscan = OFF; -SET enable_bitmapscan = OFF; -SET enable_seqscan = ON; - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; --- Temporarily commented out --- EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value BETWEEN 1 AND 2; --- QUERY PLAN --- ------------------------------------------------- --- Append --- -> Seq Scan on hash_rel_1 --- Filter: ((value >= 1) AND (value <= 2)) --- -> Seq Scan on hash_rel_2 --- Filter: ((value >= 1) AND (value <= 2)) --- (5 rows) -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); - - -SET enable_indexscan = ON; -SET enable_bitmapscan = OFF; -SET enable_seqscan = OFF; - -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; -EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 
id >= 1500 AND id < 2500; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt DESC; - -/* - * Sorting - */ -SET enable_indexscan = OFF; -SET enable_seqscan = ON; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; -SET enable_indexscan = ON; -SET enable_seqscan = OFF; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; - -/* - * Join - */ -SET enable_hashjoin = OFF; -set enable_nestloop = OFF; -SET enable_mergejoin = ON; - -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - -/* - * Test CTE query - */ -EXPLAIN (COSTS OFF) - WITH ttt AS 
(SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') -SELECT * FROM ttt; - -EXPLAIN (COSTS OFF) - WITH ttt AS (SELECT * FROM test.hash_rel WHERE value = 2) -SELECT * FROM ttt; - - -/* - * Test RuntimeAppend - */ - -create or replace function test.pathman_assert(smt bool, error_msg text) returns text as $$ -begin - if not smt then - raise exception '%', error_msg; - end if; - - return 'ok'; -end; -$$ language plpgsql; - -create or replace function test.pathman_equal(a text, b text, error_msg text) returns text as $$ -begin - if a != b then - raise exception '''%'' is not equal to ''%'', %', a, b, error_msg; - end if; - - return 'equal'; -end; -$$ language plpgsql; - -create or replace function test.pathman_test(query text) returns jsonb as $$ -declare - plan jsonb; -begin - execute 'explain (analyze, format json)' || query into plan; - - return plan; -end; -$$ language plpgsql; - -create or replace function test.pathman_test_1() returns text as $$ -declare - plan jsonb; - num int; -begin - plan = test.pathman_test('select * from test.runtime_test_1 where id = (select * from test.run_values limit 1)'); - - perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, - '"Custom Scan"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Custom Plan Provider')::text, - '"RuntimeAppend"', - 'wrong plan provider'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Relation Name')::text, - format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(1), 6)), - 'wrong partition'); - - select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans') into num; - perform test.pathman_equal(num::text, '2', 'expected 2 child plans for custom scan'); - - return 'ok'; -end; -$$ language plpgsql; - -create or replace function test.pathman_test_2() returns text as $$ -declare - plan jsonb; - num int; -begin - plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from 
test.run_values limit 4)'); - - perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, - '"Nested Loop"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, - '"Custom Scan"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, - '"RuntimeAppend"', - 'wrong plan provider'); - - select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; - perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); - - for i in 0..3 loop - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->i->'Relation Name')::text, - format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), - 'wrong partition'); - - num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; - perform test.pathman_equal(num::text, '1', 'expected 1 loop'); - end loop; - - return 'ok'; -end; -$$ language plpgsql; - -create or replace function test.pathman_test_3() returns text as $$ -declare - plan jsonb; - num int; -begin - plan = test.pathman_test('select * from test.runtime_test_1 a join test.run_values b on a.id = b.val'); - - perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, - '"Nested Loop"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, - '"Custom Scan"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, - '"RuntimeAppend"', - 'wrong plan provider'); - - select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; - perform test.pathman_equal(num::text, '6', 'expected 6 child plans for custom scan'); - - for i in 0..5 loop - num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; - perform test.pathman_assert(num > 0 and num <= 1718, 'expected no more than 1718 loops'); - end loop; - - return 'ok'; -end; -$$ language 
plpgsql; - -create or replace function test.pathman_test_4() returns text as $$ -declare - plan jsonb; - num int; -begin - plan = test.pathman_test('select * from test.category c, lateral' || - '(select * from test.runtime_test_2 g where g.category_id = c.id order by rating limit 4) as tg'); - - perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, - '"Nested Loop"', - 'wrong plan type'); - - /* Limit -> Custom Scan */ - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Node Type')::text, - '"Custom Scan"', - 'wrong plan type'); - - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Custom Plan Provider')::text, - '"RuntimeMergeAppend"', - 'wrong plan provider'); - - select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans') into num; - perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); - - for i in 0..3 loop - perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Relation Name')::text, - format('"runtime_test_2_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), - 'wrong partition'); - - num = plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Actual Loops'; - perform test.pathman_assert(num = 1, 'expected no more than 1 loops'); - end loop; - - return 'ok'; -end; -$$ language plpgsql; - -create or replace function test.pathman_test_5() returns text as $$ -declare - res record; -begin - select - from test.runtime_test_3 - where id = (select * from test.vals order by val limit 1) - limit 1 - into res; /* test empty tlist */ - - - select id, generate_series(1, 2) gen, val - from test.runtime_test_3 - where id = any (select * from test.vals order by val limit 5) - order by id, gen, val - offset 1 limit 1 - into res; /* without IndexOnlyScan */ - - perform test.pathman_equal(res.id::text, '1', 'id is incorrect (t2)'); - perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t2)'); - perform 
test.pathman_equal(res.val::text, 'k = 1', 'val is incorrect (t2)'); - - - select id - from test.runtime_test_3 - where id = any (select * from test.vals order by val limit 5) - order by id - offset 3 limit 1 - into res; /* with IndexOnlyScan */ - - perform test.pathman_equal(res.id::text, '4', 'id is incorrect (t3)'); - - - select v.val v1, generate_series(2, 2) gen, t.val v2 - from test.runtime_test_3 t join test.vals v on id = v.val - order by v1, gen, v2 - limit 1 - into res; - - perform test.pathman_equal(res.v1::text, '1', 'v1 is incorrect (t4)'); - perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t4)'); - perform test.pathman_equal(res.v2::text, 'k = 1', 'v2 is incorrect (t4)'); - - return 'ok'; -end; -$$ language plpgsql -set pg_pathman.enable = true -set enable_hashjoin = off -set enable_mergejoin = off; - - - -create table test.run_values as select generate_series(1, 10000) val; -create table test.runtime_test_1(id serial primary key, val real); -insert into test.runtime_test_1 select generate_series(1, 10000), random(); -select pathman.create_hash_partitions('test.runtime_test_1', 'id', 6); - -create table test.category as (select id, 'cat' || id::text as name from generate_series(1, 4) id); -create table test.runtime_test_2 (id serial, category_id int not null, name text, rating real); -insert into test.runtime_test_2 (select id, (id % 6) + 1 as category_id, 'good' || id::text as name, random() as rating from generate_series(1, 100000) id); -create index on test.runtime_test_2 (category_id, rating); -select pathman.create_hash_partitions('test.runtime_test_2', 'category_id', 6); - -create table test.vals as (select generate_series(1, 10000) as val); -create table test.runtime_test_3(val text, id serial not null); -insert into test.runtime_test_3(id, val) select * from generate_series(1, 10000) k, format('k = %s', k); -select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); -create index on test.runtime_test_3 (id); 
-create index on test.runtime_test_3_0 (id); - - -analyze test.run_values; -analyze test.runtime_test_1; -analyze test.runtime_test_2; -analyze test.runtime_test_3; -analyze test.runtime_test_3_0; - - -set enable_mergejoin = off; -set enable_hashjoin = off; -set pg_pathman.enable_runtimeappend = on; -set pg_pathman.enable_runtimemergeappend = on; -select test.pathman_test_1(); /* RuntimeAppend (select ... where id = (subquery)) */ -select test.pathman_test_2(); /* RuntimeAppend (select ... where id = any(subquery)) */ -select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ -select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ -select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ - -set pg_pathman.enable_runtimeappend = off; -set pg_pathman.enable_runtimemergeappend = off; -set enable_mergejoin = on; -set enable_hashjoin = on; - -drop table test.run_values, test.runtime_test_1, test.runtime_test_2, test.runtime_test_3, test.vals cascade; - -/* - * Test split and merge - */ - -/* Split first partition in half */ -SELECT pathman.split_range_partition('test.num_range_rel_1', 500); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; - -SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); - -/* Merge two partitions into one */ -SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; - -SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); - -/* Append and prepend partitions */ -SELECT pathman.append_range_partition('test.num_range_rel'); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; -SELECT pathman.prepend_range_partition('test.num_range_rel'); -EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; -SELECT 
pathman.drop_range_partition('test.num_range_rel_7'); - -SELECT pathman.append_range_partition('test.range_rel'); -SELECT pathman.prepend_range_partition('test.range_rel'); -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; -SELECT pathman.drop_range_partition('test.range_rel_7'); -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; -SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); -SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; -CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); -SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); -SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; -SELECT pathman.detach_range_partition('test.range_rel_archive'); -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; -CREATE TABLE test.range_rel_test1 ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP, - txt TEXT, - abc INTEGER); -SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); -CREATE TABLE test.range_rel_test2 ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP); -SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); - -/* - * Zero partitions count and adding partitions with specified name - */ -CREATE TABLE test.zero( - id SERIAL PRIMARY KEY, - value INT NOT NULL); -INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; -SELECT 
pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); -SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); -SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); -SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); -SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); -SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); -SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); -DROP TABLE test.zero CASCADE; - -/* - * Check that altering table columns doesn't break trigger - */ -ALTER TABLE test.hash_rel ADD COLUMN abc int; -INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); -SELECT * FROM test.hash_rel WHERE id = 123; - -/* - * Clean up - */ -SELECT pathman.drop_partitions('test.hash_rel'); -SELECT COUNT(*) FROM ONLY test.hash_rel; -SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); -SELECT pathman.drop_partitions('test.hash_rel', TRUE); -SELECT COUNT(*) FROM ONLY test.hash_rel; -DROP TABLE test.hash_rel CASCADE; - -SELECT pathman.drop_partitions('test.num_range_rel'); -DROP TABLE test.num_range_rel CASCADE; - -DROP TABLE test.range_rel CASCADE; - -/* Test automatic partition creation */ -CREATE TABLE test.range_rel ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP NOT NULL); -SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); -INSERT INTO test.range_rel (dt) -SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); - -INSERT INTO test.range_rel (dt) -SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); - -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; -SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; -EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; -SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; - -SELECT pathman.disable_auto('test.range_rel'); -INSERT INTO 
test.range_rel (dt) VALUES ('2015-06-01'); -SELECT pathman.enable_auto('test.range_rel'); -INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); - -DROP TABLE test.range_rel CASCADE; -SELECT * FROM pathman.pathman_config; - -/* Check overlaps */ -CREATE TABLE test.num_range_rel ( - id SERIAL PRIMARY KEY, - txt TEXT); -SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 4001, 5000); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 4000, 5000); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 3999, 5000); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 3000, 3500); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 0, 999); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 0, 1000); -SELECT pathman.check_overlap('test.num_range_rel'::regclass::oid, 0, 1001); - -/* CaMeL cAsE table names and attributes */ -CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); -SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); -SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); -INSERT INTO test."TeSt" VALUES (1, 1); -INSERT INTO test."TeSt" VALUES (2, 2); -INSERT INTO test."TeSt" VALUES (3, 3); -SELECT * FROM test."TeSt"; -SELECT pathman.create_hash_update_trigger('test."TeSt"'); -UPDATE test."TeSt" SET a = 1; -SELECT * FROM test."TeSt"; -SELECT * FROM test."TeSt" WHERE a = 1; -EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; -SELECT pathman.drop_partitions('test."TeSt"'); -SELECT * FROM test."TeSt"; - -CREATE TABLE test."RangeRel" ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP NOT NULL, - txt TEXT); -INSERT INTO test."RangeRel" (dt, txt) -SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; -SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); -SELECT 
pathman.append_range_partition('test."RangeRel"'); -SELECT pathman.prepend_range_partition('test."RangeRel"'); -SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); -SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); -SELECT pathman.drop_partitions('test."RangeRel"'); -SELECT pathman.create_partitions_from_range('test."RangeRel"', 'dt', '2015-01-01'::DATE, '2015-01-05'::DATE, '1 day'::INTERVAL); -DROP TABLE test."RangeRel" CASCADE; -SELECT * FROM pathman.pathman_config; -CREATE TABLE test."RangeRel" ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP NOT NULL, - txt TEXT); -SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); -SELECT pathman.drop_partitions('test."RangeRel"'); -SELECT pathman.create_partitions_from_range('test."RangeRel"', 'id', 1, 300, 100); -DROP TABLE test."RangeRel" CASCADE; - -DROP EXTENSION pg_pathman; - -/* Test that everithing works fine without schemas */ -CREATE EXTENSION pg_pathman; - -/* Hash */ -CREATE TABLE hash_rel ( - id SERIAL PRIMARY KEY, - value INTEGER NOT NULL); -INSERT INTO hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; -SELECT create_hash_partitions('hash_rel', 'value', 3); -EXPLAIN (COSTS OFF) SELECT * FROM hash_rel WHERE id = 1234; - -/* Range */ -CREATE TABLE range_rel ( - id SERIAL PRIMARY KEY, - dt TIMESTAMP NOT NULL, - value INTEGER); -INSERT INTO range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; -SELECT create_range_partitions('range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); -SELECT merge_range_partitions('range_rel_1', 'range_rel_2'); -SELECT split_range_partition('range_rel_1', '2010-02-15'::date); -SELECT append_range_partition('range_rel'); -SELECT prepend_range_partition('range_rel'); -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt < '2010-03-01'; -EXPLAIN (COSTS OFF) SELECT * FROM range_rel 
WHERE dt > '2010-12-15'; - -/* Temporary table for JOINs */ -CREATE TABLE tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); -INSERT INTO tmp VALUES (1, 1), (2, 2); - -/* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE range_rel SET value = 111 WHERE dt = '2010-06-15'; -UPDATE range_rel SET value = 111 WHERE dt = '2010-06-15'; -SELECT * FROM range_rel WHERE dt = '2010-06-15'; -EXPLAIN (COSTS OFF) DELETE FROM range_rel WHERE dt = '2010-06-15'; -DELETE FROM range_rel WHERE dt = '2010-06-15'; -SELECT * FROM range_rel WHERE dt = '2010-06-15'; -EXPLAIN (COSTS OFF) UPDATE range_rel r SET value = t.value FROM tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -UPDATE range_rel r SET value = t.value FROM tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM range_rel r USING tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -DELETE FROM range_rel r USING tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - -/* Create range partitions from whole range */ -SELECT drop_partitions('range_rel'); -SELECT create_partitions_from_range('range_rel', 'id', 1, 1000, 100); -SELECT drop_partitions('range_rel', TRUE); -SELECT create_partitions_from_range('range_rel', 'dt', '2015-01-01'::date, '2015-12-01'::date, '1 month'::interval); -EXPLAIN (COSTS OFF) SELECT * FROM range_rel WHERE dt = '2015-12-15'; - -CREATE TABLE messages(id SERIAL PRIMARY KEY, msg TEXT); -CREATE TABLE replies(id SERIAL PRIMARY KEY, message_id INTEGER REFERENCES messages(id), msg TEXT); -INSERT INTO messages SELECT g, md5(g::text) FROM generate_series(1, 10) as g; -INSERT INTO replies SELECT g, g, md5(g::text) FROM generate_series(1, 10) as g; -SELECT create_range_partitions('messages', 'id', 1, 100, 2); -ALTER TABLE replies DROP CONSTRAINT replies_message_id_fkey; -SELECT create_range_partitions('messages', 'id', 1, 100, 2); -EXPLAIN (COSTS OFF) SELECT * FROM messages; diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c new file mode 100644 index 00000000..216fd382 --- 
/dev/null +++ b/src/compat/pg_compat.c @@ -0,0 +1,654 @@ +/* ------------------------------------------------------------------------ + * + * pg_compat.c + * Compatibility tools for PostgreSQL API + * + * Copyright (c) 2016, Postgres Professional + * + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * ------------------------------------------------------------------------ + */ + +#include "compat/pg_compat.h" + +#include "utils.h" + +#include "access/htup_details.h" +#include "catalog/pg_class.h" +#include "catalog/pg_proc.h" +#include "foreign/fdwapi.h" +#include "optimizer/clauses.h" +#include "optimizer/pathnode.h" +#include "optimizer/prep.h" +#include "parser/parse_utilcmd.h" +#include "port.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include + + +/* + * ---------- + * Variants + * ---------- + */ + + +/* + * create_plain_partial_paths + * Build partial access paths for parallel scan of a plain relation + */ +#if PG_VERSION_NUM >= 100000 +void +create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) +{ + int parallel_workers; + + /* no more than max_parallel_workers_per_gather since 11 */ + parallel_workers = compute_parallel_worker_compat(rel, rel->pages, -1); + + /* If any limit was set to zero, the user doesn't want a parallel scan. */ + if (parallel_workers <= 0) + return; + + /* Add an unordered partial path based on a parallel sequential scan. */ + add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); +} +#elif PG_VERSION_NUM >= 90600 +void +create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) +{ + int parallel_workers; + + /* + * If the user has set the parallel_workers reloption, use that; otherwise + * select a default number of workers. 
+ */ + if (rel->rel_parallel_workers != -1) + parallel_workers = rel->rel_parallel_workers; + else + { + int parallel_threshold; + + /* + * If this relation is too small to be worth a parallel scan, just + * return without doing anything ... unless it's an inheritance child. + * In that case, we want to generate a parallel path here anyway. It + * might not be worthwhile just for this relation, but when combined + * with all of its inheritance siblings it may well pay off. + */ + if (rel->pages < (BlockNumber) min_parallel_relation_size && + rel->reloptkind == RELOPT_BASEREL) + return; + + /* + * Select the number of workers based on the log of the size of the + * relation. This probably needs to be a good deal more + * sophisticated, but we need something here for now. Note that the + * upper limit of the min_parallel_relation_size GUC is chosen to + * prevent overflow here. + */ + parallel_workers = 1; + parallel_threshold = Max(min_parallel_relation_size, 1); + while (rel->pages >= (BlockNumber) (parallel_threshold * 3)) + { + parallel_workers++; + parallel_threshold *= 3; + if (parallel_threshold > INT_MAX / 3) + break; /* avoid overflow */ + } + } + + /* + * In no case use more than max_parallel_workers_per_gather workers. + */ + parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather); + + /* If any limit was set to zero, the user doesn't want a parallel scan. */ + if (parallel_workers <= 0) + return; + + /* Add an unordered partial path based on a parallel sequential scan. 
*/ + add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); +} +#endif + + +/* + * get_all_actual_clauses + */ +#if PG_VERSION_NUM >= 100000 +List * +get_all_actual_clauses(List *restrictinfo_list) +{ + List *result = NIL; + ListCell *l; + + foreach(l, restrictinfo_list) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); + + Assert(IsA(rinfo, RestrictInfo)); + + result = lappend(result, rinfo->clause); + } + return result; +} +#endif + + +/* + * make_restrictinfos_from_actual_clauses + */ +#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#include "optimizer/restrictinfo.h" +#else +#include "optimizer/restrictinfo.h" +#include "optimizer/var.h" +#endif /* 12 */ + +List * +make_restrictinfos_from_actual_clauses(PlannerInfo *root, + List *clause_list) +{ + List *result = NIL; + ListCell *l; + + foreach(l, clause_list) + { + Expr *clause = (Expr *) lfirst(l); + bool pseudoconstant; + RestrictInfo *rinfo; + + /* + * It's pseudoconstant if it contains no Vars and no volatile + * functions. We probably can't see any sublinks here, so + * contain_var_clause() would likely be enough, but for safety use + * contain_vars_of_level() instead. 
+ */ + pseudoconstant = + !contain_vars_of_level((Node *) clause, 0) && + !contain_volatile_functions((Node *) clause); + if (pseudoconstant) + { + /* tell createplan.c to check for gating quals */ + root->hasPseudoConstantQuals = true; + } + + rinfo = make_restrictinfo_compat( + root, + clause, + true, + false, + pseudoconstant, + root->qual_security_level, + NULL, + NULL, + NULL); + result = lappend(result, rinfo); + } + return result; +} +#endif + + +/* + * make_result + * Build a Result plan node + */ +#if PG_VERSION_NUM >= 90600 +Result * +make_result(List *tlist, + Node *resconstantqual, + Plan *subplan) +{ + Result *node = makeNode(Result); + Plan *plan = &node->plan; + + plan->targetlist = tlist; + plan->qual = NIL; + plan->lefttree = subplan; + plan->righttree = NULL; + node->resconstantqual = resconstantqual; + + return node; +} +#endif + + +/* + * Examine contents of MemoryContext. + */ +#if PG_VERSION_NUM >= 90600 +void +McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals) +{ + MemoryContextCounters local_totals; + MemoryContext child; + + Assert(MemoryContextIsValid(context)); + + /* Examine the context itself */ +#if PG_VERSION_NUM >= 140000 + (*context->methods->stats) (context, NULL, NULL, totals, true); +#elif PG_VERSION_NUM >= 110000 + (*context->methods->stats) (context, NULL, NULL, totals); +#else + (*context->methods->stats) (context, level, false, totals); +#endif + + memset(&local_totals, 0, sizeof(local_totals)); + + if (!examine_children) + return; + + /* Examine children */ + for (child = context->firstchild; + child != NULL; + child = child->nextchild) + { + + McxtStatsInternal(child, level + 1, + examine_children, + &local_totals); + } + + /* Save children stats */ + totals->nblocks += local_totals.nblocks; + totals->freechunks += local_totals.freechunks; + totals->totalspace += local_totals.totalspace; + totals->freespace += local_totals.freespace; +} +#endif + + +/* + * oid_cmp + 
* + * qsort comparison function for Oids; + * needed for find_inheritance_children_array() function + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 100000 +int +oid_cmp(const void *p1, const void *p2) +{ + Oid v1 = *((const Oid *) p1); + Oid v2 = *((const Oid *) p2); + + if (v1 < v2) + return -1; + + if (v1 > v2) + return 1; + + return 0; +} +#endif + + +/* + * set_dummy_rel_pathlist + * Build a dummy path for a relation that's been excluded by constraints + * + * Rather than inventing a special "dummy" path type, we represent this as an + * AppendPath with no members (see also IS_DUMMY_PATH/IS_DUMMY_REL macros). + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 +void +set_dummy_rel_pathlist(RelOptInfo *rel) +{ + /* Set dummy size estimates --- we leave attr_widths[] as zeroes */ + rel->rows = 0; + rel->width = 0; + + /* Discard any pre-existing paths; no further need for them */ + rel->pathlist = NIL; + + add_path(rel, (Path *) create_append_path(rel, NIL, NULL)); + + /* + * We set the cheapest path immediately, to ensure that IS_DUMMY_REL() + * will recognize the relation as dummy if anyone asks. This is redundant + * when we're called from set_rel_size(), but not when called from + * elsewhere, and doing it twice is harmless anyway. + */ + set_cheapest(rel); +} +#endif + + +#if PG_VERSION_NUM >= 90600 +/* + * If this relation could possibly be scanned from within a worker, then set + * its consider_parallel flag. + */ +void +set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, + RangeTblEntry *rte) +{ +#if PG_VERSION_NUM >= 100000 +#define is_parallel_safe_compat(root, exprs) is_parallel_safe((root), (exprs)) +#elif PG_VERSION_NUM >= 90500 +#define is_parallel_safe_compat(root, exprs) \ + (!has_parallel_hazard((exprs), false)) +#endif + + /* + * The flag has previously been initialized to false, so we can just + * return if it becomes clear that we can't safely set it. 
+ */ + Assert(!rel->consider_parallel); + + /* Don't call this if parallelism is disallowed for the entire query. */ + Assert(root->glob->parallelModeOK); + + /* This should only be called for baserels and appendrel children. */ + Assert(rel->reloptkind == RELOPT_BASEREL || + rel->reloptkind == RELOPT_OTHER_MEMBER_REL); + + /* Assorted checks based on rtekind. */ + switch (rte->rtekind) + { + case RTE_RELATION: + + /* + * Currently, parallel workers can't access the leader's temporary + * tables. We could possibly relax this if the wrote all of its + * local buffers at the start of the query and made no changes + * thereafter (maybe we could allow hint bit changes), and if we + * taught the workers to read them. Writing a large number of + * temporary buffers could be expensive, though, and we don't have + * the rest of the necessary infrastructure right now anyway. So + * for now, bail out if we see a temporary table. + */ + if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP) + return; + + /* + * Table sampling can be pushed down to workers if the sample + * function and its arguments are safe. + */ + if (rte->tablesample != NULL) + { + char proparallel = func_parallel(rte->tablesample->tsmhandler); + + if (proparallel != PROPARALLEL_SAFE) + return; + if (!is_parallel_safe_compat( + root, (Node *) rte->tablesample->args)) + return; + } + + /* + * Ask FDWs whether they can support performing a ForeignScan + * within a worker. Most often, the answer will be no. For + * example, if the nature of the FDW is such that it opens a TCP + * connection with a remote server, each parallel worker would end + * up with a separate connection, and these connections might not + * be appropriately coordinated between workers and the leader. 
+ */ + if (rte->relkind == RELKIND_FOREIGN_TABLE) + { + Assert(rel->fdwroutine); + if (!rel->fdwroutine->IsForeignScanParallelSafe) + return; + if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte)) + return; + } + + /* + * There are additional considerations for appendrels, which we'll + * deal with in set_append_rel_size and set_append_rel_pathlist. + * For now, just set consider_parallel based on the rel's own + * quals and targetlist. + */ + break; + + case RTE_SUBQUERY: + + /* + * There's no intrinsic problem with scanning a subquery-in-FROM + * (as distinct from a SubPlan or InitPlan) in a parallel worker. + * If the subquery doesn't happen to have any parallel-safe paths, + * then flagging it as consider_parallel won't change anything, + * but that's true for plain tables, too. We must set + * consider_parallel based on the rel's own quals and targetlist, + * so that if a subquery path is parallel-safe but the quals and + * projection we're sticking onto it are not, we correctly mark + * the SubqueryScanPath as not parallel-safe. (Note that + * set_subquery_pathlist() might push some of these quals down + * into the subquery itself, but that doesn't change anything.) + */ + break; + + case RTE_JOIN: + /* Shouldn't happen; we're only considering baserels here. */ + Assert(false); + return; + + case RTE_FUNCTION: + /* Check for parallel-restricted functions. */ + if (!is_parallel_safe_compat(root, (Node *) rte->functions)) + return; + break; + +#if PG_VERSION_NUM >= 100000 + case RTE_TABLEFUNC: + /* not parallel safe */ + return; +#endif + + case RTE_VALUES: + /* Check for parallel-restricted functions. */ + if (!is_parallel_safe_compat(root, (Node *) rte->values_lists)) + return; + break; + + case RTE_CTE: + + /* + * CTE tuplestores aren't shared among parallel workers, so we + * force all CTE scans to happen in the leader. 
Also, populating + * the CTE would require executing a subplan that's not available + * in the worker, might be parallel-restricted, and must get + * executed only once. + */ + return; + +#if PG_VERSION_NUM >= 100000 + case RTE_NAMEDTUPLESTORE: + /* + * tuplestore cannot be shared, at least without more + * infrastructure to support that. + */ + return; +#endif + +#if PG_VERSION_NUM >= 120000 + case RTE_RESULT: + /* RESULT RTEs, in themselves, are no problem. */ + break; +#endif /* 12 */ + + } + + /* + * If there's anything in baserestrictinfo that's parallel-restricted, we + * give up on parallelizing access to this relation. We could consider + * instead postponing application of the restricted quals until we're + * above all the parallelism in the plan tree, but it's not clear that + * that would be a win in very many cases, and it might be tricky to make + * outer join clauses work correctly. It would likely break equivalence + * classes, too. + */ + if (!is_parallel_safe_compat(root, (Node *) rel->baserestrictinfo)) + return; + + /* + * Likewise, if the relation's outputs are not parallel-safe, give up. + * (Usually, they're just Vars, but sometimes they're not.) + */ + if (!is_parallel_safe_compat(root, (Node *) rel->reltarget->exprs)) + return; + + /* We have a winner. */ + rel->consider_parallel = true; +} +#endif + + +/* + * Returns the relpersistence associated with a given relation. 
+ * + * NOTE: this function is implemented in 9.6 + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 +char +get_rel_persistence(Oid relid) +{ + HeapTuple tp; + Form_pg_class reltup; + char result; + + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(tp)) + elog(ERROR, "cache lookup failed for relation %u", relid); + + reltup = (Form_pg_class) GETSTRUCT(tp); + result = reltup->relpersistence; + ReleaseSysCache(tp); + + return result; +} +#endif + +#if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ + (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) +/* + * Return a palloc'd bare attribute map for tuple conversion, matching input + * and output columns by name. (Dropped columns are ignored in both input and + * output.) This is normally a subroutine for convert_tuples_by_name, but can + * be used standalone. + */ +AttrNumber * +convert_tuples_by_name_map(TupleDesc indesc, + TupleDesc outdesc, + const char *msg) +{ + AttrNumber *attrMap; + int n; + int i; + + n = outdesc->natts; + attrMap = (AttrNumber *) palloc0(n * sizeof(AttrNumber)); + for (i = 0; i < n; i++) + { + Form_pg_attribute att = TupleDescAttr(outdesc, i); + char *attname; + Oid atttypid; + int32 atttypmod; + int j; + + if (att->attisdropped) + continue; /* attrMap[i] is already 0 */ + attname = NameStr(att->attname); + atttypid = att->atttypid; + atttypmod = att->atttypmod; + for (j = 0; j < indesc->natts; j++) + { + att = TupleDescAttr(indesc, j); + if (att->attisdropped) + continue; + if (strcmp(attname, NameStr(att->attname)) == 0) + { + /* Found it, check type */ + if (atttypid != att->atttypid || atttypmod != att->atttypmod) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg_internal("%s", _(msg)), + errdetail("Attribute \"%s\" of type %s does not match corresponding attribute of type %s.", + attname, + format_type_be(outdesc->tdtypeid), + format_type_be(indesc->tdtypeid)))); + attrMap[i] = (AttrNumber) (j + 1); + break; + } + } + if 
(attrMap[i] == 0) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg_internal("%s", _(msg)), + errdetail("Attribute \"%s\" of type %s does not exist in type %s.", + attname, + format_type_be(outdesc->tdtypeid), + format_type_be(indesc->tdtypeid)))); + } + + return attrMap; +} +#endif + +/* + * ------------- + * Common code + * ------------- + */ + +void +set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) +{ + double parent_rows = 0; + double parent_size = 0; + ListCell *l; + + foreach(l, root->append_rel_list) + { + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); + Index childRTindex, + parentRTindex = rti; + RelOptInfo *childrel; + + /* append_rel_list contains all append rels; ignore others */ + if (appinfo->parent_relid != parentRTindex) + continue; + + childRTindex = appinfo->child_relid; + + childrel = find_base_rel(root, childRTindex); + Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL); + + /* + * Accumulate size information from each live child. 
+ */ + Assert(childrel->rows >= 0); + parent_rows += childrel->rows; + +#if PG_VERSION_NUM >= 90600 + parent_size += childrel->reltarget->width * childrel->rows; +#else + parent_size += childrel->width * childrel->rows; +#endif + } + + /* Set 'rows' for append relation */ + rel->rows = parent_rows; + + if (parent_rows == 0) + parent_rows = 1; + +#if PG_VERSION_NUM >= 90600 + rel->reltarget->width = rint(parent_size / parent_rows); +#else + rel->width = rint(parent_size / parent_rows); +#endif + + rel->tuples = parent_rows; +} diff --git a/src/compat/rowmarks_fix.c b/src/compat/rowmarks_fix.c new file mode 100644 index 00000000..35eea44b --- /dev/null +++ b/src/compat/rowmarks_fix.c @@ -0,0 +1,54 @@ +/* ------------------------------------------------------------------------ + * + * rowmarks_fix.h + * Hack incorrect RowMark generation due to unset 'RTE->inh' flag + * NOTE: this code is only useful for vanilla + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#include "compat/rowmarks_fix.h" +#include "planner_tree_modification.h" + +#include "access/sysattr.h" +#include "catalog/pg_type.h" +#include "nodes/nodeFuncs.h" +#include "optimizer/planmain.h" +#include "utils/builtins.h" +#include "utils/rel.h" + + +#if PG_VERSION_NUM >= 90600 + + +/* Add missing "tableoid" column for partitioned table */ +void +append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc) +{ + Var *var; + char resname[32]; + TargetEntry *tle; + + var = makeVar(rc->rti, + TableOidAttributeNumber, + OIDOID, + -1, + InvalidOid, + 0); + + snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); + + tle = makeTargetEntry((Expr *) var, + list_length(root->processed_tlist) + 1, + pstrdup(resname), + true); + + root->processed_tlist = lappend(root->processed_tlist, tle); + + add_vars_to_targetlist_compat(root, list_make1(var), bms_make_singleton(0)); +} + + +#endif diff --git a/src/debug_print.c 
b/src/debug_print.c new file mode 100644 index 00000000..bac1d622 --- /dev/null +++ b/src/debug_print.c @@ -0,0 +1,106 @@ +/* ------------------------------------------------------------------------ + * + * debug_print.c + * Print sophisticated structs as CSTRING + * + * Copyright (c) 2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#include +#include "rangeset.h" + +#include "postgres.h" +#include "fmgr.h" +#include "executor/tuptable.h" +#include "nodes/bitmapset.h" +#include "nodes/parsenodes.h" +#include "nodes/pg_list.h" +#include "lib/stringinfo.h" +#include "utils/lsyscache.h" + + +/* + * Print Bitmapset as cstring. + */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +bms_print(Bitmapset *bms) +{ + StringInfoData str; + int x; + + initStringInfo(&str); + x = -1; + while ((x = bms_next_member(bms, x)) >= 0) + appendStringInfo(&str, " %d", x); + + return str.data; +} + +/* + * Print list of IndexRanges as cstring. + */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +rangeset_print(List *rangeset) +{ + StringInfoData str; + ListCell *lc; + bool first_irange = true; + char lossy = 'L', /* Lossy IndexRange */ + complete = 'C'; /* Complete IndexRange */ + + initStringInfo(&str); + + foreach (lc, rangeset) + { + IndexRange irange = lfirst_irange(lc); + + /* Append comma if needed */ + if (!first_irange) + appendStringInfo(&str, ", "); + + if (!is_irange_valid(irange)) + appendStringInfo(&str, "X"); + else if (irange_lower(irange) == irange_upper(irange)) + appendStringInfo(&str, "%u%c", + irange_lower(irange), + (is_irange_lossy(irange) ? lossy : complete)); + else + appendStringInfo(&str, "[%u-%u]%c", + irange_lower(irange), irange_upper(irange), + (is_irange_lossy(irange) ? lossy : complete)); + + first_irange = false; + } + + return str.data; +} + +/* + * Print IndexRange struct as cstring. 
+ */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif +static char * +irange_print(IndexRange irange) +{ + StringInfoData str; + + initStringInfo(&str); + + appendStringInfo(&str, "{ valid: %s, lossy: %s, lower: %u, upper: %u }", + (is_irange_valid(irange) ? "true" : "false"), + (is_irange_lossy(irange) ? "true" : "false"), + irange_lower(irange), + irange_upper(irange)); + + return str.data; +} diff --git a/src/declarative.c b/src/declarative.c new file mode 100644 index 00000000..42e9ffac --- /dev/null +++ b/src/declarative.c @@ -0,0 +1,382 @@ +#include "pathman.h" +#include "declarative.h" +#include "utils.h" +#include "partition_creation.h" + +#include "access/htup_details.h" +#include "catalog/namespace.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "fmgr.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "optimizer/planner.h" +#include "parser/parse_coerce.h" +#include "parser/parse_func.h" +#include "utils/builtins.h" +#include "utils/int8.h" +#include "utils/int8.h" +#include "utils/lsyscache.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "utils/varbit.h" + +/* + * Modifies query of declarative partitioning commands, + * There is a little hack here, ATTACH PARTITION command + * expects relation with REL_PARTITIONED_TABLE relkind. 
+ * To avoid this check we negate subtype, and then after the checks + * we set it back (look `is_pathman_related_partitioning_cmd`) + */ +void +modify_declarative_partitioning_query(Query *query) +{ + if (query->commandType != CMD_UTILITY) + return; + + if (IsA(query->utilityStmt, AlterTableStmt)) + { + PartRelationInfo *prel; + ListCell *lcmd; + Oid relid; + + AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; + relid = RangeVarGetRelid(stmt->relation, NoLock, true); + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + close_pathman_relation_info(prel); + + foreach(lcmd, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + switch (cmd->subtype) + { + case AT_AttachPartition: + case AT_DetachPartition: + cmd->subtype = -cmd->subtype; + break; + default: + break; + } + } + } + } +} + +/* is it one of declarative partitioning commands? */ +bool +is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) +{ + PartRelationInfo *prel; + + if (IsA(parsetree, AlterTableStmt)) + { + ListCell *lc; + AlterTableStmt *stmt = (AlterTableStmt *) parsetree; + int cnt = 0; + + *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, stmt->missing_ok); + + if (stmt->missing_ok && *parent_relid == InvalidOid) + return false; + + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) + return false; + + close_pathman_relation_info(prel); + + /* + * Since cmds can contain multiple commmands but we can handle only + * two of them here, so we need to check that there are only commands + * we can handle. In case if cmds contain other commands we skip all + * commands in this statement. 
+ */ + foreach(lc, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); + switch (abs(cmd->subtype)) + { + case AT_AttachPartition: + case AT_DetachPartition: + /* + * We need to fix all subtypes, + * possibly we're not going to handle this + */ + cmd->subtype = abs(cmd->subtype); + continue; + default: + cnt++; + } + } + + return (cnt == 0); + } + else if (IsA(parsetree, CreateStmt)) + { + /* inhRelations != NULL, partbound != NULL, tableElts == NULL */ + CreateStmt *stmt = (CreateStmt *) parsetree; + + if (stmt->inhRelations && stmt->partbound != NULL) + { + RangeVar *rv = castNode(RangeVar, linitial(stmt->inhRelations)); + *parent_relid = RangeVarGetRelid(rv, NoLock, false); + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) + return false; + + close_pathman_relation_info(prel); + if (stmt->tableElts != NIL) + elog(ERROR, "pg_pathman doesn't support column definitions " + "in declarative syntax yet"); + + return true; + + } + } + return false; +} + +static FuncExpr * +make_fn_expr(Oid funcOid, List *args) +{ + FuncExpr *fn_expr; + HeapTuple procTup; + Form_pg_proc procStruct; + + procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcOid)); + if (!HeapTupleIsValid(procTup)) + elog(ERROR, "cache lookup failed for function %u", funcOid); + procStruct = (Form_pg_proc) GETSTRUCT(procTup); + + fn_expr = makeFuncExpr(funcOid, procStruct->prorettype, args, + InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL); + ReleaseSysCache(procTup); + return fn_expr; +} + +/* + * Transform one constant in a partition bound spec + */ +static Const * +transform_bound_value(ParseState *pstate, A_Const *con, + Oid colType, int32 colTypmod) +{ + Node *value; + + /* Make it into a Const */ + value = (Node *) make_const(pstate, &con->val, con->location); + + /* Coerce to correct type */ + value = coerce_to_target_type(pstate, + value, exprType(value), + colType, + colTypmod, + COERCION_ASSIGNMENT, + COERCE_IMPLICIT_CAST, + -1); + + if (value == NULL) + 
ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("specified value cannot be cast to type %s", + format_type_be(colType)), + parser_errposition(pstate, con->location))); + + /* Simplify the expression, in case we had a coercion */ + if (!IsA(value, Const)) + value = (Node *) expression_planner((Expr *) value); + + /* Fail if we don't have a constant (i.e., non-immutable coercion) */ + if (!IsA(value, Const)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("specified value cannot be cast to type %s", + format_type_be(colType)), + errdetail("The cast requires a non-immutable conversion."), + errhint("Try putting the literal value in single quotes."), + parser_errposition(pstate, con->location))); + + return (Const *) value; +} + +/* handle ALTER TABLE .. ATTACH PARTITION command */ +void +handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) +{ + Oid partition_relid, + proc_args[] = { REGCLASSOID, REGCLASSOID, + ANYELEMENTOID, ANYELEMENTOID }; + + List *proc_name; + FmgrInfo proc_flinfo; + FunctionCallInfoData proc_fcinfo; + char *pathman_schema; + PartitionRangeDatum *ldatum, + *rdatum; + Const *lval, + *rval; + A_Const *con; + List *fn_args; + ParseState *pstate = make_parsestate(NULL); + PartRelationInfo *prel; + + PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + + /* in 10beta1, PartitionCmd->bound is (Node *) */ + PartitionBoundSpec *bound = (PartitionBoundSpec *) pcmd->bound; + + Assert(cmd->subtype == AT_AttachPartition); + + if (bound->strategy != PARTITION_STRATEGY_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_pathman only supports queries for range partitions"))); + + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) + elog(ERROR, "relation is not partitioned"); + + partition_relid = RangeVarGetRelid(pcmd->name, NoLock, false); + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, 
"pg_pathman schema not initialized"); + + /* Build function's name */ + proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(attach_range_partition))); + + if ((!list_length(bound->lowerdatums)) || + (!list_length(bound->upperdatums))) + elog(ERROR, "provide start and end value for range partition"); + + ldatum = (PartitionRangeDatum *) linitial(bound->lowerdatums); + con = castNode(A_Const, ldatum->value); + lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); + con = castNode(A_Const, rdatum->value); + rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + close_pathman_relation_info(prel); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(proc_name, 4, proc_args, false), &proc_flinfo); + + InitFunctionCallInfoData(proc_fcinfo, &proc_flinfo, + 4, InvalidOid, NULL, NULL); + proc_fcinfo.arg[0] = ObjectIdGetDatum(parent_relid); + proc_fcinfo.argnull[0] = false; + proc_fcinfo.arg[1] = ObjectIdGetDatum(partition_relid); + proc_fcinfo.argnull[1] = false; + + /* Make function expression, we will need it to determine argument types */ + fn_args = list_make4(NULL, NULL, lval, rval); + proc_fcinfo.flinfo->fn_expr = + (Node *) make_fn_expr(proc_fcinfo.flinfo->fn_oid, fn_args); + + proc_fcinfo.arg[2] = lval->constvalue; + proc_fcinfo.argnull[2] = lval->constisnull; + proc_fcinfo.arg[3] = rval->constvalue; + proc_fcinfo.argnull[3] = rval->constisnull; + + /* Invoke the callback */ + FunctionCallInvoke(&proc_fcinfo); +} + +/* handle ALTER TABLE .. 
DETACH PARTITION command */ +void +handle_detach_partition(AlterTableCmd *cmd) +{ + List *proc_name; + FmgrInfo proc_flinfo; + FunctionCallInfoData proc_fcinfo; + char *pathman_schema; + Oid partition_relid, + args = REGCLASSOID; + PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + + Assert(cmd->subtype == AT_DetachPartition); + partition_relid = RangeVarGetRelid(pcmd->name, NoLock, false); + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + + /* Build function's name */ + proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(detach_range_partition))); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(proc_name, 1, &args, false), &proc_flinfo); + + InitFunctionCallInfoData(proc_fcinfo, &proc_flinfo, + 4, InvalidOid, NULL, NULL); + proc_fcinfo.arg[0] = ObjectIdGetDatum(partition_relid); + proc_fcinfo.argnull[0] = false; + + /* Invoke the callback */ + FunctionCallInvoke(&proc_fcinfo); +} + +/* handle CREATE TABLE .. PARTITION OF FOR VALUES FROM .. TO .. 
*/ +void +handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) +{ + Bound start, + end; + PartRelationInfo *prel; + ParseState *pstate = make_parsestate(NULL); + PartitionRangeDatum *ldatum, + *rdatum; + Const *lval, + *rval; + A_Const *con; + + /* in 10beta1, PartitionCmd->bound is (Node *) */ + PartitionBoundSpec *bound = (PartitionBoundSpec *) stmt->partbound; + + /* we show errors earlier for these asserts */ + Assert(stmt->inhRelations != NULL); + Assert(stmt->tableElts == NIL); + + if (bound->strategy != PARTITION_STRATEGY_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_pathman only supports queries for range partitions"))); + + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)))); + + if (prel->parttype != PT_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned by RANGE", + get_rel_name_or_relid(parent_relid)))); + + ldatum = (PartitionRangeDatum *) linitial(bound->lowerdatums); + con = castNode(A_Const, ldatum->value); + lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); + con = castNode(A_Const, rdatum->value); + rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + close_pathman_relation_info(prel); + + start = lval->constisnull? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(lval->constvalue); + + end = rval->constisnull? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(rval->constvalue); + + /* more checks */ + check_range_available(parent_relid, &start, &end, lval->consttype, true); + + /* Create a new RANGE partition and return its Oid */ + create_single_range_partition_internal(parent_relid, + &start, + &end, + lval->consttype, + stmt->relation, + stmt->tablespacename); +} diff --git a/src/dsm_array.c b/src/dsm_array.c deleted file mode 100644 index 62039fb8..00000000 --- a/src/dsm_array.c +++ /dev/null @@ -1,321 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * dsm_array.c - * Allocate data in shared memory - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#include "pathman.h" -#include "dsm_array.h" - -#include "storage/shmem.h" -#include "storage/dsm.h" - - -static dsm_segment *segment = NULL; - -typedef struct DsmConfig -{ - dsm_handle segment_handle; - size_t block_size; - size_t blocks_count; - size_t first_free; -} DsmConfig; - -static DsmConfig *dsm_cfg = NULL; - - -/* - * Block header - * - * Its size must be equal to 4 bytes for 32bit and 8 bytes for 64bit. 
- * Otherwise it could screw up an alignment (for example on Sparc9) - */ -typedef uintptr_t BlockHeader; -typedef BlockHeader* BlockHeaderPtr; - -#define FREE_BIT 0x80000000 -#define is_free(header) \ - ((*header) & FREE_BIT) -#define set_free(header) \ - ((*header) | FREE_BIT) -#define set_used(header) \ - ((*header) & ~FREE_BIT) -#define get_length(header) \ - ((*header) & ~FREE_BIT) -#define set_length(header, length) \ - ((length) | ((*header) & FREE_BIT)) - -/* - * Amount of memory that need to be requested - * for shared memory to store dsm config - */ -Size -estimate_dsm_config_size() -{ - return (Size) MAXALIGN(sizeof(DsmConfig)); -} - -/* - * Initialize dsm config for arrays - */ -void -init_dsm_config() -{ - bool found; - dsm_cfg = ShmemInitStruct("pathman dsm_array config", sizeof(DsmConfig), &found); - if (!found) - { - dsm_cfg->segment_handle = 0; - dsm_cfg->block_size = 0; - dsm_cfg->blocks_count = INITIAL_BLOCKS_COUNT; - dsm_cfg->first_free = 0; - } -} - -/* - * Attach process to dsm_array segment. This function is used for - * background workers only. Use init_dsm_segment() in backend processes. - */ -void -attach_dsm_array_segment() -{ - segment = dsm_attach(dsm_cfg->segment_handle); -} - -/* - * Initialize dsm segment. 
Returns true if new segment was created and - * false if attached to existing segment - */ -bool -init_dsm_segment(size_t blocks_count, size_t block_size) -{ - bool ret; - - /* if there is already an existing segment then attach to it */ - if (dsm_cfg->segment_handle != 0) - { - ret = false; - segment = dsm_attach(dsm_cfg->segment_handle); - } - - /* - * If segment hasn't been created yet or has already been destroyed - * (it happens when last session detaches segment) then create new one - */ - if (dsm_cfg->segment_handle == 0 || segment == NULL) - { - /* create segment */ - segment = dsm_create(block_size * blocks_count, 0); - dsm_cfg->segment_handle = dsm_segment_handle(segment); - dsm_cfg->first_free = 0; - dsm_cfg->block_size = block_size; - dsm_cfg->blocks_count = blocks_count; - init_dsm_table(block_size, 0, dsm_cfg->blocks_count); - ret = true; - } - - /* - * Keep mapping till the end of the session. Otherwise it would be - * destroyed by the end of transaction - */ - dsm_pin_mapping(segment); - - return ret; -} - -/* - * Initialize allocated segment with block structure - */ -void -init_dsm_table(size_t block_size, size_t start, size_t end) -{ - size_t i; - BlockHeaderPtr header; - char *ptr = dsm_segment_address(segment); - - /* create blocks */ - for (i = start; i < end; i++) - { - header = (BlockHeaderPtr) &ptr[i * block_size]; - *header = set_free(header); - *header = set_length(header, 1); - } - - return; -} - -/* - * Allocate array inside dsm_segment - */ -void -alloc_dsm_array(DsmArray *arr, size_t entry_size, size_t elem_count) -{ - size_t i = 0; - size_t size_requested = entry_size * elem_count; - size_t min_pos = 0; - size_t max_pos = 0; - bool found = false; - bool collecting_blocks = false; - size_t offset = -1; - size_t total_length = 0; - BlockHeaderPtr header; - char *ptr = dsm_segment_address(segment); - - arr->entry_size = entry_size; - - for (i = dsm_cfg->first_free; i < dsm_cfg->blocks_count; ) - { - header = (BlockHeaderPtr) &ptr[i * 
dsm_cfg->block_size]; - if (is_free(header)) - { - if (!collecting_blocks) - { - offset = i * dsm_cfg->block_size; - total_length = dsm_cfg->block_size - sizeof(BlockHeader); - min_pos = i; - collecting_blocks = true; - } - else - { - total_length += dsm_cfg->block_size; - } - i++; - } - else - { - collecting_blocks = false; - offset = 0; - total_length = 0; - i += get_length(header); - } - - if (total_length >= size_requested) - { - max_pos = i-1; - found = true; - break; - } - } - - /* - * If dsm segment size is not enough then resize it (or allocate bigger - * for segment SysV and Windows, not implemented yet) - */ - if (!found) - { - size_t new_blocks_count = dsm_cfg->blocks_count * 2; - - dsm_resize(segment, new_blocks_count * dsm_cfg->block_size); - init_dsm_table(dsm_cfg->block_size, dsm_cfg->blocks_count, new_blocks_count); - dsm_cfg->blocks_count = new_blocks_count; - - /* try again */ - return alloc_dsm_array(arr, entry_size, elem_count); - } - - /* look up for first free block */ - if (dsm_cfg->first_free == min_pos) - { - for (; iblocks_count; ) - { - header = (BlockHeaderPtr) &ptr[i * dsm_cfg->block_size]; - if (is_free(header)) - { - dsm_cfg->first_free = i; - break; - } - else - { - i += get_length(header); - } - } - } - - /* if we found enough of space */ - if (total_length >= size_requested) - { - header = (BlockHeaderPtr) &ptr[min_pos * dsm_cfg->block_size]; - *header = set_used(header); - *header = set_length(header, max_pos - min_pos + 1); - - arr->offset = offset; - arr->elem_count = elem_count; - } -} - -void -free_dsm_array(DsmArray *arr) -{ - size_t i = 0, - start = arr->offset / dsm_cfg->block_size; - char *ptr = dsm_segment_address(segment); - BlockHeaderPtr header = (BlockHeaderPtr) &ptr[start * dsm_cfg->block_size]; - size_t blocks_count = get_length(header); - - /* set blocks free */ - for(; i < blocks_count; i++) - { - header = (BlockHeaderPtr) &ptr[(start + i) * dsm_cfg->block_size]; - *header = set_free(header); - *header = 
set_length(header, 1); - } - - if (start < dsm_cfg->first_free) - dsm_cfg->first_free = start; - - arr->offset = 0; - arr->elem_count = 0; -} - -void -resize_dsm_array(DsmArray *arr, size_t entry_size, size_t elem_count) -{ - void *array_data; - size_t array_data_size; - void *buffer; - - /* Copy data from array to temporary buffer */ - array_data = dsm_array_get_pointer(arr, false); - array_data_size = arr->elem_count * entry_size; - buffer = palloc(array_data_size); - memcpy(buffer, array_data, array_data_size); - - /* Free array */ - free_dsm_array(arr); - - /* Allocate new array */ - alloc_dsm_array(arr, entry_size, elem_count); - - /* Copy data to new array */ - array_data = dsm_array_get_pointer(arr, false); - memcpy(array_data, buffer, array_data_size); - - pfree(buffer); -} - -void * -dsm_array_get_pointer(const DsmArray *arr, bool copy) -{ - uint8 *segment_address, - *dsm_array, - *result; - size_t size; - - segment_address = (uint8 *) dsm_segment_address(segment); - dsm_array = segment_address + arr->offset + sizeof(BlockHeader); - - if (copy) - { - size = arr->elem_count * arr->entry_size; - result = palloc(size); - memcpy((void *) result, (void *) dsm_array, size); - } - else - result = dsm_array; - - return result; -} diff --git a/src/dsm_array.h b/src/dsm_array.h deleted file mode 100644 index 2b7184d8..00000000 --- a/src/dsm_array.h +++ /dev/null @@ -1,47 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * dsm_array.h - * Allocate data in shared memory - * - * Copyright (c) 2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef DSM_ARRAY_H -#define DSM_ARRAY_H - -#include "postgres.h" -#include "storage/dsm.h" - - -/* - * Dynamic shared memory array - */ -typedef struct -{ - dsm_handle segment; - size_t offset; - size_t elem_count; - size_t entry_size; -} DsmArray; - - -#define InvalidDsmArray { 0, 0, 0, 0 } - -#define INITIAL_BLOCKS_COUNT 
8192 - - -/* Dynamic shared memory functions */ -Size estimate_dsm_config_size(void); -void init_dsm_config(void); -bool init_dsm_segment(size_t blocks_count, size_t block_size); -void init_dsm_table(size_t block_size, size_t start, size_t end); -void alloc_dsm_array(DsmArray *arr, size_t entry_size, size_t elem_count); -void free_dsm_array(DsmArray *arr); -void resize_dsm_array(DsmArray *arr, size_t entry_size, size_t elem_count); -void *dsm_array_get_pointer(const DsmArray *arr, bool copy); -dsm_handle get_dsm_array_segment(void); -void attach_dsm_array_segment(void); - -#endif diff --git a/src/hooks.c b/src/hooks.c index c29820ed..2ff2667c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -3,30 +3,80 @@ * hooks.c * definitions of rel_pathlist and join_pathlist hooks * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" +#include "compat/rowmarks_fix.h" + +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif + +#include "declarative.h" #include "hooks.h" #include "init.h" #include "partition_filter.h" -#include "runtimeappend.h" +#include "partition_overseer.h" +#include "partition_router.h" +#include "pathman_workers.h" +#include "planner_tree_modification.h" +#include "runtime_append.h" #include "runtime_merge_append.h" +#include "utility_stmt_hooking.h" #include "utils.h" #include "xact_handling.h" +#include "access/transam.h" +#include "access/xact.h" +#include "catalog/pg_authid.h" #include "miscadmin.h" #include "optimizer/cost.h" +#include "optimizer/prep.h" #include "optimizer/restrictinfo.h" +#include "rewrite/rewriteManip.h" +#include "utils/lsyscache.h" #include "utils/typcache.h" +#include "utils/snapmgr.h" + + +#ifdef USE_ASSERT_CHECKING +#define 
USE_RELCACHE_LOGGING +#endif -set_join_pathlist_hook_type set_join_pathlist_next = NULL; -set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; -planner_hook_type planner_hook_next = NULL; -post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; -shmem_startup_hook_type shmem_startup_hook_next = NULL; +/* Borrowed from joinpath.c */ +#define PATH_PARAM_BY_REL(path, rel) \ + ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) + +static inline bool +allow_star_schema_join(PlannerInfo *root, + Path *outer_path, + Path *inner_path) +{ + Relids innerparams = PATH_REQ_OUTER(inner_path); + Relids outerrelids = outer_path->parent->relids; + + /* + * It's a star-schema case if the outer rel provides some but not all of + * the inner rel's parameterization. + */ + return (bms_overlap(innerparams, outerrelids) && + bms_nonempty_difference(innerparams, outerrelids)); +} + + +set_join_pathlist_hook_type pathman_set_join_pathlist_next = NULL; +set_rel_pathlist_hook_type pathman_set_rel_pathlist_hook_next = NULL; +planner_hook_type pathman_planner_hook_next = NULL; +post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next = NULL; +shmem_startup_hook_type pathman_shmem_startup_hook_next = NULL; +ProcessUtility_hook_type pathman_process_utility_hook_next = NULL; +ExecutorStart_hook_type pathman_executor_start_hook_prev = NULL; /* Take care of joins */ @@ -39,47 +89,97 @@ pathman_join_pathlist_hook(PlannerInfo *root, JoinPathExtraData *extra) { JoinCostWorkspace workspace; + JoinType saved_jointype = jointype; RangeTblEntry *inner_rte = root->simple_rte_array[innerrel->relid]; - const PartRelationInfo *inner_prel; - List *pathkeys = NIL, - *joinclauses, + PartRelationInfo *inner_prel; + List *joinclauses, *otherclauses; - ListCell *lc; WalkerContext context; double paramsel; - bool innerrel_rinfo_contains_part_attr; + Node *part_expr; + ListCell *lc; /* Call hooks set by other extensions */ - if (set_join_pathlist_next) - 
set_join_pathlist_next(root, joinrel, outerrel, - innerrel, jointype, extra); + if (pathman_set_join_pathlist_next) + pathman_set_join_pathlist_next(root, joinrel, outerrel, + innerrel, jointype, extra); /* Check that both pg_pathman & RuntimeAppend nodes are enabled */ if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) return; - if (jointype == JOIN_FULL) - return; /* handling full joins is meaningless */ + /* We should only consider base inner relations */ + if (innerrel->reloptkind != RELOPT_BASEREL) + return; - /* Check that innerrel is a BASEREL with inheritors & PartRelationInfo */ - if (innerrel->reloptkind != RELOPT_BASEREL || !inner_rte->inh || - !(inner_prel = get_pathman_relation_info(inner_rte->relid))) - { - return; /* Obviously not our case */ - } + /* We shouldn't process tables with active children */ + if (inner_rte->inh) + return; + + /* We shouldn't process functions etc */ + if (inner_rte->rtekind != RTE_RELATION) + return; + + /* We don't support these join types (since inner will be parameterized) */ + if (jointype == JOIN_FULL || + jointype == JOIN_RIGHT || + jointype == JOIN_UNIQUE_INNER) + return; + + /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(inner_rte)) + return; + + /* Proceed iff relation 'innerrel' is partitioned */ + if ((inner_prel = get_pathman_relation_info(inner_rte->relid)) == NULL) + return; /* - * These codes are used internally in the planner, but are not supported - * by the executor (nor, indeed, by most of the planner). + * Check if query is: + * 1) UPDATE part_table SET = .. FROM part_table. + * 2) DELETE FROM part_table USING part_table. + * + * Either outerrel or innerrel may be a result relation. 
*/ + if ((root->parse->resultRelation == outerrel->relid || + root->parse->resultRelation == innerrel->relid) && + (root->parse->commandType == CMD_UPDATE || + root->parse->commandType == CMD_DELETE)) + { + int rti = -1, + count = 0; + + /* Inner relation must be partitioned */ + Assert(inner_prel); + + /* Check each base rel of outer relation */ + while ((rti = bms_next_member(outerrel->relids, rti)) >= 0) + { + Oid outer_baserel = root->simple_rte_array[rti]->relid; + + /* Is it partitioned? */ + if (has_pathman_relation_info(outer_baserel)) + count++; + } + + if (count > 0) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("DELETE and UPDATE queries with a join " + "of partitioned tables are not supported"))); + } + + /* Replace virtual join types with a real one */ if (jointype == JOIN_UNIQUE_OUTER || jointype == JOIN_UNIQUE_INNER) - jointype = JOIN_INNER; /* replace with a proper value */ + jointype = JOIN_INNER; /* Extract join clauses which will separate partitions */ if (IS_OUTER_JOIN(extra->sjinfo->jointype)) { - extract_actual_join_clauses(extra->restrictlist, - &joinclauses, &otherclauses); + extract_actual_join_clauses_compat(extra->restrictlist, + joinrel->relids, + &joinclauses, + &otherclauses); } else { @@ -88,31 +188,32 @@ pathman_join_pathlist_hook(PlannerInfo *root, otherclauses = NIL; } + /* Make copy of partitioning expression and fix Var's varno attributes */ + part_expr = PrelExpressionForRelid(inner_prel, innerrel->relid); + paramsel = 1.0; foreach (lc, joinclauses) { WrapperNode *wrap; - InitWalkerContext(&context, inner_prel, NULL, false); - + InitWalkerContext(&context, part_expr, inner_prel, NULL); wrap = walk_expr_tree((Expr *) lfirst(lc), &context); paramsel *= wrap->paramsel; } - /* Check that innerrel's RestrictInfo contains partitioned column */ - innerrel_rinfo_contains_part_attr = - check_rinfo_for_partitioned_attr(innerrel->baserestrictinfo, - innerrel->relid, - inner_prel->attnum); - foreach (lc, 
innerrel->pathlist) { + AppendPath *cur_inner_path = (AppendPath *) lfirst(lc); Path *outer, *inner; NestPath *nest_path; /* NestLoop we're creating */ ParamPathInfo *ppi; /* parameterization info */ - Relids inner_required; /* required paremeterization relids */ - AppendPath *cur_inner_path = (AppendPath *) lfirst(lc); + Relids required_nestloop, + required_inner; + List *filtered_joinclauses = NIL, + *saved_ppi_list, + *pathkeys; + ListCell *rinfo_lc; if (!IsA(cur_inner_path, AppendPath)) continue; @@ -120,347 +221,665 @@ pathman_join_pathlist_hook(PlannerInfo *root, /* Select cheapest path for outerrel */ outer = outerrel->cheapest_total_path; - /* Make innerrel path depend on outerrel's column */ - inner_required = bms_union(PATH_REQ_OUTER((Path *) cur_inner_path), - bms_make_singleton(outerrel->relid)); + /* We cannot use an outer path that is parameterized by the inner rel */ + if (PATH_PARAM_BY_REL(outer, innerrel)) + continue; + + /* Wrap 'outer' in unique path if needed */ + if (saved_jointype == JOIN_UNIQUE_OUTER) + { + outer = (Path *) create_unique_path(root, outerrel, + outer, extra->sjinfo); + Assert(outer); + } + + /* Make inner path depend on outerrel's columns */ + required_inner = bms_union(PATH_REQ_OUTER((Path *) cur_inner_path), + outerrel->relids); + + /* Preserve existing ppis built by get_appendrel_parampathinfo() */ + saved_ppi_list = innerrel->ppilist; /* Get the ParamPathInfo for a parameterized path */ - ppi = get_baserel_parampathinfo(root, innerrel, inner_required); + innerrel->ppilist = NIL; + ppi = get_baserel_parampathinfo(root, innerrel, required_inner); + innerrel->ppilist = saved_ppi_list; + + /* Skip ppi->ppi_clauses don't reference partition attribute */ + if (!(ppi && get_partitioning_clauses(ppi->ppi_clauses, + inner_prel, + innerrel->relid))) + continue; + + /* Try building RuntimeAppend path, skip if it's not possible */ + inner = create_runtime_append_path(root, cur_inner_path, ppi, paramsel); + if (!inner) + continue; + + 
required_nestloop = calc_nestloop_required_outer_compat(outer, inner); /* - * Skip if neither rel->baserestrictinfo nor - * ppi->ppi_clauses reference partition attribute + * Check to see if proposed path is still parameterized, and reject if the + * parameterization wouldn't be sensible --- unless allow_star_schema_join + * says to allow it anyway. Also, we must reject if have_dangerous_phv + * doesn't like the look of it, which could only happen if the nestloop is + * still parameterized. */ - if (!(innerrel_rinfo_contains_part_attr || - (ppi && check_rinfo_for_partitioned_attr(ppi->ppi_clauses, - innerrel->relid, - inner_prel->attnum)))) + if (required_nestloop && + ((!bms_overlap(required_nestloop, extra->param_source_rels) && + !allow_star_schema_join(root, outer, inner)) || + have_dangerous_phv(root, outer->parent->relids, required_inner))) continue; - inner = create_runtimeappend_path(root, cur_inner_path, - ppi, paramsel); - - initial_cost_nestloop(root, &workspace, jointype, - outer, inner, /* built paths */ - extra->sjinfo, &extra->semifactors); + initial_cost_nestloop_compat(root, &workspace, jointype, outer, inner, extra); pathkeys = build_join_pathkeys(root, joinrel, jointype, outer->pathkeys); - nest_path = create_nestloop_path(root, joinrel, jointype, &workspace, - extra->sjinfo, &extra->semifactors, - outer, inner, extra->restrictlist, - pathkeys, - calc_nestloop_required_outer(outer, inner)); + /* Discard all clauses that are to be evaluated by 'inner' */ + foreach (rinfo_lc, extra->restrictlist) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(rinfo_lc); + + Assert(IsA(rinfo, RestrictInfo)); + if (!join_clause_is_movable_to(rinfo, inner->parent)) + filtered_joinclauses = lappend(filtered_joinclauses, rinfo); + } + + nest_path = + create_nestloop_path_compat(root, joinrel, jointype, + &workspace, extra, outer, inner, + filtered_joinclauses, pathkeys, + calc_nestloop_required_outer_compat(outer, inner)); - /* Finally we can add new NestLoop path 
*/ + /* + * NOTE: Override 'rows' value produced by standard estimator. + * Currently we use get_parameterized_joinrel_size() since + * it works just fine, but this might change some day. + */ +#if PG_VERSION_NUM >= 150000 /* for commit 18fea737b5e4 */ + nest_path->jpath.path.rows = +#else + nest_path->path.rows = +#endif + get_parameterized_joinrel_size_compat(root, joinrel, + outer, inner, + extra->sjinfo, + filtered_joinclauses); + + /* Finally we can add the new NestLoop path */ add_path(joinrel, (Path *) nest_path); } + + /* Don't forget to close 'inner_prel'! */ + close_pathman_relation_info(inner_prel); } /* Cope with simple relations */ void -pathman_rel_pathlist_hook(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte) +pathman_rel_pathlist_hook(PlannerInfo *root, + RelOptInfo *rel, + Index rti, + RangeTblEntry *rte) { - const PartRelationInfo *prel; - RangeTblEntry **new_rte_array; - RelOptInfo **new_rel_array; - int len; + PartRelationInfo *prel; + Relation parent_rel; /* parent's relation (heap) */ + PlanRowMark *parent_rowmark; /* parent's rowmark */ + Oid *children; /* selected children oids */ + List *ranges, /* a list of IndexRanges */ + *wrappers; /* a list of WrapperNodes */ + PathKey *pathkeyAsc = NULL, + *pathkeyDesc = NULL; + double paramsel = 1.0; /* default part selectivity */ + WalkerContext context; + Node *part_expr; + List *part_clauses; + ListCell *lc; + int irange_len, + i; /* Invoke original hook if needed */ - if (set_rel_pathlist_hook_next != NULL) - set_rel_pathlist_hook_next(root, rel, rti, rte); + if (pathman_set_rel_pathlist_hook_next) + pathman_set_rel_pathlist_hook_next(root, rel, rti, rte); + /* Make sure that pg_pathman is ready */ if (!IsPathmanReady()) - return; /* pg_pathman is not ready */ + return; - /* This works only for SELECT queries (at least for now) */ - if (root->parse->commandType != CMD_SELECT || - !list_member_oid(inheritance_enabled_relids, rte->relid)) + /* We shouldn't process tables with 
active children */ + if (rte->inh) return; - /* Proceed iff relation 'rel' is partitioned */ - if ((prel = get_pathman_relation_info(rte->relid)) != NULL) + /* + * Skip if it's a result relation (UPDATE | DELETE | INSERT), + * or not a (partitioned) physical relation at all. + */ + if (rte->rtekind != RTE_RELATION || + rte->relkind != RELKIND_RELATION || + root->parse->resultRelation == rti) + return; + +#ifdef LEGACY_ROWMARKS_95 + /* It's better to exit, since RowMarks might be broken */ + if (root->parse->commandType != CMD_SELECT && + root->parse->commandType != CMD_INSERT) + return; + + /* SELECT FOR SHARE/UPDATE is not handled by above check */ + foreach(lc, root->rowMarks) { - ListCell *lc; - Oid *children; - List *ranges, - *wrappers; - PathKey *pathkeyAsc = NULL, - *pathkeyDesc = NULL; - double paramsel = 1.0; - WalkerContext context; - int i; - bool rel_rinfo_contains_part_attr = false; + PlanRowMark *rc = (PlanRowMark *) lfirst(lc); - if (prel->parttype == PT_RANGE) - { - /* - * Get pathkeys for ascending and descending sort by partition - * column - */ - List *pathkeys; - Var *var; - Oid vartypeid, - varcollid; - int32 type_mod; - TypeCacheEntry *tce; - - /* Make Var from patition column */ - get_rte_attribute_type(rte, prel->attnum, - &vartypeid, &type_mod, &varcollid); - var = makeVar(rti, prel->attnum, vartypeid, type_mod, varcollid, 0); - var->location = -1; - - /* Determine operator type */ - tce = lookup_type_cache(var->vartype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); - - /* Make pathkeys */ - pathkeys = build_expression_pathkey(root, (Expr *)var, NULL, - tce->lt_opr, NULL, false); - if (pathkeys) - pathkeyAsc = (PathKey *) linitial(pathkeys); - pathkeys = build_expression_pathkey(root, (Expr *)var, NULL, - tce->gt_opr, NULL, false); - if (pathkeys) - pathkeyDesc = (PathKey *) linitial(pathkeys); - } + if (rc->rti == rti) + return; + } +#endif - rte->inh = true; /* we must restore 'inh' flag! 
*/ + /* Skip if this table is not allowed to act as parent (e.g. FROM ONLY) */ + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(rte)) + return; - children = PrelGetChildrenArray(prel); - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); + /* Proceed iff relation 'rel' is partitioned */ + if ((prel = get_pathman_relation_info(rte->relid)) == NULL) + return; - /* Make wrappers over restrictions and collect final rangeset */ - InitWalkerContext(&context, prel, NULL, false); - wrappers = NIL; - foreach(lc, rel->baserestrictinfo) + /* + * Check that this child is not the parent table itself. + * This is exactly how standard inheritance works. + * + * Helps with queries like this one: + * + * UPDATE test.tmp t SET value = 2 + * WHERE t.id IN (SELECT id + * FROM test.tmp2 t2 + * WHERE id = t.id); + * + * or unions, multilevel partitioning, etc. + * + * Since we disable optimizations on 9.5, we + * have to skip parent table that has already + * been expanded by standard inheritance. + */ + if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL) + { + foreach (lc, root->append_rel_list) { - WrapperNode *wrap; - RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); + Oid child_oid, + parent_oid; - wrap = walk_expr_tree(rinfo->clause, &context); + /* Is it actually the same table? */ + child_oid = root->simple_rte_array[appinfo->child_relid]->relid; + parent_oid = root->simple_rte_array[appinfo->parent_relid]->relid; - paramsel *= wrap->paramsel; - wrappers = lappend(wrappers, wrap); - ranges = irange_list_intersect(ranges, wrap->rangeset); + /* + * If there's an 'appinfo', it means that somebody + * (PG?) has already processed this partitioned table + * and added its children to the plan. 
+ */ + if (appinfo->child_relid == rti && + OidIsValid(appinfo->parent_reloid)) + { + if (child_oid == parent_oid) + goto cleanup; + else if (!has_pathman_relation_info(parent_oid)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("could not expand partitioned table \"%s\"", + get_rel_name(child_oid)), + errhint("Do not use inheritance and pg_pathman partitions together"))); + } } + } + /* Make copy of partitioning expression and fix Var's varno attributes */ + part_expr = PrelExpressionForRelid(prel, rti); + + /* Get partitioning-related clauses (do this before append_child_relation()) */ + part_clauses = get_partitioning_clauses(rel->baserestrictinfo, prel, rti); + + if (prel->parttype == PT_RANGE) + { /* - * Expand simple_rte_array and simple_rel_array + * Get pathkeys for ascending and descending sort by partitioned column. */ - len = irange_list_length(ranges); - if (prel->enable_parent) - len++; + List *pathkeys; + TypeCacheEntry *tce; + + /* Determine operator type */ + tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); + + /* Make pathkeys */ + pathkeys = build_expression_pathkey_compat(root, (Expr *) part_expr, NULL, + tce->lt_opr, NULL, false); + if (pathkeys) + pathkeyAsc = (PathKey *) linitial(pathkeys); + pathkeys = build_expression_pathkey_compat(root, (Expr *) part_expr, NULL, + tce->gt_opr, NULL, false); + if (pathkeys) + pathkeyDesc = (PathKey *) linitial(pathkeys); + } - if (len > 0) - { - /* Expand simple_rel_array and simple_rte_array */ - new_rel_array = (RelOptInfo **) - palloc0((root->simple_rel_array_size + len) * sizeof(RelOptInfo *)); + children = PrelGetChildrenArray(prel); + ranges = list_make1_irange_full(prel, IR_COMPLETE); - /* simple_rte_array is an array equivalent of the rtable list */ - new_rte_array = (RangeTblEntry **) - palloc0((root->simple_rel_array_size + len) * sizeof(RangeTblEntry *)); + /* Make wrappers over restrictions and collect final rangeset */ + 
InitWalkerContext(&context, part_expr, prel, NULL); + wrappers = NIL; + foreach(lc, rel->baserestrictinfo) + { + WrapperNode *wrap; + RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); - /* Copy relations to the new arrays */ - for (i = 0; i < root->simple_rel_array_size; i++) - { - new_rel_array[i] = root->simple_rel_array[i]; - new_rte_array[i] = root->simple_rte_array[i]; - } + wrap = walk_expr_tree(rinfo->clause, &context); + + paramsel *= wrap->paramsel; + wrappers = lappend(wrappers, wrap); + ranges = irange_list_intersection(ranges, wrap->rangeset); + } - /* Free old arrays */ - pfree(root->simple_rel_array); - pfree(root->simple_rte_array); + /* Get number of selected partitions */ + irange_len = irange_list_length(ranges); + if (prel->enable_parent) + irange_len++; /* also add parent */ - root->simple_rel_array_size += len; - root->simple_rel_array = new_rel_array; - root->simple_rte_array = new_rte_array; - } + /* Expand simple_rte_array and simple_rel_array */ + if (irange_len > 0) + { + int current_len = root->simple_rel_array_size, + new_len = current_len + irange_len; + + /* Expand simple_rel_array */ + root->simple_rel_array = (RelOptInfo **) + repalloc(root->simple_rel_array, + new_len * sizeof(RelOptInfo *)); - /* Add parent if needed */ - if (prel->enable_parent) - append_child_relation(root, rel, rti, rte, 0, rte->relid, NULL); + memset((void *) &root->simple_rel_array[current_len], 0, + irange_len * sizeof(RelOptInfo *)); + /* Expand simple_rte_array */ + root->simple_rte_array = (RangeTblEntry **) + repalloc(root->simple_rte_array, + new_len * sizeof(RangeTblEntry *)); + + memset((void *) &root->simple_rte_array[current_len], 0, + irange_len * sizeof(RangeTblEntry *)); + +#if PG_VERSION_NUM >= 110000 /* - * Iterate all indexes in rangeset and append corresponding child - * relations. + * Make sure append_rel_array is wide enough; if it hasn't been + * allocated previously, care to zero out [0; current_len) part. 
*/ - foreach(lc, ranges) - { - IndexRange irange = lfirst_irange(lc); + if (root->append_rel_array == NULL) + root->append_rel_array = (AppendRelInfo **) + palloc0(current_len * sizeof(AppendRelInfo *)); + root->append_rel_array = (AppendRelInfo **) + repalloc(root->append_rel_array, + new_len * sizeof(AppendRelInfo *)); + memset((void *) &root->append_rel_array[current_len], 0, + irange_len * sizeof(AppendRelInfo *)); +#endif - for (i = irange.ir_lower; i <= irange.ir_upper; i++) - append_child_relation(root, rel, rti, rte, i, children[i], wrappers); - } + /* Don't forget to update array size! */ + root->simple_rel_array_size = new_len; + } - /* Clear old path list */ - list_free(rel->pathlist); + /* Parent has already been locked by rewriter */ + parent_rel = heap_open_compat(rte->relid, NoLock); - rel->pathlist = NIL; - set_append_rel_pathlist(root, rel, rti, rte, pathkeyAsc, pathkeyDesc); - set_append_rel_size(root, rel, rti, rte); + parent_rowmark = get_plan_rowmark(root->rowMarks, rti); - /* No need to go further (both nodes are disabled), return */ - if (!(pg_pathman_enable_runtimeappend || - pg_pathman_enable_runtime_merge_append)) - return; + /* Add parent if asked to */ + if (prel->enable_parent) + append_child_relation(root, parent_rel, parent_rowmark, + rti, 0, rte->relid, NULL); - /* Runtime[Merge]Append is pointless if there are no params in clauses */ - if (!clause_contains_params((Node *) get_actual_clauses(rel->baserestrictinfo))) - return; + /* Iterate all indexes in rangeset and append child relations */ + foreach(lc, ranges) + { + IndexRange irange = lfirst_irange(lc); - rel_rinfo_contains_part_attr = - check_rinfo_for_partitioned_attr(rel->baserestrictinfo, - rel->relid, - prel->attnum); + for (i = irange_lower(irange); i <= irange_upper(irange); i++) + append_child_relation(root, parent_rel, parent_rowmark, + rti, i, children[i], wrappers); + } + + /* Now close parent relation */ + heap_close_compat(parent_rel, NoLock); + + /* Clear path list 
and make it point to NIL */ + list_free_deep(rel->pathlist); + rel->pathlist = NIL; + +#if PG_VERSION_NUM >= 90600 + /* Clear old partial path list */ + list_free(rel->partial_pathlist); + rel->partial_pathlist = NIL; +#endif - foreach (lc, rel->pathlist) + /* Generate new paths using the rels we've just added */ + set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); + set_append_rel_size_compat(root, rel, rti); + + /* consider gathering partial paths for the parent appendrel */ + generate_gather_paths_compat(root, rel); + + /* Skip if both custom nodes are disabled */ + if (!(pg_pathman_enable_runtimeappend || + pg_pathman_enable_runtime_merge_append)) + goto cleanup; + + /* Skip if there's no PARAMs in partitioning-related clauses */ + if (!clause_contains_params((Node *) part_clauses)) + goto cleanup; + + /* Generate Runtime[Merge]Append paths if needed */ + foreach (lc, rel->pathlist) + { + AppendPath *cur_path = (AppendPath *) lfirst(lc); + Relids inner_required = PATH_REQ_OUTER((Path *) cur_path); + Path *inner_path = NULL; + ParamPathInfo *ppi; + + /* Skip if rel contains some join-related stuff or path type mismatched */ + if (!(IsA(cur_path, AppendPath) || IsA(cur_path, MergeAppendPath)) || + rel->has_eclass_joins || rel->joininfo) { - AppendPath *cur_path = (AppendPath *) lfirst(lc); - Relids inner_required = PATH_REQ_OUTER((Path *) cur_path); - ParamPathInfo *ppi = get_appendrel_parampathinfo(rel, inner_required); - Path *inner_path = NULL; - - /* Skip if rel contains some join-related stuff or path type mismatched */ - if (!(IsA(cur_path, AppendPath) || IsA(cur_path, MergeAppendPath)) || - rel->has_eclass_joins || rel->joininfo) - { - continue; - } + continue; + } - /* - * Skip if neither rel->baserestrictinfo nor - * ppi->ppi_clauses reference partition attribute - */ - if (!(rel_rinfo_contains_part_attr || - (ppi && check_rinfo_for_partitioned_attr(ppi->ppi_clauses, - rel->relid, - prel->attnum)))) - continue; - - if (IsA(cur_path, 
AppendPath) && pg_pathman_enable_runtimeappend) - inner_path = create_runtimeappend_path(root, cur_path, - ppi, paramsel); - else if (IsA(cur_path, MergeAppendPath) && - pg_pathman_enable_runtime_merge_append) - inner_path = create_runtimemergeappend_path(root, cur_path, - ppi, paramsel); - - if (inner_path) - add_path(rel, inner_path); + /* Get existing parameterization */ + ppi = get_appendrel_parampathinfo(rel, inner_required); + + if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) + inner_path = create_runtime_append_path(root, cur_path, + ppi, paramsel); + else if (IsA(cur_path, MergeAppendPath) && + pg_pathman_enable_runtime_merge_append) + { + /* Check struct layout compatibility */ + if (offsetof(AppendPath, subpaths) != + offsetof(MergeAppendPath, subpaths)) + elog(FATAL, "Struct layouts of AppendPath and " + "MergeAppendPath differ"); + + inner_path = create_runtime_merge_append_path(root, cur_path, + ppi, paramsel); } + + if (inner_path) + add_path(rel, inner_path); } + +cleanup: + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); +} + +/* + * 'pg_pathman.enable' GUC check. + */ +bool +pathman_enable_check_hook(bool *newval, void **extra, GucSource source) +{ + /* The top level statement requires immediate commit: accept GUC change */ + if (MyXactFlags & XACT_FLAGS_NEEDIMMEDIATECOMMIT) + return true; + + /* Ignore the case of re-setting the same value */ + if (*newval == pathman_init_state.pg_pathman_enable) + return true; + + /* Command must be at top level of a fresh transaction. */ + if (FirstSnapshotSet || + GetTopTransactionIdIfAny() != InvalidTransactionId || +#ifdef PGPRO_EE + getNestLevelATX() > 0 || +#endif + IsSubTransaction()) + { + ereport(WARNING, + (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("\"pg_pathman.enable\" must be called before any query, ignored"))); + + /* Keep the old value. 
*/ + *newval = pathman_init_state.pg_pathman_enable; + } + + return true; } /* * Intercept 'pg_pathman.enable' GUC assignments. */ void -pg_pathman_enable_assign_hook(bool newval, void *extra) +pathman_enable_assign_hook(bool newval, void *extra) { elog(DEBUG2, "pg_pathman_enable_assign_hook() [newval = %s] triggered", newval ? "true" : "false"); - /* Return quickly if nothing has changed */ - if (newval == (pg_pathman_init_state.pg_pathman_enable && - pg_pathman_enable_runtimeappend && - pg_pathman_enable_runtime_merge_append && - pg_pathman_enable_partition_filter)) - return; + if (!(newval == pathman_init_state.pg_pathman_enable && + newval == pathman_init_state.auto_partition && + newval == pathman_init_state.override_copy && + newval == pg_pathman_enable_runtimeappend && + newval == pg_pathman_enable_runtime_merge_append && + newval == pg_pathman_enable_partition_filter && + newval == pg_pathman_enable_bounds_cache)) + { + elog(NOTICE, + "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " + "and some other options have been %s", + newval ? "enabled" : "disabled"); + } - pg_pathman_enable_runtime_merge_append = newval; - pg_pathman_enable_runtimeappend = newval; - pg_pathman_enable_partition_filter = newval; - elog(NOTICE, - "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes have been %s", - newval ? 
"enabled" : "disabled"); + pathman_init_state.auto_partition = newval; + pathman_init_state.override_copy = newval; + pg_pathman_enable_runtimeappend = newval; + pg_pathman_enable_runtime_merge_append = newval; + pg_pathman_enable_partition_filter = newval; + pg_pathman_enable_bounds_cache = newval; + + /* Purge caches if pathman was disabled */ + if (!newval) + { + unload_config(); + } +} + +static void +execute_for_plantree(PlannedStmt *planned_stmt, + Plan *(*proc) (List *rtable, Plan *plan)) +{ + List *subplans = NIL; + ListCell *lc; + Plan *resplan = proc(planned_stmt->rtable, planned_stmt->planTree); + + if (resplan) + planned_stmt->planTree = resplan; + + foreach (lc, planned_stmt->subplans) + { + Plan *subplan = lfirst(lc); + resplan = proc(planned_stmt->rtable, (Plan *) lfirst(lc)); + if (resplan) + subplans = lappend(subplans, resplan); + else + subplans = lappend(subplans, subplan); + } + planned_stmt->subplans = subplans; +} + +/* + * Truncated version of set_plan_refs. + * Pathman can add nodes to already completed and post-processed plan tree. + * reset_plan_node_ids fixes some presentation values for updated plan tree + * to avoid problems in further processing. + */ +static Plan * +reset_plan_node_ids(Plan *plan, void *lastPlanNodeId) +{ + if (plan == NULL) + return NULL; + + plan->plan_node_id = (*(int *) lastPlanNodeId)++; + + return plan; } /* * Planner hook. It disables inheritance for tables that have been partitioned * by pathman to prevent standart PostgreSQL partitioning mechanism from - * handling that tables. + * handling those tables. + * + * Since >= 13 (6aba63ef3e6) query_string parameter was added. 
*/ PlannedStmt * +#if PG_VERSION_NUM >= 130000 +pathman_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams) +#else pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) +#endif { -#define ExecuteForPlanTree(planned_stmt, proc) \ - do { \ - ListCell *lc; \ - proc((planned_stmt)->rtable, (planned_stmt)->planTree); \ - foreach (lc, (planned_stmt)->subplans) \ - proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ - } while (0) + PlannedStmt *result; + uint64 query_id = parse->queryId; - PlannedStmt *result; + /* Save the result in case it changes */ + bool pathman_ready = IsPathmanReady(); - /* FIXME: fix these commands (traverse whole query tree) */ - if (IsPathmanReady()) + PG_TRY(); { - switch(parse->commandType) + if (pathman_ready) { - case CMD_SELECT: - disable_inheritance(parse); - rowmark_add_tableoids(parse); /* add attributes for rowmarks */ - break; - - case CMD_UPDATE: - case CMD_DELETE: - disable_inheritance_cte(parse); - disable_inheritance_subselect(parse); - handle_modification_query(parse); - break; - - default: - break; + /* Increase planner() calls count */ + incr_planner_calls_count(); + + /* Modify query tree if needed */ + pathman_transform_query(parse, boundParams); } - } - /* Invoke original hook if needed */ - if (planner_hook_next) - result = planner_hook_next(parse, cursorOptions, boundParams); - else - result = standard_planner(parse, cursorOptions, boundParams); + /* Invoke original hook if needed */ + if (pathman_planner_hook_next) +#if PG_VERSION_NUM >= 130000 + result = pathman_planner_hook_next(parse, query_string, cursorOptions, boundParams); +#else + result = pathman_planner_hook_next(parse, cursorOptions, boundParams); +#endif + else +#if PG_VERSION_NUM >= 130000 + result = standard_planner(parse, query_string, cursorOptions, boundParams); +#else + result = standard_planner(parse, cursorOptions, boundParams); +#endif - if (IsPathmanReady()) - { - /* Give 
rowmark-related attributes correct names */ - ExecuteForPlanTree(result, postprocess_lock_rows); + if (pathman_ready) + { + int lastPlanNodeId = 0; + ListCell *l; + + /* Add PartitionFilter node for INSERT queries */ + execute_for_plantree(result, add_partition_filters); + + /* Add PartitionRouter node for UPDATE queries */ + execute_for_plantree(result, add_partition_routers); - /* Add PartitionFilter node for INSERT queries */ - ExecuteForPlanTree(result, add_partition_filters); + /* Decrement planner() calls count */ + decr_planner_calls_count(); + + /* remake parsed tree presentation fixes due to possible adding nodes */ + result->planTree = plan_tree_visitor(result->planTree, reset_plan_node_ids, &lastPlanNodeId); + foreach(l, result->subplans) + { + lfirst(l) = plan_tree_visitor((Plan *) lfirst(l), reset_plan_node_ids, &lastPlanNodeId); + } + + /* HACK: restore queryId set by pg_stat_statements */ + result->queryId = query_id; + } } + /* We must decrease parenthood statuses refcount on ERROR */ + PG_CATCH(); + { + if (pathman_ready) + { + /* Caught an ERROR, decrease count */ + decr_planner_calls_count(); + } - list_free(inheritance_disabled_relids); - list_free(inheritance_enabled_relids); - inheritance_disabled_relids = NIL; - inheritance_enabled_relids = NIL; + /* Rethrow ERROR further */ + PG_RE_THROW(); + } + PG_END_TRY(); + /* Finally return the Plan */ return result; } /* * Post parse analysis hook. It makes sure the config is loaded before executing - * any statement, including utility commands + * any statement, including utility commands. 
+ */ +#if PG_VERSION_NUM >= 140000 +/* + * pathman_post_parse_analyze_hook(), pathman_post_parse_analyze_hook_next(): + * in 14 new argument was added (5fd9dfa5f50) */ void -pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) +pathman_post_parse_analyze_hook(ParseState *pstate, Query *query, JumbleState *jstate) { /* Invoke original hook if needed */ - if (post_parse_analyze_hook_next) - post_parse_analyze_hook_next(pstate, query); + if (pathman_post_parse_analyze_hook_next) + pathman_post_parse_analyze_hook_next(pstate, query, jstate); +#else +void +pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) +{ + /* Invoke original hook if needed */ + if (pathman_post_parse_analyze_hook_next) + pathman_post_parse_analyze_hook_next(pstate, query); +#endif - /* We shouldn't do anything on BEGIN or SET ISOLATION LEVEL stmts */ - if (query->commandType == CMD_UTILITY && - (xact_is_transaction_stmt(query->utilityStmt) || - xact_is_set_transaction_stmt(query->utilityStmt))) - { + /* See cook_partitioning_expression() */ + if (!pathman_hooks_enabled) return; + + /* We shouldn't proceed on: ... */ + if (query->commandType == CMD_UTILITY) + { + /* ... BEGIN */ + if (xact_is_transaction_stmt(query->utilityStmt)) + return; + + /* ... SET pg_pathman.enable */ + if (xact_is_set_stmt(query->utilityStmt, PATHMAN_ENABLE)) + { + /* Accept all events in case it's "enable = OFF" */ + if (IsPathmanReady()) + finish_delayed_invalidation(); + + return; + } + + /* ... SET [TRANSACTION] */ + if (xact_is_set_stmt(query->utilityStmt, NULL)) + return; + + /* ... 
ALTER EXTENSION pg_pathman */ + if (xact_is_alter_pathman_stmt(query->utilityStmt)) + { + /* Leave no delayed events before ALTER EXTENSION */ + if (IsPathmanReady()) + finish_delayed_invalidation(); + + /* Disable pg_pathman to perform a painless update */ + (void) set_config_option(PATHMAN_ENABLE, "off", + PGC_SUSET, PGC_S_SESSION, + GUC_ACTION_SAVE, true, 0, false); + + return; + } } - /* Finish delayed invalidation jobs */ + /* Finish all delayed invalidation jobs */ if (IsPathmanReady()) finish_delayed_invalidation(); @@ -472,9 +891,64 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) { load_config(); /* perform main cache initialization */ } + if (!IsPathmanReady()) + return; + + /* Process inlined SQL functions (we've already entered planning stage) */ + if (IsPathmanReady() && get_planner_calls_count() > 0) + { + /* Check that pg_pathman is the last extension loaded */ + if (post_parse_analyze_hook != pathman_post_parse_analyze_hook) + { + Oid save_userid; + int save_sec_context; + bool need_priv_escalation = !superuser(); /* we might be a SU */ + char *spl_value; /* value of "shared_preload_libraries" GUC */ + + /* Do we have to escalate privileges? 
*/ + if (need_priv_escalation) + { + /* Get current user's Oid and security context */ + GetUserIdAndSecContext(&save_userid, &save_sec_context); - inheritance_disabled_relids = NIL; - inheritance_enabled_relids = NIL; + /* Become superuser in order to bypass sequence ACL checks */ + SetUserIdAndSecContext(BOOTSTRAP_SUPERUSERID, + save_sec_context | SECURITY_LOCAL_USERID_CHANGE); + } + + /* TODO: add a test for this case (non-privileged user etc) */ + + /* Only SU can read this GUC */ +#if PG_VERSION_NUM >= 90600 + spl_value = GetConfigOptionByName("shared_preload_libraries", NULL, false); +#else + spl_value = GetConfigOptionByName("shared_preload_libraries", NULL); +#endif + + /* Restore user's privileges */ + if (need_priv_escalation) + SetUserIdAndSecContext(save_userid, save_sec_context); + + ereport(ERROR, + (errmsg("extension conflict has been detected"), + errdetail("shared_preload_libraries = \"%s\"", spl_value), + errhint("pg_pathman should be the last extension listed in " + "\"shared_preload_libraries\" GUC in order to " + "prevent possible conflicts with other extensions"))); + } + + /* Modify query tree if needed */ + pathman_transform_query(query, NULL); + return; + } + +#if PG_VERSION_NUM >= 100000 + /* + * for now this call works only for declarative partitioning so + * we disabled it + */ + pathman_post_analyze_query(query); +#endif } /* @@ -484,13 +958,12 @@ void pathman_shmem_startup_hook(void) { /* Invoke original hook if needed */ - if (shmem_startup_hook_next != NULL) - shmem_startup_hook_next(); + if (pathman_shmem_startup_hook_next) + pathman_shmem_startup_hook_next(); /* Allocate shared memory objects */ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); - init_dsm_config(); - init_shmem_config(); + init_concurrent_part_task_slots(); LWLockRelease(AddinShmemInitLock); } @@ -500,56 +973,268 @@ pathman_shmem_startup_hook(void) void pathman_relcache_hook(Datum arg, Oid relid) { - PartParentSearch search; - Oid partitioned_table; + Oid 
pathman_config_relid; + + /* See cook_partitioning_expression() */ + if (!pathman_hooks_enabled) + return; if (!IsPathmanReady()) return; - /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ - if (relid == get_pathman_config_relid()) + /* Invalidation event for whole cache */ + if (relid == InvalidOid) + { + invalidate_bounds_cache(); + invalidate_parents_cache(); + invalidate_status_cache(); + delay_pathman_shutdown(); /* see below */ + } + + /* + * Invalidation event for PATHMAN_CONFIG table (probably DROP EXTENSION). + * Digging catalogs here is expensive and probably illegal, so we take + * cached relid. It is possible that we don't know it atm (e.g. pathman + * was disabled). However, in this case caches must have been cleaned + * on disable, and there is no DROP-specific additional actions. + */ + pathman_config_relid = get_pathman_config_relid(true); + if (relid == pathman_config_relid) + { delay_pathman_shutdown(); + } - /* Invalidate PartParentInfo cache if needed */ - partitioned_table = forget_parent_of_partition(relid, &search); + /* Invalidation event for some user table */ + else if (relid >= FirstNormalObjectId) + { + /* Invalidate PartBoundInfo entry if needed */ + forget_bounds_of_rel(relid); + + /* Invalidate PartStatusInfo entry if needed */ + forget_status_of_relation(relid); + + /* Invalidate PartParentInfo entry if needed */ + forget_parent_of_partition(relid); + } +} - switch (search) +/* + * Utility function invoker hook. + * NOTE: 'first_arg' is (PlannedStmt *) in PG 10, or (Node *) in PG <= 9.6. 
+ * In PG 13 (2f9661311b8) command completion tags was reworked (added QueryCompletion struct) + */ +void +#if PG_VERSION_NUM >= 140000 +/* + * pathman_post_parse_analyze_hook(), pathman_post_parse_analyze_hook_next(): + * in 14 new argument was added (5fd9dfa5f50) + */ +pathman_process_utility_hook(PlannedStmt *first_arg, + const char *queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, QueryCompletion *queryCompletion) +{ + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; +#elif PG_VERSION_NUM >= 130000 +pathman_process_utility_hook(PlannedStmt *first_arg, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, QueryCompletion *queryCompletion) +{ + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; +#elif PG_VERSION_NUM >= 100000 +pathman_process_utility_hook(PlannedStmt *first_arg, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, char *completionTag) +{ + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; +#else +pathman_process_utility_hook(Node *first_arg, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + DestReceiver *dest, + char *completionTag) +{ + Node *parsetree = first_arg; + int stmt_location = -1, + stmt_len = 0; +#endif + + if (IsPathmanReady()) { - /* It is (or was) a valid partition */ - case PPS_ENTRY_PART_PARENT: - case PPS_ENTRY_PARENT: - { - elog(DEBUG2, "Invalidation message for partition %u [%u]", - relid, MyProcPid); + Oid relation_oid; + PartType part_type; + AttrNumber attr_number; + bool is_parent; - 
delay_invalidation_parent_rel(partitioned_table); - } - break; + /* Override standard COPY statement if needed */ + if (is_pathman_related_copy(parsetree)) + { + uint64 processed; + + /* Handle our COPY case (and show a special cmd name) */ + PathmanDoCopy((CopyStmt *) parsetree, queryString, + stmt_location, stmt_len, &processed); +#if PG_VERSION_NUM >= 130000 + if (queryCompletion) + SetQueryCompletion(queryCompletion, CMDTAG_COPY, processed); +#else + if (completionTag) + snprintf(completionTag, COMPLETION_TAG_BUFSIZE, + "COPY " UINT64_FORMAT, processed); +#endif + + return; /* don't call standard_ProcessUtility() or hooks */ + } - /* Both syscache and pathman's cache say it isn't a partition */ - case PPS_ENTRY_NOT_FOUND: + /* Override standard RENAME statement if needed */ + else if (is_pathman_related_table_rename(parsetree, + &relation_oid, + &is_parent)) + { + const RenameStmt *rename_stmt = (const RenameStmt *) parsetree; + + if (is_parent) + PathmanRenameSequence(relation_oid, rename_stmt); + else + PathmanRenameConstraint(relation_oid, rename_stmt); + } + + /* Override standard ALTER COLUMN TYPE statement if needed */ + else if (is_pathman_related_alter_column_type(parsetree, + &relation_oid, + &attr_number, + &part_type)) + { + if (part_type == PT_HASH) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot change type of column \"%s\"" + " of table \"%s\" partitioned by HASH", + get_attname_compat(relation_oid, attr_number), + get_rel_name(relation_oid)))); + } +#ifdef ENABLE_DECLARATIVE + else if (is_pathman_related_partitioning_cmd(parsetree, &relation_oid)) + { + /* we can handle all the partitioning commands in ALTER .. 
TABLE */ + if (IsA(parsetree, AlterTableStmt)) { - /* NOTE: Remove NOT_USED when it's time */ - delay_invalidation_parent_rel(partitioned_table); -#ifdef NOT_USED - elog(DEBUG2, "Invalidation message for relation %u [%u]", - relid, MyProcPid); -#endif + ListCell *lc; + AlterTableStmt *stmt = (AlterTableStmt *) parsetree; + + foreach(lc, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); + switch (cmd->subtype) + { + case AT_AttachPartition: + handle_attach_partition(relation_oid, cmd); + return; + case AT_DetachPartition: + handle_detach_partition(cmd); + return; + default: + elog(ERROR, "can't handle this command"); + } + } } - break; - - /* We can't say anything (state is not transactional) */ - case PPS_NOT_SURE: + else if (IsA(parsetree, CreateStmt)) { - elog(DEBUG2, "Invalidation message for vague relation %u [%u]", - relid, MyProcPid); - - delay_invalidation_vague_rel(relid); + handle_create_partition_of(relation_oid, (CreateStmt *) parsetree); + return; } - break; + } +#endif + } - default: - elog(ERROR, "Not implemented yet"); - break; + /* Finally call process_utility_hook_next or standard_ProcessUtility */ +#if PG_VERSION_NUM >= 140000 + call_process_utility_compat((pathman_process_utility_hook_next ? + pathman_process_utility_hook_next : + standard_ProcessUtility), + first_arg, queryString, + readOnlyTree, + context, params, queryEnv, + dest, queryCompletion); +#elif PG_VERSION_NUM >= 130000 + call_process_utility_compat((pathman_process_utility_hook_next ? + pathman_process_utility_hook_next : + standard_ProcessUtility), + first_arg, queryString, + context, params, queryEnv, + dest, queryCompletion); +#else + call_process_utility_compat((pathman_process_utility_hook_next ? + pathman_process_utility_hook_next : + standard_ProcessUtility), + first_arg, queryString, + context, params, queryEnv, + dest, completionTag); +#endif +} + +/* + * Planstate tree nodes could have been copied. 
+ * It breaks references on corresponding + * ModifyTable node from PartitionRouter nodes. + */ +static void +fix_mt_refs(PlanState *state, void *context) +{ + ModifyTableState *mt_state = (ModifyTableState *) state; + PartitionRouterState *pr_state; +#if PG_VERSION_NUM < 140000 + int i; +#endif + + if (!IsA(state, ModifyTableState)) + return; +#if PG_VERSION_NUM >= 140000 + { + CustomScanState *pf_state = (CustomScanState *) outerPlanState(mt_state); +#else + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; +#endif + if (IsPartitionFilterState(pf_state)) + { + pr_state = linitial(pf_state->custom_ps); + if (IsPartitionRouterState(pr_state)) + { + pr_state->mt_state = mt_state; + } + } + } +} + +void +pathman_executor_start_hook(QueryDesc *queryDesc, int eflags) +{ + if (pathman_executor_start_hook_prev) + pathman_executor_start_hook_prev(queryDesc, eflags); + else + standard_ExecutorStart(queryDesc, eflags); + + /* + * HACK for compatibility with pgpro_stats. + * Fix possibly broken planstate tree. 
+ */ + state_tree_visitor(queryDesc->planstate, fix_mt_refs, NULL); +} diff --git a/src/hooks.h b/src/hooks.h deleted file mode 100644 index 022387b1..00000000 --- a/src/hooks.h +++ /dev/null @@ -1,53 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * hooks.h - * prototypes of rel_pathlist and join_pathlist hooks - * - * Copyright (c) 2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef JOIN_HOOK_H -#define JOIN_HOOK_H - -#include "postgres.h" -#include "optimizer/planner.h" -#include "optimizer/paths.h" -#include "parser/analyze.h" -#include "storage/ipc.h" - - -extern set_join_pathlist_hook_type set_join_pathlist_next; -extern set_rel_pathlist_hook_type set_rel_pathlist_hook_next; -extern planner_hook_type planner_hook_next; -extern post_parse_analyze_hook_type post_parse_analyze_hook_next; -extern shmem_startup_hook_type shmem_startup_hook_next; - - -void pathman_join_pathlist_hook(PlannerInfo *root, - RelOptInfo *joinrel, - RelOptInfo *outerrel, - RelOptInfo *innerrel, - JoinType jointype, - JoinPathExtraData *extra); - -void pathman_rel_pathlist_hook(PlannerInfo *root, - RelOptInfo *rel, - Index rti, - RangeTblEntry *rte); - -void pg_pathman_enable_assign_hook(char newval, void *extra); - -PlannedStmt * pathman_planner_hook(Query *parse, - int cursorOptions, - ParamListInfo boundParams); - -void pathman_post_parse_analysis_hook(ParseState *pstate, - Query *query); - -void pathman_shmem_startup_hook(void); - -void pathman_relcache_hook(Datum arg, Oid relid); - -#endif diff --git a/src/include/compat/debug_compat_features.h b/src/include/compat/debug_compat_features.h new file mode 100644 index 00000000..09f12849 --- /dev/null +++ b/src/include/compat/debug_compat_features.h @@ -0,0 +1,15 @@ +/* ------------------------------------------------------------------------ + * + * debug_custom_features.h + * Macros to control PgPro-related features etc + * 
+ * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +/* Main toggle */ +#define ENABLE_PGPRO_PATCHES + +/* PgPro exclusive features */ +#define ENABLE_PATHMAN_AWARE_COPY_WIN32 diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h new file mode 100644 index 00000000..f6330627 --- /dev/null +++ b/src/include/compat/pg_compat.h @@ -0,0 +1,1248 @@ +/* ------------------------------------------------------------------------ + * + * pg_compat.h + * Compatibility tools for PostgreSQL API + * + * Copyright (c) 2016-2020, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PG_COMPAT_H +#define PG_COMPAT_H + +/* Check PostgreSQL version (9.5.4 contains an important fix for BGW) */ +#include "pg_config.h" +#if PG_VERSION_NUM < 90503 + #error "Cannot build pg_pathman with PostgreSQL version lower than 9.5.3" +#elif PG_VERSION_NUM < 90504 + #warning "It is STRONGLY recommended to use pg_pathman with PostgreSQL 9.5.4 since it contains important fixes" +#endif + +#include "compat/debug_compat_features.h" + +#include "postgres.h" +#include "access/tupdesc.h" +#include "commands/trigger.h" +#include "executor/executor.h" +#include "nodes/memnodes.h" +#include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/pathnodes.h" +#else +#include "nodes/relation.h" +#endif +#include "nodes/pg_list.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/appendinfo.h" +#endif +#include "optimizer/cost.h" +#include "optimizer/paths.h" +#include "optimizer/pathnode.h" +#include "optimizer/prep.h" +#include "utils/memutils.h" + +/* + * ---------- + * Variants + * ---------- + */ + +/* + * get_attname() + */ +#if PG_VERSION_NUM >= 110000 +#define get_attname_compat(relid, attnum) \ + get_attname((relid), (attnum), false) +#else +#define get_attname_compat(relid, attnum) \ + get_attname((relid), (attnum)) +#endif 
+ + +/* + * calc_nestloop_required_outer + */ +#if PG_VERSION_NUM >= 110000 +#define calc_nestloop_required_outer_compat(outer, inner) \ + calc_nestloop_required_outer((outer)->parent->relids, PATH_REQ_OUTER(outer), \ + (inner)->parent->relids, PATH_REQ_OUTER(inner)) +#else +#define calc_nestloop_required_outer_compat(outer, inner) \ + calc_nestloop_required_outer((outer), (inner)) +#endif + + +/* + * adjust_appendrel_attrs() + */ +#if PG_VERSION_NUM >= 110000 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + (node), \ + 1, &(appinfo)) +#elif PG_VERSION_NUM >= 90500 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + (node), \ + (appinfo)) +#endif + + +#if PG_VERSION_NUM >= 110000 +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltarget->exprs = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + 1, \ + &(appinfo)); \ + } while (0) +#elif PG_VERSION_NUM >= 90600 +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltarget->exprs = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + (appinfo)); \ + } while (0) +#elif PG_VERSION_NUM >= 90500 +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltargetlist = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltargetlist, \ + (appinfo)); \ + } while (0) +#endif + +/* + * CheckValidResultRel() + */ +#if PG_VERSION_NUM >= 170000 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd), NIL) +#elif PG_VERSION_NUM >= 100000 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd)) +#elif PG_VERSION_NUM >= 90500 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) +#endif + +/* + * BeginCopyFrom() + */ +#if 
PG_VERSION_NUM >= 140000 +#define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ + attnamelist, options) \ + BeginCopyFrom((pstate), (rel), NULL, (filename), (is_program), \ + (data_source_cb), (attnamelist), (options)) +#elif PG_VERSION_NUM >= 100000 +#define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ + attnamelist, options) \ + BeginCopyFrom((pstate), (rel), (filename), (is_program), \ + (data_source_cb), (attnamelist), (options)) +#elif PG_VERSION_NUM >= 90500 +#define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ + attnamelist, options) \ + BeginCopyFrom((rel), (filename), (is_program), (attnamelist), (options)) +#endif + + +/* + * build_simple_rel() + */ +#if PG_VERSION_NUM >= 100000 +#define build_simple_rel_compat(root, childRTindex, parent_rel) \ + build_simple_rel((root), (childRTindex), (parent_rel)) +#elif PG_VERSION_NUM >= 90500 +#define build_simple_rel_compat(root, childRTindex, parent_rel) \ + build_simple_rel((root), (childRTindex), \ + (parent_rel) ? 
RELOPT_OTHER_MEMBER_REL : RELOPT_BASEREL) +#endif + + +/* + * Define ALLOCSET_xxx_SIZES for our precious MemoryContexts + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 +#define ALLOCSET_DEFAULT_SIZES \ + ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE + +#define ALLOCSET_SMALL_SIZES \ + ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE +#endif + + +/* + * call_process_utility_compat() + * + * the parameter 'first_arg' is: + * - in pg 10 PlannedStmt object + * - in pg 9.6 and lower Node parsetree + */ +#if PG_VERSION_NUM >= 140000 +#define call_process_utility_compat(process_utility, first_arg, query_string, \ + readOnlyTree, context, params, query_env, \ + dest, completion_tag) \ + (process_utility)((first_arg), (query_string), readOnlyTree, \ + (context), (params), \ + (query_env), (dest), (completion_tag)) +#elif PG_VERSION_NUM >= 100000 +#define call_process_utility_compat(process_utility, first_arg, query_string, \ + context, params, query_env, dest, \ + completion_tag) \ + (process_utility)((first_arg), (query_string), (context), (params), \ + (query_env), (dest), (completion_tag)) +#elif PG_VERSION_NUM >= 90500 +#define call_process_utility_compat(process_utility, first_arg, query_string, \ + context, params, query_env, dest, \ + completion_tag) \ + (process_utility)((first_arg), (query_string), (context), (params), \ + (dest), (completion_tag)) +#endif + + +/* + * CatalogTupleInsert() + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 100000 +#define CatalogTupleInsert(heapRel, heapTuple) \ + do { \ + simple_heap_insert((heapRel), (heapTuple)); \ + CatalogUpdateIndexes((heapRel), (heapTuple)); \ + } while (0) +#endif + + +/* + * CatalogTupleUpdate() + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 100000 +#define CatalogTupleUpdate(heapRel, updTid, heapTuple) \ + do { \ + simple_heap_update((heapRel), (updTid), (heapTuple)); \ + CatalogUpdateIndexes((heapRel), (heapTuple)); \ + } while 
(0) +#endif + + +/* + * check_index_predicates() + */ +#if PG_VERSION_NUM >= 90600 +#define check_index_predicates_compat(root, rel) \ + check_index_predicates((root), (rel)) +#elif PG_VERSION_NUM >= 90500 +#define check_index_predicates_compat(root, rel) \ + check_partial_indexes((root), (rel)) +#endif + + +/* + * create_append_path() + */ +#if PG_VERSION_NUM >= 140000 +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, -1) +#elif PG_VERSION_NUM >= 130000 +/* + * PGPRO-3938 made create_append_path compatible with vanilla again + */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#elif PG_VERSION_NUM >= 120000 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#else +/* TODO pgpro version? 
Looks like something is not ported yet */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false) +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 110000 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#else +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false, NIL) +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 100000 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL) +#else +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL, \ + false, NIL) +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 90600 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer), (parallel_workers)) +#else +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer), \ + false, NIL, (parallel_workers)) +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 90500 +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer)) +#endif /* PG_VERSION_NUM */ + + +/* + * create_merge_append_path() + */ +#if PG_VERSION_NUM >= 140000 +#define create_merge_append_path_compat(root, rel, subpaths, 
pathkeys, \ + required_outer) \ + create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ + (required_outer)) +#elif PG_VERSION_NUM >= 100000 +#define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ + required_outer) \ + create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ + (required_outer), NIL) +#elif PG_VERSION_NUM >= 90500 +#define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ + required_outer) \ + create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ + (required_outer)) +#endif + + +/* + * create_nestloop_path() + */ +#if PG_VERSION_NUM >= 100000 || (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) +#define create_nestloop_path_compat(root, joinrel, jointype, workspace, extra, \ + outer, inner, filtered_joinclauses, pathkeys, \ + required_outer) \ + create_nestloop_path((root), (joinrel), (jointype), (workspace), (extra), \ + (outer), (inner), (filtered_joinclauses), (pathkeys), \ + (required_outer)) +#elif PG_VERSION_NUM >= 90500 +#define create_nestloop_path_compat(root, joinrel, jointype, workspace, extra, \ + outer, inner, filtered_joinclauses, pathkeys, \ + required_outer) \ + create_nestloop_path((root), (joinrel), (jointype), (workspace), \ + (extra)->sjinfo, &(extra)->semifactors, (outer), \ + (inner), (filtered_joinclauses), (pathkeys), \ + (required_outer)) +#endif + + +/* + * create_plain_partial_paths() + */ +#if PG_VERSION_NUM >= 90600 +extern void create_plain_partial_paths(PlannerInfo *root, + RelOptInfo *rel); +#define create_plain_partial_paths_compat(root, rel) \ + create_plain_partial_paths((root), (rel)) +#endif + + +/* + * DefineRelation() + * + * for v10 set NULL into 'queryString' argument as it's used only under vanilla + * partition creating + */ +#if PG_VERSION_NUM >= 100000 +#define DefineRelationCompat(createstmt, relkind, ownerId, typaddress) \ + DefineRelation((createstmt), (relkind), (ownerId), (typaddress), NULL) +#elif PG_VERSION_NUM >= 90500 +#define 
DefineRelationCompat(createstmt, relkind, ownerId, typaddress) \ + DefineRelation((createstmt), (relkind), (ownerId), (typaddress)) +#endif + + +/* + * DoCopy() + */ +#if PG_VERSION_NUM >= 100000 +#define DoCopyCompat(pstate, copy_stmt, stmt_location, stmt_len, processed) \ + DoCopy((pstate), (copy_stmt), (stmt_location), (stmt_len), (processed)) +#elif PG_VERSION_NUM >= 90500 +#define DoCopyCompat(pstate, copy_stmt, stmt_location, stmt_len, processed) \ + DoCopy((copy_stmt), (pstate)->p_sourcetext, (processed)) +#endif + + +/* + * ExecBuildProjectionInfo() + */ +#if PG_VERSION_NUM >= 100000 +#define ExecBuildProjectionInfoCompat(targetList, econtext, resultSlot, \ + ownerPlanState, inputDesc) \ + ExecBuildProjectionInfo((targetList), (econtext), (resultSlot), \ + (ownerPlanState), (inputDesc)) +#elif PG_VERSION_NUM >= 90500 +#define ExecBuildProjectionInfoCompat(targetList, econtext, resultSlot, \ + ownerPlanState, inputDesc) \ + ExecBuildProjectionInfo((List *) ExecInitExpr((Expr *) (targetList), \ + (ownerPlanState)), \ + (econtext), (resultSlot), (inputDesc)) +#endif + + +/* + * ExecEvalExpr() + */ +#if PG_VERSION_NUM >= 100000 +#define ExecEvalExprCompat(expr, econtext, isNull) \ + ExecEvalExpr((expr), (econtext), (isNull)) +#elif PG_VERSION_NUM >= 90500 +static inline Datum +ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) +{ + ExprDoneCond isdone; + Datum result = ExecEvalExpr(expr, econtext, isnull, &isdone); + + if (isdone != ExprSingleResult) + elog(ERROR, "expression should return single value"); + + return result; +} +#endif + + +/* + * ExecCheck() + */ +#if PG_VERSION_NUM < 100000 +static inline bool +ExecCheck(ExprState *state, ExprContext *econtext) +{ + Datum ret; + bool isnull; + MemoryContext old_mcxt; + + /* short-circuit (here and in ExecInitCheck) for empty restriction list */ + if (state == NULL) + return true; + + old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + ret = ExecEvalExprCompat(state, 
econtext, &isnull); + MemoryContextSwitchTo(old_mcxt); + + if (isnull) + return true; + + return DatumGetBool(ret); +} +#endif + + +/* + * extract_actual_join_clauses() + */ +#if (PG_VERSION_NUM >= 100003) || \ + (PG_VERSION_NUM < 100000 && PG_VERSION_NUM >= 90609) || \ + (PG_VERSION_NUM < 90600 && PG_VERSION_NUM >= 90513) +#define extract_actual_join_clauses_compat(restrictinfo_list, \ + joinrelids, \ + joinquals, \ + otherquals) \ + extract_actual_join_clauses((restrictinfo_list), \ + (joinrelids), \ + (joinquals), \ + (otherquals)) +#else +#define extract_actual_join_clauses_compat(restrictinfo_list, \ + joinrelids, \ + joinquals, \ + otherquals) \ + extract_actual_join_clauses((restrictinfo_list), \ + (joinquals), \ + (otherquals)) +#endif + + +/* + * get_all_actual_clauses() + */ +#if PG_VERSION_NUM >= 100000 +extern List *get_all_actual_clauses(List *restrictinfo_list); +#endif + + +/* + * get_cheapest_path_for_pathkeys() + */ +#if PG_VERSION_NUM >= 100000 +#define get_cheapest_path_for_pathkeys_compat(paths, pathkeys, required_outer, \ + cost_criterion, \ + require_parallel_safe) \ + get_cheapest_path_for_pathkeys((paths), (pathkeys), (required_outer), \ + (cost_criterion), \ + (require_parallel_safe)) +#elif PG_VERSION_NUM >= 90500 +#define get_cheapest_path_for_pathkeys_compat(paths, pathkeys, required_outer, \ + cost_criterion, \ + require_parallel_safe) \ + get_cheapest_path_for_pathkeys((paths), (pathkeys), (required_outer), \ + (cost_criterion)) +#endif + + +/* + * get_parameterized_joinrel_size() + */ +#if PG_VERSION_NUM >= 90600 +#define get_parameterized_joinrel_size_compat(root, rel, outer_path, \ + inner_path, sjinfo, \ + restrict_clauses) \ + get_parameterized_joinrel_size((root), (rel), (outer_path), \ + (inner_path), (sjinfo), \ + (restrict_clauses)) +#elif PG_VERSION_NUM >= 90500 +#define get_parameterized_joinrel_size_compat(root, rel, \ + outer_path, \ + inner_path, \ + sjinfo, restrict_clauses) \ + get_parameterized_joinrel_size((root), 
(rel), \ + (outer_path)->rows, \ + (inner_path)->rows, \ + (sjinfo), (restrict_clauses)) +#endif + + +/* + * get_rel_persistence() + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 +char get_rel_persistence(Oid relid); +#endif + + +/* + * initial_cost_nestloop() + */ +#if PG_VERSION_NUM >= 100000 || (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) +#define initial_cost_nestloop_compat(root, workspace, jointype, outer_path, \ + inner_path, extra) \ + initial_cost_nestloop((root), (workspace), (jointype), (outer_path), \ + (inner_path), (extra)) +#elif PG_VERSION_NUM >= 90500 +#define initial_cost_nestloop_compat(root, workspace, jointype, outer_path, \ + inner_path, extra) \ + initial_cost_nestloop((root), (workspace), (jointype), (outer_path), \ + (inner_path), (extra)->sjinfo, &(extra)->semifactors) +#endif + + +/* + * InitResultRelInfo() + * + * for v10 set NULL into 'partition_root' argument to specify that result + * relation is not vanilla partition + */ +#if PG_VERSION_NUM >= 100000 +#define InitResultRelInfoCompat(resultRelInfo, resultRelationDesc, \ + resultRelationIndex, instrument_options) \ + InitResultRelInfo((resultRelInfo), (resultRelationDesc), \ + (resultRelationIndex), NULL, (instrument_options)) +#elif PG_VERSION_NUM >= 90500 +#define InitResultRelInfoCompat(resultRelInfo, resultRelationDesc, \ + resultRelationIndex, instrument_options) \ + InitResultRelInfo((resultRelInfo), (resultRelationDesc), \ + (resultRelationIndex), (instrument_options)) +#endif + + +/* + * ItemPointerIndicatesMovedPartitions() + * + * supported since v11, provide a stub for previous versions. 
+ */ +#if PG_VERSION_NUM < 110000 +#define ItemPointerIndicatesMovedPartitions(ctid) ( false ) +#endif + + +/* + * make_restrictinfo() + */ +#if PG_VERSION_NUM >= 100000 +extern List *make_restrictinfos_from_actual_clauses(PlannerInfo *root, + List *clause_list); +#endif + + +/* + * make_result() + */ +#if PG_VERSION_NUM >= 90600 +extern Result *make_result(List *tlist, + Node *resconstantqual, + Plan *subplan); +#define make_result_compat(root, tlist, resconstantqual, subplan) \ + make_result((tlist), (resconstantqual), (subplan)) +#elif PG_VERSION_NUM >= 90500 +#define make_result_compat(root, tlist, resconstantqual, subplan) \ + make_result((root), (tlist), (resconstantqual), (subplan)) +#endif + + +/* + * McxtStatsInternal() + */ +#if PG_VERSION_NUM >= 90600 +void McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals); +#endif + + +/* + * oid_cmp() + */ +#if PG_VERSION_NUM >=90500 && PG_VERSION_NUM < 100000 +extern int oid_cmp(const void *p1, const void *p2); +#endif + + +/* + * parse_analyze() + * + * for v10 cast first arg to RawStmt type + */ +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ + query_env) \ + parse_analyze_fixedparams((RawStmt *) (parse_tree), (query_string), (param_types), \ + (nparams), (query_env)) +#elif PG_VERSION_NUM >= 100000 +#define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ + query_env) \ + parse_analyze((RawStmt *) (parse_tree), (query_string), (param_types), \ + (nparams), (query_env)) +#elif PG_VERSION_NUM >= 90500 +#define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ + query_env) \ + parse_analyze((Node *) (parse_tree), (query_string), (param_types), \ + (nparams)) +#endif + + +/* + * pg_analyze_and_rewrite() + * + * for v10 cast first arg to RawStmt type + */ +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#define 
pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ + nparams, query_env) \ + pg_analyze_and_rewrite_fixedparams((RawStmt *) (parsetree), (query_string), \ + (param_types), (nparams), (query_env)) +#elif PG_VERSION_NUM >= 100000 +#define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ + nparams, query_env) \ + pg_analyze_and_rewrite((RawStmt *) (parsetree), (query_string), \ + (param_types), (nparams), (query_env)) +#elif PG_VERSION_NUM >= 90500 +#define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ + nparams, query_env) \ + pg_analyze_and_rewrite((Node *) (parsetree), (query_string), \ + (param_types), (nparams)) +#endif + + +/* + * ProcessUtility() + * + * for v10 set NULL into 'queryEnv' argument + */ +#if PG_VERSION_NUM >= 140000 +#define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ + completionTag) \ + do { \ + PlannedStmt *stmt = makeNode(PlannedStmt); \ + stmt->commandType = CMD_UTILITY; \ + stmt->canSetTag = true; \ + stmt->utilityStmt = (parsetree); \ + stmt->stmt_location = -1; \ + stmt->stmt_len = 0; \ + ProcessUtility(stmt, (queryString), false, (context), (params), NULL, \ + (dest), (completionTag)); \ + } while (0) +#elif PG_VERSION_NUM >= 100000 +#define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ + completionTag) \ + do { \ + PlannedStmt *stmt = makeNode(PlannedStmt); \ + stmt->commandType = CMD_UTILITY; \ + stmt->canSetTag = true; \ + stmt->utilityStmt = (parsetree); \ + stmt->stmt_location = -1; \ + stmt->stmt_len = 0; \ + ProcessUtility(stmt, (queryString), (context), (params), NULL, \ + (dest), (completionTag)); \ + } while (0) +#elif PG_VERSION_NUM >= 90500 +#define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ + completionTag) \ + ProcessUtility((parsetree), (queryString), (context), (params), \ + (dest), (completionTag)) +#endif + + +/* + * pull_var_clause() + */ +#if PG_VERSION_NUM >= 90600 +#define 
pull_var_clause_compat(node, aggbehavior, phbehavior) \ + pull_var_clause((node), (aggbehavior) | (phbehavior)) +#elif PG_VERSION_NUM >= 90500 +#define pull_var_clause_compat(node, aggbehavior, phbehavior) \ + pull_var_clause((node), (aggbehavior), (phbehavior)) +#endif + + +/* + * set_dummy_rel_pathlist() + */ +#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 +void set_dummy_rel_pathlist(RelOptInfo *rel); +#endif + + +/* + * set_rel_consider_parallel() + */ +#if PG_VERSION_NUM >= 90600 +extern void set_rel_consider_parallel(PlannerInfo *root, + RelOptInfo *rel, + RangeTblEntry *rte); +#define set_rel_consider_parallel_compat(root, rel, rte) \ + set_rel_consider_parallel((root), (rel), (rte)) +#endif + + +/* + * tlist_member_ignore_relabel() + * + * in compat version the type of first argument is (Expr *) + */ +#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 /* function removed in + * 375398244168add84a884347625d14581a421e71 */ +extern TargetEntry *tlist_member_ignore_relabel(Expr *node, List *targetlist); +#endif +#define tlist_member_ignore_relabel_compat(expr, targetlist) \ + tlist_member_ignore_relabel((expr), (targetlist)) +#elif PG_VERSION_NUM >= 90500 +#define tlist_member_ignore_relabel_compat(expr, targetlist) \ + tlist_member_ignore_relabel((Node *) (expr), (targetlist)) +#endif + + +/* + * convert_tuples_by_name_map() + */ +#if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ + (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) +extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, + TupleDesc outdesc, + const char *msg); +#else +#include "access/tupconvert.h" +#endif + +/* + * ExecBRUpdateTriggers() + */ +#if PG_VERSION_NUM >= 160000 +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot), NULL, NULL) +#elif PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ +#define 
ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot), NULL) +#else +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot)) +#endif + +/* + * ExecARInsertTriggers() + */ +#if PG_VERSION_NUM >= 100000 +#define ExecARInsertTriggersCompat(estate, relinfo, trigtuple, \ + recheck_indexes, transition_capture) \ + ExecARInsertTriggers((estate), (relinfo), (trigtuple), \ + (recheck_indexes), (transition_capture)) +#elif PG_VERSION_NUM >= 90500 +#define ExecARInsertTriggersCompat(estate, relinfo, trigtuple, \ + recheck_indexes, transition_capture) \ + ExecARInsertTriggers((estate), (relinfo), (trigtuple), (recheck_indexes)) +#endif + + +/* + * ExecBRDeleteTriggers() + */ +#if PG_VERSION_NUM >= 160000 +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (epqslot), NULL, NULL) +#elif PG_VERSION_NUM >= 110000 +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (epqslot)) +#else +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple)) +#endif + + +/* + * ExecARDeleteTriggers() + */ +#if PG_VERSION_NUM >= 150000 /* for commit ba9a7e392171 */ +#define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ + fdw_trigtuple, transition_capture) \ + ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ + (fdw_trigtuple), (transition_capture), false) +#elif PG_VERSION_NUM >= 100000 +#define 
ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ + fdw_trigtuple, transition_capture) \ + ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ + (fdw_trigtuple), (transition_capture)) +#elif PG_VERSION_NUM >= 90500 +#define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ + fdw_trigtuple, transition_capture) \ + ExecARDeleteTriggers((estate), (relinfo), (tupleid), (fdw_trigtuple)) +#endif + + +/* + * ExecASInsertTriggers() + */ +#if PG_VERSION_NUM >= 100000 +#define ExecASInsertTriggersCompat(estate, relinfo, transition_capture) \ + ExecASInsertTriggers((estate), (relinfo), (transition_capture)) +#elif PG_VERSION_NUM >= 90500 +#define ExecASInsertTriggersCompat(estate, relinfo, transition_capture) \ + ExecASInsertTriggers((estate), (relinfo)) +#endif + + +/* + * map_variable_attnos() + */ +#if PG_VERSION_NUM >= 100000 +#define map_variable_attnos_compat(node, varno, \ + sublevels_up, map, map_len, \ + to_rowtype, found_wholerow) \ + map_variable_attnos((node), (varno), \ + (sublevels_up), (map), (map_len), \ + (to_rowtype), (found_wholerow)) +#elif PG_VERSION_NUM >= 90500 +#define map_variable_attnos_compat(node, varno, \ + sublevels_up, map, map_len, \ + to_rowtype, found_wholerow) \ + map_variable_attnos((node), (varno), \ + (sublevels_up), (map), (map_len), \ + (found_wholerow)) +#endif + +#ifndef TupleDescAttr +#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) +#endif + + +/* + * RegisterCustomScanMethods() + */ +#if PG_VERSION_NUM < 90600 +#define RegisterCustomScanMethods(methods) +#endif + +/* + * MakeTupleTableSlot() + */ +#if PG_VERSION_NUM >= 120000 +#define MakeTupleTableSlotCompat(tts_ops) \ + MakeTupleTableSlot(NULL, (tts_ops)) +#elif PG_VERSION_NUM >= 110000 +#define MakeTupleTableSlotCompat(tts_ops) \ + MakeTupleTableSlot(NULL) +#else +#define MakeTupleTableSlotCompat(tts_ops) \ + MakeTupleTableSlot() +#endif + +/* + * BackgroundWorkerInitializeConnectionByOid() + */ +#if PG_VERSION_NUM >= 110000 +#define 
BackgroundWorkerInitializeConnectionByOidCompat(dboid, useroid) \ + BackgroundWorkerInitializeConnectionByOid((dboid), (useroid), 0) +#else +#define BackgroundWorkerInitializeConnectionByOidCompat(dboid, useroid) \ + BackgroundWorkerInitializeConnectionByOid((dboid), (useroid)) +#endif + +/* + * heap_delete() + */ +#if PG_VERSION_NUM >= 110000 +#define heap_delete_compat(relation, tid, cid, crosscheck, \ + wait, hufd, changing_part) \ + heap_delete((relation), (tid), (cid), (crosscheck), \ + (wait), (hufd), (changing_part)) +#else +#define heap_delete_compat(relation, tid, cid, crosscheck, \ + wait, hufd, changing_part) \ + heap_delete((relation), (tid), (cid), (crosscheck), \ + (wait), (hufd)) +#endif + +/* + * compute_parallel_worker() + */ +#if PG_VERSION_NUM >= 110000 +#define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ + compute_parallel_worker((rel), (heap_pages), (index_pages), \ + max_parallel_workers_per_gather) +#elif PG_VERSION_NUM >= 100000 +#define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ + compute_parallel_worker((rel), (heap_pages), (index_pages)) +#endif + + +/* + * generate_gather_paths() + */ +#if PG_VERSION_NUM >= 110000 +#define generate_gather_paths_compat(root, rel) \ + generate_gather_paths((root), (rel), false) +#elif PG_VERSION_NUM >= 90600 +#define generate_gather_paths_compat(root, rel) \ + generate_gather_paths((root), (rel)) +#else +#define generate_gather_paths_compat(root, rel) +#endif + + +/* + * find_childrel_appendrelinfo() + */ +#if PG_VERSION_NUM >= 110000 +#define find_childrel_appendrelinfo_compat(root, rel) \ + ((root)->append_rel_array[(rel)->relid]) +#else +#define find_childrel_appendrelinfo_compat(root, rel) \ + find_childrel_appendrelinfo((root), (rel)) +#endif + +/* + * HeapTupleGetXmin() + * Vanilla PostgreSQL has HeaptTupleHeaderGetXmin, but for 64-bit xid + * we need access to entire tuple, not just its header. 
+ */ +#ifdef XID_IS_64BIT +#define HeapTupleGetXminCompat(htup) HeapTupleGetXmin(htup) +#else +#define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data) +#endif + +/* + * is_andclause + */ +#if PG_VERSION_NUM >= 120000 +#define is_andclause_compat(clause) is_andclause(clause) +#else +#define is_andclause_compat(clause) and_clause(clause) +#endif + +/* + * GetDefaultTablespace + */ +#if PG_VERSION_NUM >= 120000 +#define GetDefaultTablespaceCompat(relpersistence, partitioned) \ + GetDefaultTablespace((relpersistence), (partitioned)) +#else +#define GetDefaultTablespaceCompat(relpersistence, partitioned) \ + GetDefaultTablespace((relpersistence)) +#endif + +/* + * CreateTemplateTupleDesc + */ +#if PG_VERSION_NUM >= 120000 +#define CreateTemplateTupleDescCompat(natts, hasoid) CreateTemplateTupleDesc(natts) +#else +#define CreateTemplateTupleDescCompat(natts, hasoid) CreateTemplateTupleDesc((natts), (hasoid)) +#endif + +/* + * addRangeTableEntryForRelation + */ +#if PG_VERSION_NUM >= 120000 +#define addRangeTableEntryForRelationCompat(pstate, rel, lockmode, alias, inh, inFromCl) \ + addRangeTableEntryForRelation((pstate), (rel), (lockmode), (alias), (inh), (inFromCl)) +#else +#define addRangeTableEntryForRelationCompat(pstate, rel, lockmode, alias, inh, inFromCl) \ + addRangeTableEntryForRelation((pstate), (rel), (alias), (inh), (inFromCl)) +#endif + +/* + * nextCopyFrom (WITH_OIDS removed) + */ +#if PG_VERSION_NUM >= 120000 +#define NextCopyFromCompat(cstate, econtext, values, nulls, tupleOid) \ + NextCopyFrom((cstate), (econtext), (values), (nulls)) +#else +#define NextCopyFromCompat(cstate, econtext, values, nulls, tupleOid) \ + NextCopyFrom((cstate), (econtext), (values), (nulls), (tupleOid)) +#endif + +/* + * ExecInsertIndexTuples. Since 12 slot contains tupleid. + * Since 14: new fields "resultRelInfo", "update". + * Since 16: new bool field "onlySummarizing". 
+ */ +#if PG_VERSION_NUM >= 160000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes), (onlySummarizing)) +#elif PG_VERSION_NUM >= 140000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes)) +#elif PG_VERSION_NUM >= 120000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((slot), (estate), (noDupError), (specConflict), (arbiterIndexes)) +#else +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((slot), (tupleid), (estate), (noDupError), (specConflict), (arbiterIndexes)) +#endif + +/* + * RenameRelationInternal + */ +#if PG_VERSION_NUM >= 120000 +#define RenameRelationInternalCompat(myrelid, newname, is_internal, is_index) \ + RenameRelationInternal((myrelid), (newname), (is_internal), (is_index)) +#else +#define RenameRelationInternalCompat(myrelid, newname, is_internal, is_index) \ + RenameRelationInternal((myrelid), (newname), (is_internal)) +#endif + +/* + * getrelid + */ +#if PG_VERSION_NUM >= 120000 +#define getrelid(rangeindex,rangetable) \ + (rt_fetch(rangeindex, rangetable)->relid) +#endif + +/* + * AddRelationNewConstraints + */ +#if PG_VERSION_NUM >= 120000 +#define AddRelationNewConstraintsCompat(rel, newColDefaults, newConstrains, allow_merge, is_local, is_internal) \ + AddRelationNewConstraints((rel), (newColDefaults), (newConstrains), (allow_merge), (is_local), (is_internal), NULL) +#else +#define 
AddRelationNewConstraintsCompat(rel, newColDefaults, newConstrains, allow_merge, is_local, is_internal) \ + AddRelationNewConstraints((rel), (newColDefaults), (newConstrains), (allow_merge), (is_local), (is_internal)) +#endif + +/* + * [PGPRO-3725] Since 11.7 and 12.1 in pgpro standard and ee PGPRO-2843 + * appeared, changing the signature, wow. There is no numeric pgpro edition + * macro (and never will be, for old versions), so distinguish via macro added + * by the commit. + */ +#if defined(QTW_DONT_COPY_DEFAULT) && (PG_VERSION_NUM < 140000) +#define expression_tree_mutator_compat(node, mutator, context) \ + expression_tree_mutator((node), (mutator), (context), 0) +#else +#define expression_tree_mutator_compat(node, mutator, context) \ + expression_tree_mutator((node), (mutator), (context)) +#endif + +/* + * stringToQualifiedNameList + */ +#if PG_VERSION_NUM >= 160000 +#define stringToQualifiedNameListCompat(string) \ + stringToQualifiedNameList((string), NULL) +#else +#define stringToQualifiedNameListCompat(string) \ + stringToQualifiedNameList((string)) +#endif + +/* + * ------------- + * Common code + * ------------- + */ +#if PG_VERSION_NUM >= 120000 +#define ExecInitExtraTupleSlotCompat(estate, tdesc, tts_ops) \ + ExecInitExtraTupleSlot((estate), (tdesc), (tts_ops)); +#else +#define ExecInitExtraTupleSlotCompat(estate, tdesc, tts_ops) \ + ExecInitExtraTupleSlotCompatHorse((estate), (tdesc)) +static inline TupleTableSlot * +ExecInitExtraTupleSlotCompatHorse(EState *s, TupleDesc t) +{ +#if PG_VERSION_NUM >= 110000 + return ExecInitExtraTupleSlot(s, t); +#else + TupleTableSlot *res = ExecInitExtraTupleSlot(s); + + if (t) + ExecSetSlotDescriptor(res, t); + + return res; +#endif +} +#endif + +/* See ExecEvalParamExtern() */ +static inline ParamExternData * +CustomEvalParamExternCompat(Param *param, + ParamListInfo params, + ParamExternData *prmdata) +{ + ParamExternData *prm; + +#if PG_VERSION_NUM >= 110000 + if (params->paramFetch != NULL) + prm = 
params->paramFetch(params, param->paramid, false, prmdata); + else + prm = ¶ms->params[param->paramid - 1]; +#else + prm = ¶ms->params[param->paramid - 1]; + + if (!OidIsValid(prm->ptype) && params->paramFetch != NULL) + params->paramFetch(params, param->paramid); +#endif + + return prm; +} + +void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); + +/* + * lnext() + * In >=13 list implementation was reworked (1cff1b95ab6) + */ +#if PG_VERSION_NUM >= 130000 +#define lnext_compat(l, lc) lnext((l), (lc)) +#else +#define lnext_compat(l, lc) lnext((lc)) +#endif + +/* + * heap_open() + * heap_openrv() + * heap_close() + * In >=13 heap_* was replaced with table_* (e0c4ec07284) + */ +#if PG_VERSION_NUM >= 130000 +#define heap_open_compat(r, l) table_open((r), (l)) +#define heap_openrv_compat(r, l) table_openrv((r), (l)) +#define heap_close_compat(r, l) table_close((r), (l)) +#else +#define heap_open_compat(r, l) heap_open((r), (l)) +#define heap_openrv_compat(r, l) heap_openrv((r), (l)) +#define heap_close_compat(r, l) heap_close((r), (l)) +#endif + +/* + * convert_tuples_by_name() + * In >=13 msg parameter in convert_tuples_by_name function was removed (fe66125974c) + */ +#if PG_VERSION_NUM >= 130000 +#define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o)) +#else +#define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o), (m)) +#endif + +/* + * raw_parser() + * In 14 new argument was added (844fe9f159a) + */ +#if PG_VERSION_NUM >= 140000 +#define raw_parser_compat(s) raw_parser((s), RAW_PARSE_DEFAULT) +#else +#define raw_parser_compat(s) raw_parser(s) +#endif + +/* + * make_restrictinfo() + * In >=16 4th, 5th and 9th arguments were added (991a3df227e) + * In >=16 3th and 9th arguments were removed (b448f1c8d83) + * In >=14 new argument was added (55dc86eca70) + */ +#if PG_VERSION_NUM >= 160000 +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), false, 
false, (p), (sl), (rr), NULL, (or)) +#else +#if PG_VERSION_NUM >= 140000 +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) +#else +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((c), (ipd), (od), (p), (sl), (rr), (or), (nr)) +#endif /* #if PG_VERSION_NUM >= 140000 */ +#endif /* #if PG_VERSION_NUM >= 160000 */ + +/* + * pull_varnos() + * In >=14 new argument was added (55dc86eca70) + */ +#if PG_VERSION_NUM >= 140000 +#define pull_varnos_compat(r, n) pull_varnos((r), (n)) +#else +#define pull_varnos_compat(r, n) pull_varnos(n) +#endif + +/* + * build_expression_pathkey() + * In >=16 argument was removed (b448f1c8d83) + */ +#if PG_VERSION_NUM >= 160000 +#define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, opno, rel, create_it) +#else +#define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, nullable_relids, opno, rel, create_it) +#endif + +/* + * EvalPlanQualInit() + * In >=16 argument was added (70b42f27902) + */ +#if PG_VERSION_NUM >= 160000 +#define EvalPlanQualInit_compat(epqstate, parentestate, subplan, auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam, NIL) +#else +#define EvalPlanQualInit_compat(epqstate, parentestate, subplan, auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam) +#endif + +#endif /* PG_COMPAT_H */ diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h new file mode 100644 index 00000000..c94504c3 --- /dev/null +++ b/src/include/compat/rowmarks_fix.h @@ -0,0 +1,61 @@ +/* ------------------------------------------------------------------------ + * + * rowmarks_fix.h + * Hack incorrect RowMark generation due to unset 'RTE->inh' flag + * NOTE: this code is only 
useful for vanilla + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef ROWMARKS_FIX_H +#define ROWMARKS_FIX_H + +#include "compat/debug_compat_features.h" + +#include "postgres.h" +#include "nodes/parsenodes.h" +#include "nodes/plannodes.h" +#if PG_VERSION_NUM < 120000 +#include "nodes/relation.h" +#else +#include "optimizer/optimizer.h" +#endif + + +#if PG_VERSION_NUM >= 90600 + +void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); + +#else + +/* + * Starting from 9.6, it's possible to append junk + * tableoid columns using the PlannerInfo->processed_tlist. + * This is absolutely crucial for UPDATE and DELETE queries, + * so we had to add some special fixes for 9.5: + * + * 1) disable dangerous UPDATE & DELETE optimizations. + * 2) disable optimizations for SELECT .. FOR UPDATE etc. + */ +#define LEGACY_ROWMARKS_95 + +#define append_tle_for_rowmark(root, rc) ( (void) true ) + +#endif + +/* + * add_vars_to_targetlist() + * In >=16 last argument was removed (b3ff6c742f6c) + */ +#if PG_VERSION_NUM >= 160000 +#define add_vars_to_targetlist_compat(root, vars, where_needed) \ + add_vars_to_targetlist((root), (vars), (where_needed)); +#else +#define add_vars_to_targetlist_compat(root, vars, where_needed) \ + add_vars_to_targetlist((root), (vars), (where_needed), true); +#endif + + +#endif /* ROWMARKS_FIX_H */ diff --git a/src/include/declarative.h b/src/include/declarative.h new file mode 100644 index 00000000..ee4ea40b --- /dev/null +++ b/src/include/declarative.h @@ -0,0 +1,16 @@ +#ifndef DECLARATIVE_H +#define DECLARATIVE_H + +#include "postgres.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" + +void modify_declarative_partitioning_query(Query *query); +bool is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid); + +/* actual actions */ +void handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd); +void 
handle_detach_partition(AlterTableCmd *cmd); +void handle_create_partition_of(Oid parent_relid, CreateStmt *stmt); + +#endif /* DECLARATIVE_H */ diff --git a/src/include/hooks.h b/src/include/hooks.h new file mode 100644 index 00000000..4d426f5a --- /dev/null +++ b/src/include/hooks.h @@ -0,0 +1,122 @@ +/* ------------------------------------------------------------------------ + * + * hooks.h + * prototypes of rel_pathlist and join_pathlist hooks + * + * Copyright (c) 2016-2020, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PATHMAN_HOOKS_H +#define PATHMAN_HOOKS_H + + +#include "postgres.h" +#include "executor/executor.h" +#include "optimizer/planner.h" +#include "optimizer/paths.h" +#include "parser/analyze.h" +#include "storage/ipc.h" +#include "tcop/utility.h" + + +extern set_join_pathlist_hook_type pathman_set_join_pathlist_next; +extern set_rel_pathlist_hook_type pathman_set_rel_pathlist_hook_next; +extern planner_hook_type pathman_planner_hook_next; +extern post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next; +extern shmem_startup_hook_type pathman_shmem_startup_hook_next; +extern ProcessUtility_hook_type pathman_process_utility_hook_next; +extern ExecutorRun_hook_type pathman_executor_run_hook_next; +extern ExecutorStart_hook_type pathman_executor_start_hook_prev; + + +void pathman_join_pathlist_hook(PlannerInfo *root, + RelOptInfo *joinrel, + RelOptInfo *outerrel, + RelOptInfo *innerrel, + JoinType jointype, + JoinPathExtraData *extra); + +void pathman_rel_pathlist_hook(PlannerInfo *root, + RelOptInfo *rel, + Index rti, + RangeTblEntry *rte); + +void pathman_enable_assign_hook(bool newval, void *extra); +bool pathman_enable_check_hook(bool *newval, void **extra, GucSource source); + +PlannedStmt * pathman_planner_hook(Query *parse, +#if PG_VERSION_NUM >= 130000 + const char *query_string, +#endif + int cursorOptions, + ParamListInfo boundParams); + +#if PG_VERSION_NUM >= 
140000 +void pathman_post_parse_analyze_hook(ParseState *pstate, + Query *query, + JumbleState *jstate); +#else +void pathman_post_parse_analyze_hook(ParseState *pstate, + Query *query); +#endif + +void pathman_shmem_startup_hook(void); + +void pathman_relcache_hook(Datum arg, Oid relid); + +#if PG_VERSION_NUM >= 140000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc); +#elif PG_VERSION_NUM >= 130000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc); +#elif PG_VERSION_NUM >= 100000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + char *completionTag); +#else +void pathman_process_utility_hook(Node *parsetree, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + DestReceiver *dest, + char *completionTag); +#endif + +#if PG_VERSION_NUM >= 90600 +typedef uint64 ExecutorRun_CountArgType; +#else +typedef long ExecutorRun_CountArgType; +#endif + +#if PG_VERSION_NUM >= 100000 +void pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, + ExecutorRun_CountArgType count, + bool execute_once); +#else +void pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, + ExecutorRun_CountArgType count); +#endif + +void pathman_executor_start_hook(QueryDesc *queryDescc, + int eflags); +#endif /* PATHMAN_HOOKS_H */ diff --git a/src/include/init.h b/src/include/init.h new file mode 100644 index 00000000..58335c46 --- /dev/null +++ b/src/include/init.h @@ -0,0 +1,242 @@ +/* 
------------------------------------------------------------------------ + * + * init.h + * Initialization functions + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PATHMAN_INIT_H +#define PATHMAN_INIT_H + + +#include "relation_info.h" + +#include "postgres.h" +#include "storage/lmgr.h" +#include "utils/guc.h" +#include "utils/hsearch.h" +#include "utils/snapshot.h" + + +/* Help user in case of emergency */ +#define INIT_ERROR_HINT "pg_pathman will be disabled to allow you to resolve this issue" + +/* Initial size of 'partitioned_rels' table */ +#define PART_RELS_SIZE 10 +#define CHILD_FACTOR 500 + + +/* + * pg_pathman's initialization state structure. + */ +typedef struct +{ + bool pg_pathman_enable; /* GUC variable implementation */ + bool auto_partition; /* GUC variable for auto partition propagation */ + bool override_copy; /* override COPY TO/FROM */ + bool initialization_needed; /* do we need to perform init? 
*/ +} PathmanInitState; + + +/* Check that this is a temporary memory context that's going to be destroyed */ +#define AssertTemporaryContext() \ + do { \ + Assert(CurrentMemoryContext != TopMemoryContext); \ + Assert(CurrentMemoryContext != TopPathmanContext); \ + Assert(CurrentMemoryContext != PathmanParentsCacheContext); \ + Assert(CurrentMemoryContext != PathmanStatusCacheContext); \ + Assert(CurrentMemoryContext != PathmanBoundsCacheContext); \ + } while (0) + + +#define PATHMAN_MCXT_COUNT 4 +extern MemoryContext TopPathmanContext; +extern MemoryContext PathmanParentsCacheContext; +extern MemoryContext PathmanStatusCacheContext; +extern MemoryContext PathmanBoundsCacheContext; + +extern HTAB *parents_cache; +extern HTAB *status_cache; +extern HTAB *bounds_cache; + +/* pg_pathman's initialization state */ +extern PathmanInitState pathman_init_state; + +/* pg_pathman's hooks state */ +extern bool pathman_hooks_enabled; + + +#define PATHMAN_TOP_CONTEXT "maintenance" +#define PATHMAN_PARENTS_CACHE "partition parents cache" +#define PATHMAN_STATUS_CACHE "partition status cache" +#define PATHMAN_BOUNDS_CACHE "partition bounds cache" + + +/* Transform pg_pathman's memory context into simple name */ +static inline const char * +simplify_mcxt_name(MemoryContext mcxt) +{ + if (mcxt == TopPathmanContext) + return PATHMAN_TOP_CONTEXT; + + else if (mcxt == PathmanParentsCacheContext) + return PATHMAN_PARENTS_CACHE; + + else if (mcxt == PathmanStatusCacheContext) + return PATHMAN_STATUS_CACHE; + + else if (mcxt == PathmanBoundsCacheContext) + return PATHMAN_BOUNDS_CACHE; + + else elog(ERROR, "unknown memory context"); + + return NULL; /* keep compiler quiet */ +} + + +/* + * Check if pg_pathman is initialized. + */ +#define IsPathmanInitialized() ( !pathman_init_state.initialization_needed ) + +/* + * Check if pg_pathman is enabled. + */ +#define IsPathmanEnabled() ( pathman_init_state.pg_pathman_enable ) + +/* + * Check if pg_pathman is initialized & enabled. 
+ */ +#define IsPathmanReady() ( IsPathmanInitialized() && IsPathmanEnabled() ) + +/* + * Should we override COPY stmt handling? + */ +#define IsOverrideCopyEnabled() ( pathman_init_state.override_copy ) + +/* + * Check if auto partition creation is enabled. + */ +#define IsAutoPartitionEnabled() ( pathman_init_state.auto_partition ) + +/* + * Enable/disable auto partition propagation. Note that this only works if + * partitioned relation supports this. See enable_auto() and disable_auto() + * functions. + */ +#define SetAutoPartitionEnabled(value) \ + do { \ + Assert((value) == true || (value) == false); \ + pathman_init_state.auto_partition = (value); \ + } while (0) + +/* + * Emergency disable mechanism. + */ +#define DisablePathman() \ + do { \ + pathman_init_state.pg_pathman_enable = false; \ + pathman_init_state.auto_partition = false; \ + pathman_init_state.override_copy = false; \ + unload_config(); \ + } while (0) + + +/* Default column values for PATHMAN_CONFIG_PARAMS */ +#define DEFAULT_PATHMAN_ENABLE_PARENT false +#define DEFAULT_PATHMAN_AUTO true +#define DEFAULT_PATHMAN_INIT_CALLBACK InvalidOid +#define DEFAULT_PATHMAN_SPAWN_USING_BGW false + +/* Other default values (for GUCs etc) */ +#define DEFAULT_PATHMAN_ENABLE true +#define DEFAULT_PATHMAN_OVERRIDE_COPY true + + +/* Lowest version of Pl/PgSQL frontend compatible with internals */ +#define LOWEST_COMPATIBLE_FRONT "1.5.0" + +/* Current version of native C library */ +#define CURRENT_LIB_VERSION "1.5.12" + + +void *pathman_cache_search_relid(HTAB *cache_table, + Oid relid, + HASHACTION action, + bool *found); + +/* + * Save and restore PathmanInitState. + */ +void save_pathman_init_state(volatile PathmanInitState *temp_init_state); +void restore_pathman_init_state(const volatile PathmanInitState *temp_init_state); + +/* + * Create main GUC variables. + */ +void init_main_pathman_toggles(void); + +/* + * Shared & local config. 
+ */ +Size estimate_pathman_shmem_size(void); +bool load_config(void); +void unload_config(void); + + +/* Result of find_inheritance_children_array() */ +typedef enum +{ + FCS_NO_CHILDREN = 0, /* could not find any children (GOOD) */ + FCS_COULD_NOT_LOCK, /* could not lock one of the children */ + FCS_FOUND /* found some children (GOOD) */ +} find_children_status; + +find_children_status find_inheritance_children_array(Oid parentrelId, + LOCKMODE lockmode, + bool nowait, + uint32 *children_size, + Oid **children); + +char *build_check_constraint_name_relid_internal(Oid relid); +char *build_check_constraint_name_relname_internal(const char *relname); + +char *build_sequence_name_relid_internal(Oid relid); +char *build_sequence_name_relname_internal(const char *relname); + +char *build_update_trigger_name_internal(Oid relid); +char *build_update_trigger_func_name_internal(Oid relid); + +bool pathman_config_contains_relation(Oid relid, + Datum *values, + bool *isnull, + TransactionId *xmin, + ItemPointerData *iptr); + +void pathman_config_invalidate_parsed_expression(Oid relid); + +void pathman_config_refresh_parsed_expression(Oid relid, + Datum *values, + bool *isnull, + ItemPointer iptr); + + +bool read_pathman_params(Oid relid, + Datum *values, + bool *isnull); + + +bool validate_range_constraint(const Expr *expr, + const PartRelationInfo *prel, + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null); + +bool validate_hash_constraint(const Expr *expr, + const PartRelationInfo *prel, + uint32 *part_idx); + + +#endif /* PATHMAN_INIT_H */ diff --git a/src/nodes_common.h b/src/include/nodes_common.h similarity index 87% rename from src/nodes_common.h rename to src/include/nodes_common.h index ef3cb3df..d41a453d 100644 --- a/src/nodes_common.h +++ b/src/include/nodes_common.h @@ -11,12 +11,17 @@ #ifndef NODES_COMMON_H #define NODES_COMMON_H + #include "relation_info.h" #include "postgres.h" #include "commands/explain.h" #include "optimizer/planner.h" +#if 
PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + /* * Common structure for storing selected @@ -49,7 +54,7 @@ typedef ChildScanCommonData *ChildScanCommon; /* * Destroy exhausted plan states */ -inline static void +static inline void clear_plan_states(CustomScanState *scan_state) { ListCell *state_cell; @@ -60,6 +65,10 @@ clear_plan_states(CustomScanState *scan_state) } } +List * get_partitioning_clauses(List *restrictinfo_list, + const PartRelationInfo *prel, + Index partitioned_rel); + Oid * get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, bool include_parent); @@ -89,7 +98,10 @@ void end_append_common(CustomScanState *node); void rescan_append_common(CustomScanState *node); void explain_append_common(CustomScanState *node, + List *ancestors, + ExplainState *es, HTAB *children_table, - ExplainState *es); + List *custom_exprs); -#endif + +#endif /* NODES_COMMON_H */ diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h new file mode 100644 index 00000000..cc666923 --- /dev/null +++ b/src/include/partition_creation.h @@ -0,0 +1,150 @@ +/*------------------------------------------------------------------------- + * + * partition_creation.h + * Various functions for partition creation. 
+ * + * Copyright (c) 2016, Postgres Professional + * + *------------------------------------------------------------------------- + */ + +#ifndef PARTITION_CREATION_H +#define PARTITION_CREATION_H + + +#include "relation_info.h" + +#include "postgres.h" +#include "nodes/parsenodes.h" + + +/* ACL privilege for partition creation */ +#define ACL_SPAWN_PARTITIONS ACL_INSERT + + +/* Create RANGE partitions to store some value */ +Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); +Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); + + +/* Create one RANGE partition */ +Oid create_single_range_partition_internal(Oid parent_relid, + const Bound *start_value, + const Bound *end_value, + Oid value_type, + RangeVar *partition_rv, + char *tablespace); + +/* Create one HASH partition */ +Oid create_single_hash_partition_internal(Oid parent_relid, + uint32 part_idx, + uint32 part_count, + RangeVar *partition_rv, + char *tablespace); + + +/* RANGE constraints */ +Constraint * build_range_check_constraint(Oid child_relid, + Node *raw_expression, + const Bound *start_value, + const Bound *end_value, + Oid expr_type); + +Node * build_raw_range_check_tree(Node *raw_expression, + const Bound *start_value, + const Bound *end_value, + Oid value_type); + +bool check_range_available(Oid parent_relid, + const Bound *start_value, + const Bound *end_value, + Oid value_type, + bool raise_error); + + +/* HASH constraints */ +Constraint * build_hash_check_constraint(Oid child_relid, + Node *raw_expression, + uint32 part_idx, + uint32 part_count, + Oid value_type); + +Node * build_raw_hash_check_tree(Node *raw_expression, + uint32 part_idx, + uint32 part_count, + Oid relid, + Oid value_type); + +/* Add & drop pg_pathman's check constraint */ +void drop_pathman_check_constraint(Oid relid); +void add_pathman_check_constraint(Oid relid, Constraint *constraint); + + +/* Partitioning callback type */ +typedef enum +{ + PT_INIT_CALLBACK = 0 +} 
part_callback_type; + +/* Args for partitioning 'init_callback' */ +typedef struct +{ + part_callback_type cb_type; + Oid callback; + bool callback_is_cached; + + PartType parttype; + + Oid parent_relid; + Oid partition_relid; + + union + { + struct + { + void *none; /* nothing (struct should have at least 1 element) */ + } hash_params; + + struct + { + Bound start_value, + end_value; + Oid value_type; + } range_params; + + } params; +} init_callback_params; + + +#define MakeInitCallbackRangeParams(params_p, cb, parent, child, start, end, type) \ + do \ + { \ + memset((void *) (params_p), 0, sizeof(init_callback_params)); \ + (params_p)->cb_type = PT_INIT_CALLBACK; \ + (params_p)->callback = (cb); \ + (params_p)->callback_is_cached = false; \ + (params_p)->parttype = PT_RANGE; \ + (params_p)->parent_relid = (parent); \ + (params_p)->partition_relid = (child); \ + (params_p)->params.range_params.start_value = (start); \ + (params_p)->params.range_params.end_value = (end); \ + (params_p)->params.range_params.value_type = (type); \ + } while (0) + +#define MakeInitCallbackHashParams(params_p, cb, parent, child) \ + do \ + { \ + memset((void *) (params_p), 0, sizeof(init_callback_params)); \ + (params_p)->cb_type = PT_INIT_CALLBACK; \ + (params_p)->callback = (cb); \ + (params_p)->callback_is_cached = false; \ + (params_p)->parttype = PT_HASH; \ + (params_p)->parent_relid = (parent); \ + (params_p)->partition_relid = (child); \ + } while (0) + + +void invoke_part_callback(init_callback_params *cb_params); +bool validate_part_callback(Oid procid, bool emit_error); + +#endif /* PARTITION_CREATION_H */ diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h new file mode 100644 index 00000000..4aae0bbb --- /dev/null +++ b/src/include/partition_filter.h @@ -0,0 +1,225 @@ +/* ------------------------------------------------------------------------ + * + * partition_filter.h + * Select partition for INSERT operation + * + * Copyright (c) 2016-2020, 
Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_FILTER_H +#define PARTITION_FILTER_H + + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "access/tupconvert.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + + +#define INSERT_NODE_NAME "PartitionFilter" + + +#define ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" +#define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" +#define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" +#if PG_VERSION_NUM < 130000 +/* + * In >=13 msg parameter in convert_tuples_by_name function was removed (fe66125974c) + * and ERR_PART_DESC_CONVERT become unusable + */ +#define ERR_PART_DESC_CONVERT "could not convert row type for partition" +#endif + + +/* + * Single element of 'result_rels_table'. + */ +typedef struct +{ + Oid partid; /* partition's relid */ + ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ + TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ + TupleConversionMap *tuple_map_child; /* tuple mapping (child => child), for exclude 'ctid' */ + + PartRelationInfo *prel; /* this child might be a parent... */ + ExprState *prel_expr_state; /* and have its own part. expression */ +} ResultRelInfoHolder; + + +/* Default settings for ResultPartsStorage */ +#define RPS_DEFAULT_SPECULATIVE false /* speculative inserts */ +#define RPS_CLOSE_RELATIONS true +#define RPS_SKIP_RELATIONS false + +/* Neat wrapper for readability */ +#define RPS_RRI_CB(cb, args) (cb), ((void *) args) + + +/* Forward declaration (for on_rri_holder()) */ +struct ResultPartsStorage; +typedef struct ResultPartsStorage ResultPartsStorage; + +/* + * Callback to be fired at rri_holder creation/destruction. 
+ */ +typedef void (*rri_holder_cb)(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + +/* + * Cached ResultRelInfos of partitions. + */ +struct ResultPartsStorage +{ + ResultRelInfo *base_rri; /* original ResultRelInfo */ + EState *estate; /* pointer to executor's state */ + CmdType command_type; /* INSERT | UPDATE */ + + /* partition relid -> ResultRelInfoHolder */ + HTAB *result_rels_table; + HASHCTL result_rels_table_config; + + bool speculative_inserts; /* for ExecOpenIndices() */ + + rri_holder_cb init_rri_holder_cb; + void *init_rri_holder_cb_arg; + + rri_holder_cb fini_rri_holder_cb; + void *fini_rri_holder_cb_arg; + + bool close_relations; + LOCKMODE head_open_lock_mode; + + PartRelationInfo *prel; + ExprState *prel_expr_state; + ExprContext *prel_econtext; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + ResultRelInfo *init_rri; /* first initialized ResultRelInfo */ +#endif +}; + +typedef struct +{ + CustomScanState css; + + Oid partitioned_table; + OnConflictAction on_conflict_action; + List *returning_list; + + Plan *subplan; /* proxy variable to store subplan */ + ResultPartsStorage result_parts; /* partition ResultRelInfo cache */ + CmdType command_type; + + TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ + +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + Index parent_rti; /* Parent RT index for use of EXPLAIN, + see "ModifyTable::nominalRelation" */ +#endif +} PartitionFilterState; + + +extern bool pg_pathman_enable_partition_filter; +extern int pg_pathman_insert_into_fdw; + +extern CustomScanMethods partition_filter_plan_methods; +extern CustomExecMethods partition_filter_exec_methods; + + +#define IsPartitionFilterPlan(node) \ + ( \ + IsA((node), CustomScan) && \ + (((CustomScan *) (node))->methods == &partition_filter_plan_methods) \ + ) + +#define IsPartitionFilterState(node) \ + ( \ + IsA((node), CustomScanState) && \ + (((CustomScanState *) (node))->methods == 
&partition_filter_exec_methods) \ + ) + +#define IsPartitionFilter(node) \ + ( IsPartitionFilterPlan(node) || IsPartitionFilterState(node) ) + + + +void init_partition_filter_static_data(void); + + +/* + * ResultPartsStorage API (select partition for INSERT & UPDATE). + */ + +/* Initialize storage for some parent table */ +void init_result_parts_storage(ResultPartsStorage *parts_storage, + Oid parent_relid, + ResultRelInfo *current_rri, + EState *estate, + CmdType cmd_type, + bool close_relations, + bool speculative_inserts, + rri_holder_cb init_rri_holder_cb, + void *init_rri_holder_cb_arg, + rri_holder_cb fini_rri_holder_cb, + void *fini_rri_holder_cb_arg); + +/* Free storage and opened relations */ +void fini_result_parts_storage(ResultPartsStorage *parts_storage); + +/* Find ResultRelInfo holder in storage */ +ResultRelInfoHolder * scan_result_parts_storage(EState *estate, ResultPartsStorage *storage, Oid partid); + +/* Refresh PartRelationInfo in storage */ +PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid); + +TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); + +TupleConversionMap * build_part_tuple_map_child(Relation child_rel); + +void destroy_tuple_map(TupleConversionMap *tuple_map); + +List * pfilter_build_tlist(Plan *subplan, Index varno); + +/* Find suitable partition using 'value' */ +Oid * find_partitions_for_value(Datum value, Oid value_type, + const PartRelationInfo *prel, + int *nparts); + +ResultRelInfoHolder *select_partition_for_insert(EState *estate, + ResultPartsStorage *parts_storage, + TupleTableSlot *slot); + +Plan * make_partition_filter(Plan *subplan, + Oid parent_relid, + Index parent_rti, + OnConflictAction conflict_action, + CmdType command_type, + List *returning_list); + + +Node * partition_filter_create_scan_state(CustomScan *node); + +void partition_filter_begin(CustomScanState *node, + EState *estate, + int eflags); + +TupleTableSlot * 
partition_filter_exec(CustomScanState *node); + +void partition_filter_end(CustomScanState *node); + +void partition_filter_rescan(CustomScanState *node); + +void partition_filter_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); + + +#endif /* PARTITION_FILTER_H */ diff --git a/src/include/partition_overseer.h b/src/include/partition_overseer.h new file mode 100644 index 00000000..ddf84c7a --- /dev/null +++ b/src/include/partition_overseer.h @@ -0,0 +1,54 @@ +/* ------------------------------------------------------------------------ + * + * partition_overseer.h + * Restart ModifyTable for unobvious reasons + * + * Copyright (c) 2018, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_OVERSEER_H +#define PARTITION_OVERSEER_H + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "access/tupconvert.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + + +#define OVERSEER_NODE_NAME "PartitionOverseer" + + +extern CustomScanMethods partition_overseer_plan_methods; +extern CustomExecMethods partition_overseer_exec_methods; + + +void init_partition_overseer_static_data(void); +Plan *make_partition_overseer(Plan *subplan); + +Node *partition_overseer_create_scan_state(CustomScan *node); + +void partition_overseer_begin(CustomScanState *node, + EState *estate, + int eflags); + +TupleTableSlot *partition_overseer_exec(CustomScanState *node); + +void partition_overseer_end(CustomScanState *node); + +void partition_overseer_rescan(CustomScanState *node); + +void partition_overseer_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); + + +#endif /* PARTITION_OVERSEER_H */ diff --git a/src/include/partition_router.h b/src/include/partition_router.h new file mode 100644 index 00000000..d5684eba --- /dev/null +++ b/src/include/partition_router.h @@ 
-0,0 +1,85 @@ +/* ------------------------------------------------------------------------ + * + * partition_update.h + * Insert row to right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_UPDATE_H +#define PARTITION_UPDATE_H + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + + +#define UPDATE_NODE_NAME "PartitionRouter" + + +typedef struct PartitionRouterState +{ + CustomScanState css; + + Plan *subplan; /* proxy variable to store subplan */ + ExprState *constraint; /* should tuple remain in partition? */ +#if PG_VERSION_NUM < 140000 /* field removed in 86dc90056dfd */ + JunkFilter *junkfilter; /* 'ctid' extraction facility */ +#endif + ResultRelInfo *current_rri; + + /* Machinery required for EvalPlanQual */ + EPQState epqstate; + int epqparam; + + /* Preserved slot from last call */ + bool yielded; + TupleTableSlot *yielded_slot; +#if PG_VERSION_NUM >= 140000 + TupleTableSlot *yielded_original_slot; +#endif + + /* Need these for a GREAT deal of hackery */ + ModifyTableState *mt_state; + bool update_stmt_triggers, + insert_stmt_triggers; +} PartitionRouterState; + + +extern bool pg_pathman_enable_partition_router; + +extern CustomScanMethods partition_router_plan_methods; +extern CustomExecMethods partition_router_exec_methods; + + +#define IsPartitionRouterState(node) \ + ( \ + IsA((node), CustomScanState) && \ + (((CustomScanState *) (node))->methods == &partition_router_exec_methods) \ + ) + +/* Highlight hacks with ModifyTable's fields */ +#define MTHackField(mt_state, field) ( (mt_state)->field ) + +void init_partition_router_static_data(void); +void partition_router_begin(CustomScanState *node, EState *estate, int eflags); +void 
partition_router_end(CustomScanState *node); +void partition_router_rescan(CustomScanState *node); +void partition_router_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); + +Plan *make_partition_router(Plan *subplan, int epq_param, Index parent_rti); +Node *partition_router_create_scan_state(CustomScan *node); +TupleTableSlot *partition_router_exec(CustomScanState *node); + +#endif /* PARTITION_UPDATE_H */ diff --git a/src/include/pathman.h b/src/include/pathman.h new file mode 100644 index 00000000..28f6ef30 --- /dev/null +++ b/src/include/pathman.h @@ -0,0 +1,210 @@ +/* ------------------------------------------------------------------------ + * + * pathman.h + * structures and prototypes for pathman functions + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PATHMAN_H +#define PATHMAN_H + + +#include "relation_info.h" +#include "rangeset.h" + +#include "postgres.h" +#include "fmgr.h" +#include "nodes/makefuncs.h" +#include "nodes/primnodes.h" +#include "nodes/execnodes.h" +#include "optimizer/planner.h" +#include "parser/parsetree.h" + + +/* Get CString representation of Datum (simple wrapper) */ +#ifdef USE_ASSERT_CHECKING + #include "utils.h" + #define DebugPrintDatum(datum, typid) ( datum_to_cstring((datum), (typid)) ) +#else + #define DebugPrintDatum(datum, typid) ( "[use --enable-cassert]" ) +#endif + + +/* + * Main GUC variables. + */ +#define PATHMAN_ENABLE "pg_pathman.enable" +#define PATHMAN_ENABLE_AUTO_PARTITION "pg_pathman.enable_auto_partition" +#define PATHMAN_OVERRIDE_COPY "pg_pathman.override_copy" + + +/* + * Definitions for the "pathman_config" table. 
+ */ +#define PATHMAN_CONFIG "pathman_config" +#define Natts_pathman_config 4 +#define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ +#define Anum_pathman_config_expr 2 /* partition expression (original) */ +#define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ +#define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. (text) */ + +/* type modifier (typmod) for 'range_interval' */ +#define PATHMAN_CONFIG_interval_typmod -1 + +/* + * Definitions for the "pathman_config_params" table. + */ +#define PATHMAN_CONFIG_PARAMS "pathman_config_params" +#define Natts_pathman_config_params 5 +#define Anum_pathman_config_params_partrel 1 /* primary key */ +#define Anum_pathman_config_params_enable_parent 2 /* include parent into plan */ +#define Anum_pathman_config_params_auto 3 /* auto partitions creation */ +#define Anum_pathman_config_params_init_callback 4 /* partition action callback */ +#define Anum_pathman_config_params_spawn_using_bgw 5 /* should we use spawn BGW? */ + +/* + * Definitions for the "pathman_partition_list" view. + */ +#define PATHMAN_PARTITION_LIST "pathman_partition_list" +#define Natts_pathman_partition_list 6 +#define Anum_pathman_pl_parent 1 /* partitioned relation (regclass) */ +#define Anum_pathman_pl_partition 2 /* child partition (regclass) */ +#define Anum_pathman_pl_parttype 3 /* partitioning type (1|2) */ +#define Anum_pathman_pl_partattr 4 /* partitioned column (text) */ +#define Anum_pathman_pl_range_min 5 /* partition's min value */ +#define Anum_pathman_pl_range_max 6 /* partition's max value */ + +/* + * Definitions for the "pathman_cache_stats" view. 
+ */ +#define PATHMAN_CACHE_STATS "pathman_cache_stats" +#define Natts_pathman_cache_stats 4 +#define Anum_pathman_cs_context 1 /* name of memory context */ +#define Anum_pathman_cs_size 2 /* size of memory context */ +#define Anum_pathman_cs_used 3 /* used space */ +#define Anum_pathman_cs_entries 4 /* number of cache entries */ + + +/* + * Cache current PATHMAN_CONFIG relid (set during load_config()). + */ +extern Oid pathman_config_relid; +extern Oid pathman_config_params_relid; + +/* + * Just to clarify our intentions (return the corresponding relid). + */ +Oid get_pathman_config_relid(bool invalid_is_ok); +Oid get_pathman_config_params_relid(bool invalid_is_ok); +Oid get_pathman_schema(void); + + +/* + * Create RelOptInfo & RTE for a selected partition. + */ +Index append_child_relation(PlannerInfo *root, + Relation parent_relation, + PlanRowMark *parent_rowmark, + Index parent_rti, + int ir_index, + Oid child_oid, + List *wrappers); + + +/* + * Copied from PostgreSQL (prepunion.c) + */ +void make_inh_translation_list(Relation oldrelation, Relation newrelation, + Index newvarno, List **translated_vars, + AppendRelInfo *appinfo); + +Bitmapset *translate_col_privs(const Bitmapset *parent_privs, + List *translated_vars); + + +/* + * Copied from PostgreSQL (allpaths.c) + */ +void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, + PathKey *pathkeyAsc, PathKey *pathkeyDesc); + +Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, + Relids required_outer); + + +typedef struct +{ + const Node *orig; /* examined expression */ + List *args; /* clauses/wrappers extracted from 'orig' */ + List *rangeset; /* IndexRanges representing selected parts */ + double paramsel; /* estimated selectivity of PARAMs + (for RuntimeAppend costs) */ + bool found_gap; /* were there any gaps? 
*/ +} WrapperNode; + +#define InvalidWrapperNode { NULL, NIL, NIL, 0.0, false } + +typedef struct +{ + Node *prel_expr; /* expression from PartRelationInfo */ + const PartRelationInfo *prel; /* main partitioning structure */ + ExprContext *econtext; /* for ExecEvalExpr() */ +} WalkerContext; + +/* Usual initialization procedure for WalkerContext */ +#define InitWalkerContext(context, expr, prel_info, ecxt) \ + do { \ + (context)->prel_expr = (expr); \ + (context)->prel = (prel_info); \ + (context)->econtext = (ecxt); \ + } while (0) + +/* Check that WalkerContext contains ExprContext (plan execution stage) */ +#define WcxtHasExprContext(wcxt) ( (wcxt)->econtext != NULL ) + +/* Examine expression in order to select partitions */ +WrapperNode *walk_expr_tree(Expr *expr, const WalkerContext *context); + + +void select_range_partitions(const Datum value, + const Oid collid, + FmgrInfo *cmp_func, + const RangeEntry *ranges, + const int nranges, + const int strategy, + WrapperNode *result); + + +/* Convert hash value to the partition index */ +static inline uint32 +hash_to_part_index(uint32 value, uint32 partitions) +{ + return value % partitions; +} + + +/* + * Compare two Datums using the given comparison function. + * + * flinfo is a pointer to FmgrInfo, arg1 & arg2 are Datums. 
+ */ +#define check_lt(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) < 0 ) + +#define check_le(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) <= 0 ) + +#define check_eq(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) == 0 ) + +#define check_ge(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) >= 0 ) + +#define check_gt(finfo, collid, arg1, arg2) \ + ( DatumGetInt32(FunctionCall2Coll((finfo), (collid), (arg1), (arg2))) > 0 ) + + +#endif /* PATHMAN_H */ diff --git a/src/pathman_workers.h b/src/include/pathman_workers.h similarity index 84% rename from src/pathman_workers.h rename to src/include/pathman_workers.h index 3ea664d5..be4d6425 100644 --- a/src/pathman_workers.h +++ b/src/include/pathman_workers.h @@ -17,9 +17,14 @@ #ifndef PATHMAN_WORKERS_H #define PATHMAN_WORKERS_H + #include "postgres.h" #include "storage/spin.h" +#if PG_VERSION_NUM >= 90600 +#include "storage/lock.h" +#endif + /* * Store args, result and execution status of CreatePartitionsWorker. @@ -32,6 +37,12 @@ typedef struct Oid dbid; /* database which stores 'partitioned_table' */ Oid partitioned_table; +#if PG_VERSION_NUM >= 90600 + /* Args for BecomeLockGroupMember() function */ + PGPROC *parallel_master_pgproc; + pid_t parallel_master_pid; +#endif + /* Needed to decode Datum from 'values' */ Oid value_type; Size value_size; @@ -63,7 +74,7 @@ typedef struct pid_t pid; /* worker's PID */ Oid dbid; /* database which contains the relation */ Oid relid; /* table to be partitioned concurrently */ - uint64 total_rows; /* total amount of rows processed */ + int64 total_rows; /* total amount of rows processed */ int32 batch_size; /* number of rows in a batch */ float8 sleep_time; /* how long should we sleep in case of error? 
*/ @@ -101,17 +112,36 @@ cps_set_status(ConcurrentPartSlot *slot, ConcurrentPartSlotStatus status) SpinLockRelease(&slot->mutex); } +static inline const char * +cps_print_status(ConcurrentPartSlotStatus status) +{ + switch(status) + { + case CPS_FREE: + return "free"; + + case CPS_WORKING: + return "working"; + + case CPS_STOPPING: + return "stopping"; + + default: + return "[unknown]"; + } +} + /* Number of worker slots for concurrent partitioning */ -#define PART_WORKER_SLOTS 10 +#define PART_WORKER_SLOTS max_worker_processes /* Max number of attempts per batch */ #define PART_WORKER_MAX_ATTEMPTS 60 /* - * Definitions for the "pathman_concurrent_part_tasks" view + * Definitions for the "pathman_concurrent_part_tasks" view. */ #define PATHMAN_CONCURRENT_PART_TASKS "pathman_concurrent_part_tasks" #define Natts_pathman_cp_tasks 6 @@ -178,4 +208,10 @@ UnpackDatumFromByteArray(Datum *datum, Size datum_size, bool typbyval, return ((uint8 *) byte_array) + datum_size; } -#endif +/* + * Create partition to store 'value' using specific BGW. 
+ */ +Oid create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type); + + +#endif /* PATHMAN_WORKERS_H */ diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h new file mode 100644 index 00000000..edca73a0 --- /dev/null +++ b/src/include/planner_tree_modification.h @@ -0,0 +1,66 @@ +/* ------------------------------------------------------------------------ + * + * planner_tree_modification.h + * Functions for query- and plan- tree modification + * + * Copyright (c) 2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PLANNER_TREE_MODIFICATION_H +#define PLANNER_TREE_MODIFICATION_H + + +#include "pathman.h" + +#include "postgres.h" +#include "utils/rel.h" +/* #include "nodes/relation.h" */ +#include "nodes/nodeFuncs.h" + + +/* Query ID generator */ +void assign_query_id(Query *query); +void reset_query_id_generator(void); + +/* Plan tree rewriting utility */ +Plan * plan_tree_visitor(Plan *plan, + Plan *(*visitor) (Plan *plan, void *context), + void *context); + +/* PlanState tree rewriting utility */ +void state_tree_visitor(PlanState *state, + void (*visitor) (PlanState *state, void *context), + void *context); + +/* Query tree rewriting utilities */ +void pathman_transform_query(Query *parse, ParamListInfo params); +void pathman_post_analyze_query(Query *parse); + +/* These functions scribble on Plan tree */ +Plan *add_partition_filters(List *rtable, Plan *plan); +Plan *add_partition_routers(List *rtable, Plan *plan); + + +/* used by assign_rel_parenthood_status() etc */ +typedef enum +{ + PARENTHOOD_NOT_SET = 0, /* relation hasn't been tracked */ + PARENTHOOD_DISALLOWED, /* children are disabled (e.g. 
ONLY) */ + PARENTHOOD_ALLOWED /* children are enabled (default) */ +} rel_parenthood_status; + +void assign_rel_parenthood_status(RangeTblEntry *rte, + rel_parenthood_status new_status); + +rel_parenthood_status get_rel_parenthood_status(RangeTblEntry *rte); + + +/* used to determine nested planner() calls */ +void incr_planner_calls_count(void); +void decr_planner_calls_count(void); +int32 get_planner_calls_count(void); + + +#endif /* PLANNER_TREE_MODIFICATION_H */ diff --git a/src/include/rangeset.h b/src/include/rangeset.h new file mode 100644 index 00000000..39db6a53 --- /dev/null +++ b/src/include/rangeset.h @@ -0,0 +1,172 @@ +/* ------------------------------------------------------------------------ + * + * rangeset.h + * + * Copyright (c) 2015-2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PATHMAN_RANGESET_H +#define PATHMAN_RANGESET_H + + +#include "postgres.h" +#include "nodes/pg_list.h" + + +/* + * IndexRange is essentially a segment [lower; upper]. This module provides + * functions for efficient working (intersection, union) with Lists of + * IndexRange's; this is used for quick selection of partitions. Numbers are + * indexes of partitions in PartRelationInfo's children. + */ +typedef struct { + /* lossy == should we use quals? */ + /* valid == is this IndexRange valid? */ + + /* Don't swap these fields */ + uint32 lower; /* valid + lower_bound */ + uint32 upper; /* lossy + upper_bound */ +} IndexRange; + +/* Convenience macros for make_irange(...) 
*/ +#define IR_LOSSY true +#define IR_COMPLETE false + +#define IRANGE_SPECIAL_BIT ( (uint32) ( ((uint32) 1) << 31) ) +#define IRANGE_BOUNDARY_MASK ( (uint32) (~IRANGE_SPECIAL_BIT) ) + +#define InvalidIndexRange { 0, 0 } + +#define is_irange_valid(irange) ( (irange.lower & IRANGE_SPECIAL_BIT) > 0 ) +#define is_irange_lossy(irange) ( (irange.upper & IRANGE_SPECIAL_BIT) > 0 ) +#define irange_lower(irange) ( (uint32) (irange.lower & IRANGE_BOUNDARY_MASK) ) +#define irange_upper(irange) ( (uint32) (irange.upper & IRANGE_BOUNDARY_MASK) ) + +#define lfirst_irange(lc) ( *(IndexRange *) lfirst(lc) ) +#define lappend_irange(list, irange) ( lappend((list), alloc_irange(irange)) ) +#define lcons_irange(irange, list) ( lcons(alloc_irange(irange), (list)) ) +#define list_make1_irange(irange) ( lcons_irange(irange, NIL) ) +#define llast_irange(list) ( lfirst_irange(list_tail(list)) ) +#define linitial_irange(list) ( lfirst_irange(list_head(list)) ) + + +/* convenience macro (requires relation_info.h) */ +#define list_make1_irange_full(prel, lossy) \ + ( list_make1_irange(make_irange(0, PrelLastChild(prel), (lossy))) ) + + +static inline IndexRange +make_irange(uint32 lower, uint32 upper, bool lossy) +{ + IndexRange result = { lower & IRANGE_BOUNDARY_MASK, + upper & IRANGE_BOUNDARY_MASK }; + + /* Set VALID */ + result.lower |= IRANGE_SPECIAL_BIT; + + /* Set LOSSY if needed */ + if (lossy) result.upper |= IRANGE_SPECIAL_BIT; + + Assert(lower <= upper); + + return result; +} + +static inline IndexRange * +alloc_irange(IndexRange irange) +{ + IndexRange *result = (IndexRange *) palloc(sizeof(IndexRange)); + + /* Copy all fields of IndexRange */ + *result = irange; + + return result; +} + +/* Return predecessor or 0 if boundary is 0 */ +static inline uint32 +irb_pred(uint32 boundary) +{ + if (boundary > 0) + return (boundary - 1) & IRANGE_BOUNDARY_MASK; + + return 0; +} + +/* Return successor or IRANGE_BOUNDARY_MASK */ +static inline uint32 +irb_succ(uint32 boundary) +{ + if (boundary 
>= IRANGE_BOUNDARY_MASK) + return IRANGE_BOUNDARY_MASK; + + return boundary + 1; +} + + +/* Result of function irange_cmp_lossiness() */ +typedef enum +{ + IR_EQ_LOSSINESS = 0, /* IndexRanges share same lossiness */ + IR_A_LOSSY, /* IndexRange 'a' is lossy ('b' is not) */ + IR_B_LOSSY /* IndexRange 'b' is lossy ('a' is not) */ +} ir_cmp_lossiness; + +/* Compare lossiness factor of two IndexRanges */ +static inline ir_cmp_lossiness +irange_cmp_lossiness(IndexRange a, IndexRange b) +{ + if (is_irange_lossy(a) == is_irange_lossy(b)) + return IR_EQ_LOSSINESS; + + if (is_irange_lossy(a)) + return IR_A_LOSSY; + + if (is_irange_lossy(b)) + return IR_B_LOSSY; + + return IR_EQ_LOSSINESS; +} + + +/* Check if two ranges intersect */ +static inline bool +iranges_intersect(IndexRange a, IndexRange b) +{ + return (irange_lower(a) <= irange_upper(b)) && + (irange_lower(b) <= irange_upper(a)); +} + +/* Check if two ranges adjoin */ +static inline bool +iranges_adjoin(IndexRange a, IndexRange b) +{ + return (irange_upper(a) == irb_pred(irange_lower(b))) || + (irange_upper(b) == irb_pred(irange_lower(a))); +} + +/* Check if two ranges cover the same area */ +static inline bool +irange_eq_bounds(IndexRange a, IndexRange b) +{ + return (irange_lower(a) == irange_lower(b)) && + (irange_upper(a) == irange_upper(b)); +} + + +/* Basic operations on IndexRanges */ +IndexRange irange_union_simple(IndexRange a, IndexRange b); +IndexRange irange_intersection_simple(IndexRange a, IndexRange b); + +/* Operations on Lists of IndexRanges */ +List *irange_list_union(List *a, List *b); +List *irange_list_intersection(List *a, List *b); +List *irange_list_set_lossiness(List *ranges, bool lossy); + +/* Utility functions */ +int irange_list_length(List *rangeset); +bool irange_list_find(List *rangeset, int index, bool *lossy); + +#endif /* PATHMAN_RANGESET_H */ diff --git a/src/include/relation_info.h b/src/include/relation_info.h new file mode 100644 index 00000000..a42bf727 --- /dev/null +++ 
b/src/include/relation_info.h @@ -0,0 +1,431 @@ +/* ------------------------------------------------------------------------ + * + * relation_info.h + * Data structures describing partitioned relations + * + * Copyright (c) 2016-2020, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef RELATION_INFO_H +#define RELATION_INFO_H + +#include "compat/pg_compat.h" + +#include "utils.h" + +#include "access/attnum.h" +#include "access/sysattr.h" +#include "fmgr.h" +#include "nodes/bitmapset.h" +#include "nodes/nodes.h" +#include "nodes/memnodes.h" +#include "nodes/primnodes.h" +#include "nodes/value.h" +#include "port/atomics.h" +#include "rewrite/rewriteManip.h" +#include "storage/lock.h" +#include "utils/datum.h" +#include "utils/lsyscache.h" +#include "utils/relcache.h" + + +#ifdef USE_ASSERT_CHECKING +#define USE_RELINFO_LOGGING +#define USE_RELINFO_LEAK_TRACKER +#endif + + +/* Range bound */ +typedef struct +{ + Datum value; /* actual value if not infinite */ + int8 is_infinite; /* -inf | +inf | finite */ +} Bound; + + +#define FINITE ( 0 ) +#define PLUS_INFINITY ( +1 ) +#define MINUS_INFINITY ( -1 ) + +#define IsInfinite(i) ( (i)->is_infinite != FINITE ) +#define IsPlusInfinity(i) ( (i)->is_infinite == PLUS_INFINITY ) +#define IsMinusInfinity(i) ( (i)->is_infinite == MINUS_INFINITY ) + +static inline Bound +CopyBound(const Bound *src, bool byval, int typlen) +{ + Bound bound = { + IsInfinite(src) ? 
+ src->value : + datumCopy(src->value, byval, typlen), + src->is_infinite + }; + + return bound; +} + +static inline Bound +MakeBound(Datum value) +{ + Bound bound = { value, FINITE }; + + return bound; +} + +static inline Bound +MakeBoundInf(int8 infinity_type) +{ + Bound bound = { (Datum) 0, infinity_type }; + + return bound; +} + +static inline Datum +BoundGetValue(const Bound *bound) +{ + Assert(!IsInfinite(bound)); + + return bound->value; +} + +static inline void +FreeBound(Bound *bound, bool byval) +{ + if (!IsInfinite(bound) && !byval) + pfree(DatumGetPointer(BoundGetValue(bound))); +} + +static inline char * +BoundToCString(const Bound *bound, Oid value_type) +{ + return IsInfinite(bound) ? + pstrdup("NULL") : + datum_to_cstring(bound->value, value_type); +} + +static inline int +cmp_bounds(FmgrInfo *cmp_func, + const Oid collid, + const Bound *b1, + const Bound *b2) +{ + if (IsMinusInfinity(b1) || IsPlusInfinity(b2)) + return -1; + + if (IsMinusInfinity(b2) || IsPlusInfinity(b1)) + return 1; + + Assert(cmp_func); + + return DatumGetInt32(FunctionCall2Coll(cmp_func, + collid, + BoundGetValue(b1), + BoundGetValue(b2))); +} + + +/* Partitioning type */ +typedef enum +{ + PT_ANY = 0, /* for part type traits (virtual type) */ + PT_HASH, + PT_RANGE +} PartType; + +/* Child relation info for RANGE partitioning */ +typedef struct +{ + Oid child_oid; + Bound min, + max; +} RangeEntry; + +/* + * PartStatusInfo + * Cached partitioning status of the specified relation. + * Allows us to quickly search for PartRelationInfo. + */ +typedef struct PartStatusInfo +{ + Oid relid; /* key */ + struct PartRelationInfo *prel; +} PartStatusInfo; + +/* + * PartParentInfo + * Cached parent of the specified partition. + * Allows us to quickly search for parent PartRelationInfo. + */ +typedef struct PartParentInfo +{ + Oid child_relid; /* key */ + Oid parent_relid; +} PartParentInfo; + +/* + * PartBoundInfo + * Cached bounds of the specified partition. 
+ * Allows us to diminish overhead of check constraints. + */ +typedef struct PartBoundInfo +{ + Oid child_relid; /* key */ + + PartType parttype; + + /* For RANGE partitions */ + Bound range_min; + Bound range_max; + bool byval; + + /* For HASH partitions */ + uint32 part_idx; +} PartBoundInfo; + +static inline void +FreePartBoundInfo(PartBoundInfo *pbin) +{ + if (pbin->parttype == PT_RANGE) + { + FreeBound(&pbin->range_min, pbin->byval); + FreeBound(&pbin->range_max, pbin->byval); + } +} + +/* + * PartRelationInfo + * Per-relation partitioning information. + * Allows us to perform partition pruning. + */ +typedef struct PartRelationInfo +{ + Oid relid; /* key */ + int32 refcount; /* reference counter */ + bool fresh; /* is this entry fresh? */ + + bool enable_parent; /* should plan include parent? */ + + PartType parttype; /* partitioning type (HASH | RANGE) */ + + /* Partition dispatch info */ + uint32 children_count; + Oid *children; /* Oids of child partitions */ + RangeEntry *ranges; /* per-partition range entry or NULL */ + + /* Partitioning expression */ + const char *expr_cstr; /* original expression */ + Node *expr; /* planned expression */ + List *expr_vars; /* vars from expression, lazy */ + Bitmapset *expr_atts; /* attnums from expression */ + + /* Partitioning expression's value */ + Oid ev_type; /* expression type */ + int32 ev_typmod; /* expression type modifier */ + bool ev_byval; /* is expression's val stored by value? 
*/ + int16 ev_len; /* length of the expression val's type */ + int ev_align; /* alignment of the expression val's type */ + Oid ev_collid; /* collation of the expression val */ + + Oid cmp_proc, /* comparison function for 'ev_type' */ + hash_proc; /* hash function for 'ev_type' */ + +#ifdef USE_RELINFO_LEAK_TRACKER + List *owners; /* saved callers of get_pathman_relation_info() */ + uint64 access_total; /* total amount of accesses to this entry */ +#endif + + MemoryContext mcxt; /* memory context holding this struct */ +} PartRelationInfo; + +#define PART_EXPR_VARNO ( 1 ) + +/* + * PartRelationInfo field access macros & functions. + */ + +#define PrelParentRelid(prel) ( (prel)->relid ) + +#define PrelGetChildrenArray(prel) ( (prel)->children ) + +#define PrelGetRangesArray(prel) ( (prel)->ranges ) + +#define PrelChildrenCount(prel) ( (prel)->children_count ) + +#define PrelReferenceCount(prel) ( (prel)->refcount ) + +#define PrelIsFresh(prel) ( (prel)->fresh ) + +static inline uint32 +PrelHasPartition(const PartRelationInfo *prel, Oid partition_relid) +{ + Oid *children = PrelGetChildrenArray(prel); + uint32 i; + + for (i = 0; i < PrelChildrenCount(prel); i++) + if (children[i] == partition_relid) + return i + 1; + + return 0; +} + +static inline uint32 +PrelLastChild(const PartRelationInfo *prel) +{ + if (PrelChildrenCount(prel) == 0) + elog(ERROR, "pg_pathman's cache entry for relation %u has 0 children", + PrelParentRelid(prel)); + + return PrelChildrenCount(prel) - 1; /* last partition */ +} + +static inline List * +PrelExpressionColumnNames(const PartRelationInfo *prel) +{ + List *columns = NIL; + int i = -1; + + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + { + AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; + char *attname = get_attname_compat(PrelParentRelid(prel), attnum); + + columns = lappend(columns, makeString(attname)); + } + + return columns; +} + +static inline Node * +PrelExpressionForRelid(const PartRelationInfo *prel, Index 
rti) +{ + /* TODO: implement some kind of cache */ + Node *expr = copyObject(prel->expr); + + if (rti != PART_EXPR_VARNO) + ChangeVarNodes(expr, PART_EXPR_VARNO, rti, 0); + + return expr; +} + +#if PG_VERSION_NUM >= 130000 +AttrMap *PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc); +#else +AttrNumber *PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length); +#endif + + +/* PartType wrappers */ +static inline void +WrongPartType(PartType parttype) +{ + elog(ERROR, "Unknown partitioning type %u", parttype); +} + +static inline PartType +DatumGetPartType(Datum datum) +{ + uint32 parttype = DatumGetUInt32(datum); + + if (parttype < 1 || parttype > 2) + WrongPartType(parttype); + + return (PartType) parttype; +} + +static inline char * +PartTypeToCString(PartType parttype) +{ + switch (parttype) + { + case PT_HASH: + return "1"; + + case PT_RANGE: + return "2"; + + default: + WrongPartType(parttype); + return NULL; /* keep compiler happy */ + } +} + + +/* Status cache */ +void forget_status_of_relation(Oid relid); +void invalidate_status_cache(void); + +/* Dispatch cache */ +bool has_pathman_relation_info(Oid relid); +PartRelationInfo *get_pathman_relation_info(Oid relid); +void close_pathman_relation_info(PartRelationInfo *prel); + +void qsort_range_entries(RangeEntry *entries, int nentries, + const PartRelationInfo *prel); + +void shout_if_prel_is_invalid(const Oid parent_oid, + const PartRelationInfo *prel, + const PartType expected_part_type); + +/* Bounds cache */ +void forget_bounds_of_rel(Oid partition); +PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); +Expr *get_partition_constraint_expr(Oid partition, bool raise_error); +void invalidate_bounds_cache(void); + +/* Parents cache */ +void cache_parent_of_partition(Oid partition, Oid parent); +void forget_parent_of_partition(Oid partition); +Oid get_parent_of_partition(Oid partition); +void 
invalidate_parents_cache(void); + +/* Partitioning expression routines */ +Node *parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, + Node **parsetree_out); + +Node *cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type); + +char *canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr); + +/* Global invalidation routines */ +void delay_pathman_shutdown(void); +void finish_delayed_invalidation(void); + +void init_relation_info_static_data(void); + + +/* For pg_pathman.enable_bounds_cache GUC */ +extern bool pg_pathman_enable_bounds_cache; + +extern HTAB *prel_resowner; + +/* This allows us to track leakers of PartRelationInfo */ +#ifdef USE_RELINFO_LEAK_TRACKER +extern const char *prel_resowner_function; +extern int prel_resowner_line; + +#define get_pathman_relation_info(relid) \ + ( \ + prel_resowner_function = __FUNCTION__, \ + prel_resowner_line = __LINE__, \ + get_pathman_relation_info(relid) \ + ) + +#define close_pathman_relation_info(prel) \ + do { \ + close_pathman_relation_info(prel); \ + prel = NULL; \ + } while (0) +#endif /* USE_RELINFO_LEAK_TRACKER */ + + +#endif /* RELATION_INFO_H */ diff --git a/src/runtimeappend.h b/src/include/runtime_append.h similarity index 59% rename from src/runtimeappend.h rename to src/include/runtime_append.h index 55c1320e..bc76ea70 100644 --- a/src/runtimeappend.h +++ b/src/include/runtime_append.h @@ -11,6 +11,7 @@ #ifndef RUNTIME_APPEND_H #define RUNTIME_APPEND_H + #include "pathman.h" #include "nodes_common.h" @@ -20,6 +21,9 @@ #include "commands/explain.h" +#define RUNTIME_APPEND_NODE_NAME "RuntimeAppend" + + typedef struct { CustomPath cpath; @@ -36,7 +40,13 @@ typedef struct /* Restrictions to be checked during ReScan and Exec */ List *custom_exprs; - List *custom_expr_states; + + /* Refined clauses for partition pruning */ + List *canon_custom_exprs; + + /* Copy of partitioning expression and dispatch info */ + 
Node *prel_expr; + PartRelationInfo *prel; /* All available plans \ plan states */ HTAB *children_table; @@ -64,31 +74,32 @@ extern CustomScanMethods runtimeappend_plan_methods; extern CustomExecMethods runtimeappend_exec_methods; -void init_runtimeappend_static_data(void); +void init_runtime_append_static_data(void); + +Path * create_runtime_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel); -Path * create_runtimeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel); +Plan * create_runtime_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans); -Plan * create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans); +Node * runtime_append_create_scan_state(CustomScan *node); -Node * runtimeappend_create_scan_state(CustomScan *node); +void runtime_append_begin(CustomScanState *node, + EState *estate, + int eflags); -void runtimeappend_begin(CustomScanState *node, - EState *estate, - int eflags); +TupleTableSlot * runtime_append_exec(CustomScanState *node); -TupleTableSlot * runtimeappend_exec(CustomScanState *node); +void runtime_append_end(CustomScanState *node); -void runtimeappend_end(CustomScanState *node); +void runtime_append_rescan(CustomScanState *node); -void runtimeappend_rescan(CustomScanState *node); +void runtime_append_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); -void runtimeappend_explain(CustomScanState *node, - List *ancestors, - ExplainState *es); -#endif +#endif /* RUNTIME_APPEND_H */ diff --git a/src/runtime_merge_append.h b/src/include/runtime_merge_append.h similarity index 61% rename from src/runtime_merge_append.h rename to src/include/runtime_merge_append.h index 8dd8dcb1..8d24bf20 100644 --- a/src/runtime_merge_append.h +++ b/src/include/runtime_merge_append.h @@ -13,12 +13,16 
@@ #ifndef RUNTIME_MERGE_APPEND_H #define RUNTIME_MERGE_APPEND_H -#include "runtimeappend.h" + +#include "runtime_append.h" #include "pathman.h" #include "postgres.h" +#define RUNTIME_MERGE_APPEND_NODE_NAME "RuntimeMergeAppend" + + typedef struct { RuntimeAppendPath rpath; @@ -53,29 +57,30 @@ extern CustomExecMethods runtime_merge_append_exec_methods; void init_runtime_merge_append_static_data(void); -Path * create_runtimemergeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel); +Path * create_runtime_merge_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel); + +Plan * create_runtime_merge_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans); -Plan * create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans); +Node * runtime_merge_append_create_scan_state(CustomScan *node); -Node * runtimemergeappend_create_scan_state(CustomScan *node); +void runtime_merge_append_begin(CustomScanState *node, + EState *estate, + int eflags); -void runtimemergeappend_begin(CustomScanState *node, - EState *estate, - int eflags); +TupleTableSlot * runtime_merge_append_exec(CustomScanState *node); -TupleTableSlot * runtimemergeappend_exec(CustomScanState *node); +void runtime_merge_append_end(CustomScanState *node); -void runtimemergeappend_end(CustomScanState *node); +void runtime_merge_append_rescan(CustomScanState *node); -void runtimemergeappend_rescan(CustomScanState *node); +void runtime_merge_append_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); -void runtimemergeappend_explain(CustomScanState *node, - List *ancestors, - ExplainState *es); -#endif +#endif /* RUNTIME_MERGE_APPEND_H */ diff --git a/src/include/utility_stmt_hooking.h b/src/include/utility_stmt_hooking.h new file mode 100644 index 
00000000..cc22efaf --- /dev/null +++ b/src/include/utility_stmt_hooking.h @@ -0,0 +1,41 @@ +/* ------------------------------------------------------------------------ + * + * utility_stmt_hooking.h + * Override COPY TO/FROM and ALTER TABLE ... RENAME statements + * for partitioned tables + * + * Copyright (c) 2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef COPY_STMT_HOOKING_H +#define COPY_STMT_HOOKING_H + + +#include "relation_info.h" + +#include "postgres.h" +#include "commands/copy.h" +#include "nodes/nodes.h" + + +/* Various traits */ +bool is_pathman_related_copy(Node *parsetree); +bool is_pathman_related_table_rename(Node *parsetree, + Oid *relation_oid_out, + bool *is_parent_out); +bool is_pathman_related_alter_column_type(Node *parsetree, + Oid *parent_relid_out, + AttrNumber *attr_number, + PartType *part_type_out); + +/* Statement handlers */ +void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, + int stmt_location, int stmt_len, uint64 *processed); + +void PathmanRenameConstraint(Oid partition_relid, const RenameStmt *rename_stmt); +void PathmanRenameSequence(Oid parent_relid, const RenameStmt *rename_stmt); + + +#endif /* COPY_STMT_HOOKING_H */ diff --git a/src/include/utils.h b/src/include/utils.h new file mode 100644 index 00000000..566c04db --- /dev/null +++ b/src/include/utils.h @@ -0,0 +1,89 @@ +/* ------------------------------------------------------------------------ + * + * utils.h + * prototypes of various support functions + * + * Copyright (c) 2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PATHMAN_UTILS_H +#define PATHMAN_UTILS_H + + +#include "postgres.h" +#include "parser/parse_oper.h" +#include "fmgr.h" + + +/* + * Various traits. 
+ */ +bool clause_contains_params(Node *clause); +bool is_date_type_internal(Oid typid); +bool check_security_policy_internal(Oid relid, Oid role); +bool match_expr_to_operand(const Node *expr, const Node *operand); + +/* + * Misc. + */ +List *list_reverse(List *l); + +/* + * Dynamic arrays. + */ + +#define ARRAY_EXP 2 + +#define ArrayAlloc(array, alloced, used, size) \ + do { \ + (array) = palloc((size) * sizeof(*(array))); \ + (alloced) = (size); \ + (used) = 0; \ + } while (0) + +#define ArrayPush(array, alloced, used, value) \ + do { \ + if ((alloced) <= (used)) \ + { \ + (alloced) = (alloced) * ARRAY_EXP + 1; \ + (array) = repalloc((array), (alloced) * sizeof(*(array))); \ + } \ + \ + (array)[(used)] = (value); \ + \ + (used)++; \ + } while (0) + +/* + * Useful functions for relations. + */ +Oid get_rel_owner(Oid relid); +char *get_rel_name_or_relid(Oid relid); +char *get_qualified_rel_name(Oid relid); +RangeVar *makeRangeVarFromRelid(Oid relid); + +/* + * Operator-related stuff. + */ +Operator get_binary_operator(char *opname, Oid arg1, Oid arg2); +void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2); +void extract_op_func_and_ret_type(char *opname, + Oid type1, Oid type2, + Oid *op_func, + Oid *op_ret_type); + +/* + * Print values and cast types. 
+ */ +char *datum_to_cstring(Datum datum, Oid typid); +Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); +Datum extract_binary_interval_from_text(Datum interval_text, + Oid part_atttype, + Oid *interval_type); +char **deconstruct_text_array(Datum array, int *array_size); +RangeVar **qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); +void check_relation_oid(Oid relid); + +#endif /* PATHMAN_UTILS_H */ diff --git a/src/xact_handling.h b/src/include/xact_handling.h similarity index 68% rename from src/xact_handling.h rename to src/include/xact_handling.h index b5f8ed3c..fe9f976c 100644 --- a/src/xact_handling.h +++ b/src/include/xact_handling.h @@ -11,6 +11,7 @@ #ifndef XACT_HANDLING_H #define XACT_HANDLING_H + #include "pathman.h" #include "postgres.h" @@ -19,11 +20,7 @@ /* * Transaction locks. */ -bool xact_lock_partitioned_rel(Oid relid, bool nowait); -void xact_unlock_partitioned_rel(Oid relid); - -bool xact_lock_rel_exclusive(Oid relid, bool nowait); -void xact_unlock_rel_exclusive(Oid relid); +LockAcquireResult xact_lock_rel(Oid relid, LOCKMODE lockmode, bool nowait); /* * Utility checks. 
@@ -31,6 +28,9 @@ void xact_unlock_rel_exclusive(Oid relid); bool xact_bgw_conflicting_lock_exists(Oid relid); bool xact_is_level_read_committed(void); bool xact_is_transaction_stmt(Node *stmt); -bool xact_is_set_transaction_stmt(Node *stmt); +bool xact_is_set_stmt(Node *stmt, const char *name); +bool xact_is_alter_pathman_stmt(Node *stmt); +bool xact_object_is_visible(TransactionId obj_xmin); + -#endif +#endif /* XACT_HANDLING_H */ diff --git a/src/init.c b/src/init.c index 60eff1ad..1907d9dc 100644 --- a/src/init.c +++ b/src/init.c @@ -3,7 +3,7 @@ * init.c * Initialization functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -11,6 +11,8 @@ * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" + #include "hooks.h" #include "init.h" #include "pathman.h" @@ -19,15 +21,21 @@ #include "utils.h" #include "access/htup_details.h" +#include "access/heapam.h" +#include "access/genam.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "catalog/indexing.h" -#include "catalog/pg_constraint.h" +#include "catalog/pg_extension.h" #include "catalog/pg_inherits.h" -#include "catalog/pg_inherits_fn.h" #include "catalog/pg_type.h" #include "miscadmin.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/nodeFuncs.h" +#endif #include "optimizer/clauses.h" -#include "utils/datum.h" #include "utils/inval.h" #include "utils/builtins.h" #include "utils/fmgroids.h" @@ -37,94 +45,150 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif -/* Help user in case of emergency */ -#define INIT_ERROR_HINT "pg_pathman will be disabled to allow you to resolve this issue" +#include -/* Initial size of 
'partitioned_rels' table */ -#define PART_RELS_SIZE 10 -#define CHILD_FACTOR 500 +/* Various memory contexts for caches */ +MemoryContext TopPathmanContext = NULL; +MemoryContext PathmanParentsCacheContext = NULL; +MemoryContext PathmanStatusCacheContext = NULL; +MemoryContext PathmanBoundsCacheContext = NULL; -/* Storage for PartRelationInfos */ -HTAB *partitioned_rels = NULL; /* Storage for PartParentInfos */ -HTAB *parent_cache = NULL; +HTAB *parents_cache = NULL; + +/* Storage for PartStatusInfos */ +HTAB *status_cache = NULL; + +/* Storage for PartBoundInfos */ +HTAB *bounds_cache = NULL; /* pg_pathman's init status */ -PathmanInitState pg_pathman_init_state; +PathmanInitState pathman_init_state; + +/* pg_pathman's hooks state */ +bool pathman_hooks_enabled = true; -/* Shall we install new relcache callback? */ -static bool relcache_callback_needed = true; /* Functions for various local caches */ static bool init_pathman_relation_oids(void); static void fini_pathman_relation_oids(void); static void init_local_cache(void); static void fini_local_cache(void); -static void read_pathman_config(void); -static Expr *get_partition_constraint_expr(Oid partition, AttrNumber part_attno); +static bool validate_range_opexpr(const Expr *expr, + const PartRelationInfo *prel, + const TypeCacheEntry *tce, + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null); + +static bool read_opexpr_const(const OpExpr *opexpr, + const PartRelationInfo *prel, + Datum *value); -static int cmp_range_entries(const void *p1, const void *p2, void *arg); -static bool validate_range_constraint(const Expr *expr, - const PartRelationInfo *prel, - Datum *min, - Datum *max); +/* Validate SQL facade */ +static uint32 build_semver_uint32(char *version_cstr); +static uint32 get_plpgsql_frontend_version(void); +static void validate_plpgsql_frontend_version(uint32 current_ver, + uint32 compatible_ver); -static bool validate_hash_constraint(const Expr *expr, - const PartRelationInfo *prel, - 
uint32 *part_hash); -static bool read_opexpr_const(const OpExpr *opexpr, - const PartRelationInfo *prel, - Datum *val); +/* + * Safe hash search (takes care of disabled pg_pathman). + */ +void * +pathman_cache_search_relid(HTAB *cache_table, + Oid relid, + HASHACTION action, + bool *found) +{ + /* Table is NULL, take some actions */ + if (cache_table == NULL) + switch (action) + { + case HASH_FIND: + case HASH_ENTER: + case HASH_REMOVE: + elog(ERROR, "pg_pathman is not initialized yet"); + break; -static int oid_cmp(const void *p1, const void *p2); + /* Something strange has just happened */ + default: + elog(ERROR, "unexpected action in function " + CppAsString(pathman_cache_search_relid)); + break; + } + /* Everything is fine */ + return hash_search(cache_table, (const void *) &relid, action, found); +} /* * Save and restore main init state. */ void -save_pathman_init_state(PathmanInitState *temp_init_state) +save_pathman_init_state(volatile PathmanInitState *temp_init_state) { - *temp_init_state = pg_pathman_init_state; + *temp_init_state = pathman_init_state; } void -restore_pathman_init_state(const PathmanInitState *temp_init_state) +restore_pathman_init_state(const volatile PathmanInitState *temp_init_state) { - pg_pathman_init_state = *temp_init_state; + /* + * initialization_needed is not restored: it is not just a setting but + * internal thing, caches must be inited when it is set. Better would be + * to separate it from this struct entirely. + */ + pathman_init_state.pg_pathman_enable = temp_init_state->pg_pathman_enable; + pathman_init_state.auto_partition = temp_init_state->auto_partition; + pathman_init_state.override_copy = temp_init_state->override_copy; } /* - * Create main GUC. + * Create main GUCs. 
*/ void -init_main_pathman_toggle(void) +init_main_pathman_toggles(void) { /* Main toggle, load_config() will enable it */ - DefineCustomBoolVariable("pg_pathman.enable", - "Enables pg_pathman's optimizations during the planner stage", + DefineCustomBoolVariable(PATHMAN_ENABLE, + "Enables pg_pathman's optimizations during planning stage", + NULL, + &pathman_init_state.pg_pathman_enable, + DEFAULT_PATHMAN_ENABLE, + PGC_SUSET, + 0, + pathman_enable_check_hook, + pathman_enable_assign_hook, + NULL); + + /* Global toggle for automatic partition creation */ + DefineCustomBoolVariable(PATHMAN_ENABLE_AUTO_PARTITION, + "Enables automatic partition creation", NULL, - &pg_pathman_init_state.pg_pathman_enable, - true, - PGC_USERSET, + &pathman_init_state.auto_partition, + DEFAULT_PATHMAN_AUTO, + PGC_SUSET, 0, NULL, - pg_pathman_enable_assign_hook, + NULL, NULL); - DefineCustomBoolVariable("pg_pathman.enable_auto_partition", - "Enables auto partition propagation", + /* Global toggle for COPY stmt handling */ + DefineCustomBoolVariable(PATHMAN_OVERRIDE_COPY, + "Override COPY statement handling", NULL, - &pg_pathman_init_state.auto_partition, - true, - PGC_USERSET, + &pathman_init_state.override_copy, + DEFAULT_PATHMAN_OVERRIDE_COPY, + PGC_SUSET, 0, NULL, NULL, @@ -138,6 +202,8 @@ init_main_pathman_toggle(void) bool load_config(void) { + static bool relcache_callback_needed = true; + /* * Try to cache important relids. 
* @@ -150,8 +216,12 @@ load_config(void) if (!init_pathman_relation_oids()) return false; /* remain 'uninitialized', exit before creating main caches */ - init_local_cache(); /* create 'partitioned_rels' hash table */ - read_pathman_config(); /* read PATHMAN_CONFIG table & fill cache */ + /* Validate pg_pathman's Pl/PgSQL facade (might be outdated) */ + validate_plpgsql_frontend_version(get_plpgsql_frontend_version(), + build_semver_uint32(LOWEST_COMPATIBLE_FRONT)); + + /* Create various hash tables (caches) */ + init_local_cache(); /* Register pathman_relcache_hook(), currently we can't unregister it */ if (relcache_callback_needed) @@ -161,7 +231,7 @@ load_config(void) } /* Mark pg_pathman as initialized */ - pg_pathman_init_state.initialization_needed = false; + pathman_init_state.initialization_needed = false; elog(DEBUG2, "pg_pathman's config has been loaded successfully [%u]", MyProcPid); @@ -181,7 +251,7 @@ unload_config(void) fini_local_cache(); /* Mark pg_pathman as uninitialized */ - pg_pathman_init_state.initialization_needed = true; + pathman_init_state.initialization_needed = true; elog(DEBUG2, "pg_pathman's config has been unloaded successfully [%u]", MyProcPid); } @@ -192,9 +262,7 @@ unload_config(void) Size estimate_pathman_shmem_size(void) { - return estimate_dsm_config_size() + - estimate_concurrent_part_task_slots_size() + - MAXALIGN(sizeof(PathmanState)); + return estimate_concurrent_part_task_slots_size(); } /* @@ -205,7 +273,8 @@ static bool init_pathman_relation_oids(void) { Oid schema = get_pathman_schema(); - Assert(schema != InvalidOid); + if (schema == InvalidOid) + return false; /* extension can be dropped by another backend */ /* Cache PATHMAN_CONFIG relation's Oid */ pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, schema); @@ -244,22 +313,81 @@ init_local_cache(void) { HASHCTL ctl; + /* Destroy caches, just in case */ + hash_destroy(parents_cache); + hash_destroy(status_cache); + hash_destroy(bounds_cache); + + /* Reset 
pg_pathman's memory contexts */ + if (TopPathmanContext) + { + /* Check that child contexts exist */ + Assert(MemoryContextIsValid(PathmanParentsCacheContext)); + Assert(MemoryContextIsValid(PathmanStatusCacheContext)); + Assert(MemoryContextIsValid(PathmanBoundsCacheContext)); + + /* Clear children */ + MemoryContextReset(PathmanParentsCacheContext); + MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); + } + /* Initialize pg_pathman's memory contexts */ + else + { + Assert(PathmanParentsCacheContext == NULL); + Assert(PathmanStatusCacheContext == NULL); + Assert(PathmanBoundsCacheContext == NULL); + + TopPathmanContext = + AllocSetContextCreate(TopMemoryContext, + PATHMAN_TOP_CONTEXT, + ALLOCSET_DEFAULT_SIZES); + + /* For PartParentInfo */ + PathmanParentsCacheContext = + AllocSetContextCreate(TopPathmanContext, + PATHMAN_PARENTS_CACHE, + ALLOCSET_SMALL_SIZES); + + /* For PartStatusInfo */ + PathmanStatusCacheContext = + AllocSetContextCreate(TopPathmanContext, + PATHMAN_STATUS_CACHE, + ALLOCSET_SMALL_SIZES); + + /* For PartBoundInfo */ + PathmanBoundsCacheContext = + AllocSetContextCreate(TopPathmanContext, + PATHMAN_BOUNDS_CACHE, + ALLOCSET_SMALL_SIZES); + } + memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartRelationInfo); - ctl.hcxt = TopMemoryContext; /* place data to persistent mcxt */ + ctl.entrysize = sizeof(PartParentInfo); + ctl.hcxt = PathmanParentsCacheContext; - partitioned_rels = hash_create("pg_pathman's partitioned relations cache", - PART_RELS_SIZE, &ctl, HASH_ELEM | HASH_BLOBS); + parents_cache = hash_create(PATHMAN_PARENTS_CACHE, + PART_RELS_SIZE, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartParentInfo); - ctl.hcxt = TopMemoryContext; /* place data to persistent mcxt */ + ctl.entrysize = sizeof(PartStatusInfo); + ctl.hcxt = PathmanStatusCacheContext; - parent_cache = 
hash_create("pg_pathman's partition parents cache", - PART_RELS_SIZE * CHILD_FACTOR, - &ctl, HASH_ELEM | HASH_BLOBS); + status_cache = hash_create(PATHMAN_STATUS_CACHE, + PART_RELS_SIZE * CHILD_FACTOR, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(PartBoundInfo); + ctl.hcxt = PathmanBoundsCacheContext; + + bounds_cache = hash_create(PATHMAN_BOUNDS_CACHE, + PART_RELS_SIZE * CHILD_FACTOR, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); } /* @@ -268,179 +396,36 @@ init_local_cache(void) static void fini_local_cache(void) { - HASH_SEQ_STATUS status; - PartRelationInfo *prel; - - hash_seq_init(&status, partitioned_rels); - while((prel = (PartRelationInfo *) hash_seq_search(&status)) != NULL) - { - if (PrelIsValid(prel)) - { - FreeChildrenArray(prel); - FreeRangesArray(prel); - } - } - - /* Now we can safely destroy hash tables */ - hash_destroy(partitioned_rels); - hash_destroy(parent_cache); - partitioned_rels = NULL; - parent_cache = NULL; -} + /* First, destroy hash tables */ + hash_destroy(parents_cache); + hash_destroy(status_cache); + hash_destroy(bounds_cache); -/* - * Initializes pg_pathman's global state (PathmanState) & locks. - */ -void -init_shmem_config(void) -{ - bool found; + parents_cache = NULL; + status_cache = NULL; + bounds_cache = NULL; - /* Check if module was initialized in postmaster */ - pmstate = ShmemInitStruct("pg_pathman's global state", - sizeof(PathmanState), &found); - if (!found) + if (prel_resowner != NULL) { - /* - * Initialize locks in postmaster - */ - if (!IsUnderPostmaster) - { - /* NOTE: dsm_array is redundant, hence the commented code */ - /* pmstate->dsm_init_lock = LWLockAssign(); */ - } + hash_destroy(prel_resowner); + prel_resowner = NULL; } - /* Allocate some space for concurrent part slots */ - init_concurrent_part_task_slots(); -} - -/* - * Fill PartRelationInfo with partition-related info. 
- */ -void -fill_prel_with_partitions(const Oid *partitions, - const uint32 parts_count, - PartRelationInfo *prel) -{ - uint32 i; - Expr *con_expr; - MemoryContext mcxt = TopMemoryContext; - - /* Allocate memory for 'prel->children' & 'prel->ranges' (if needed) */ - prel->children = MemoryContextAllocZero(mcxt, parts_count * sizeof(Oid)); - if (prel->parttype == PT_RANGE) - prel->ranges = MemoryContextAllocZero(mcxt, parts_count * sizeof(RangeEntry)); - prel->children_count = parts_count; - - for (i = 0; i < PrelChildrenCount(prel); i++) + /* Now we can clear allocations */ + if (TopPathmanContext) { - con_expr = get_partition_constraint_expr(partitions[i], prel->attnum); - - /* Perform a partitioning_type-dependent task */ - switch (prel->parttype) - { - case PT_HASH: - { - uint32 hash; /* hash value < parts_count */ - - if (validate_hash_constraint(con_expr, prel, &hash)) - prel->children[hash] = partitions[i]; - else - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("Wrong constraint format for HASH partition \"%s\"", - get_rel_name_or_relid(partitions[i])), - errhint(INIT_ERROR_HINT))); - } - } - break; - - case PT_RANGE: - { - Datum range_min, range_max; - - if (validate_range_constraint(con_expr, prel, - &range_min, &range_max)) - { - prel->ranges[i].child_oid = partitions[i]; - prel->ranges[i].min = range_min; - prel->ranges[i].max = range_max; - } - else - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("Wrong constraint format for RANGE partition \"%s\"", - get_rel_name_or_relid(partitions[i])), - errhint(INIT_ERROR_HINT))); - } - } - break; - - default: - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("Unknown partitioning type for relation \"%s\"", - get_rel_name_or_relid(PrelParentRelid(prel))), - errhint(INIT_ERROR_HINT))); - } - } + MemoryContextReset(PathmanParentsCacheContext); + 
MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); } - - /* Finalize 'prel' for a RANGE-partitioned table */ - if (prel->parttype == PT_RANGE) - { - MemoryContext old_mcxt; - - /* Sort partitions by RangeEntry->min asc */ - qsort_arg((void *) prel->ranges, PrelChildrenCount(prel), - sizeof(RangeEntry), cmp_range_entries, - (void *) &prel->cmp_proc); - - /* Initialize 'prel->children' array */ - for (i = 0; i < PrelChildrenCount(prel); i++) - prel->children[i] = prel->ranges[i].child_oid; - - /* Copy all min & max Datums to the persistent mcxt */ - old_mcxt = MemoryContextSwitchTo(TopMemoryContext); - for (i = 0; i < PrelChildrenCount(prel); i++) - { - prel->ranges[i].max = datumCopy(prel->ranges[i].max, - prel->attbyval, - prel->attlen); - - prel->ranges[i].min = datumCopy(prel->ranges[i].min, - prel->attbyval, - prel->attlen); - } - MemoryContextSwitchTo(old_mcxt); - - } - -#ifdef USE_ASSERT_CHECKING - /* Check that each partition Oid has been assigned properly */ - if (prel->parttype == PT_HASH) - for (i = 0; i < PrelChildrenCount(prel); i++) - { - if (prel->children[i] == InvalidOid) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - elog(ERROR, "pg_pathman's cache for relation \"%s\" " - "has not been properly initialized", - get_rel_name_or_relid(PrelParentRelid(prel))); - } - } -#endif } + /* * find_inheritance_children * * Returns an array containing the OIDs of all relations which - * inherit *directly* from the relation with OID 'parentrelId'. + * inherit *directly* from the relation with OID 'parent_relid'. * * The specified lock type is acquired on each child relation (but not on the * given rel; caller should already have locked it). 
If lockmode is NoLock @@ -449,60 +434,64 @@ fill_prel_with_partitions(const Oid *partitions, * * borrowed from pg_inherits.c */ -Oid * -find_inheritance_children_array(Oid parentrelId, LOCKMODE lockmode, uint32 *size) +find_children_status +find_inheritance_children_array(Oid parent_relid, + LOCKMODE lockmode, + bool nowait, + uint32 *children_size, /* ret value #1 */ + Oid **children) /* ret value #2 */ { Relation relation; SysScanDesc scan; ScanKeyData key[1]; HeapTuple inheritsTuple; - Oid inhrelid; + Oid *oidarr; uint32 maxoids, - numoids, - i; + numoids; + + Oid *result = NULL; + uint32 nresult = 0; + + uint32 i; + + /* Init safe return values */ + *children_size = 0; + *children = NULL; /* - * Can skip the scan if pg_class shows the relation has never had a - * subclass. + * Can skip the scan if pg_class shows the + * relation has never had a subclass. */ - if (!has_subclass(parentrelId)) - { - *size = 0; - return NULL; - } + if (!has_subclass(parent_relid)) + return FCS_NO_CHILDREN; /* * Scan pg_inherits and build a working array of subclass OIDs. 
*/ - maxoids = 32; - oidarr = (Oid *) palloc(maxoids * sizeof(Oid)); - numoids = 0; + ArrayAlloc(oidarr, maxoids, numoids, 32); - relation = heap_open(InheritsRelationId, AccessShareLock); + relation = heap_open_compat(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhparent, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(parentrelId)); + ObjectIdGetDatum(parent_relid)); scan = systable_beginscan(relation, InheritsParentIndexId, true, NULL, 1, key); while ((inheritsTuple = systable_getnext(scan)) != NULL) { + Oid inhrelid; + inhrelid = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhrelid; - if (numoids >= maxoids) - { - maxoids *= 2; - oidarr = (Oid *) repalloc(oidarr, maxoids * sizeof(Oid)); - } - oidarr[numoids++] = inhrelid; + ArrayPush(oidarr, maxoids, numoids, inhrelid); } systable_endscan(scan); - heap_close(relation, AccessShareLock); + heap_close_compat(relation, AccessShareLock); /* * If we found more than one child, sort them by OID. This ensures @@ -513,17 +502,31 @@ find_inheritance_children_array(Oid parentrelId, LOCKMODE lockmode, uint32 *size if (numoids > 1) qsort(oidarr, numoids, sizeof(Oid), oid_cmp); - /* - * Acquire locks and build the result list. - */ + /* Acquire locks and build the result list */ for (i = 0; i < numoids; i++) { - inhrelid = oidarr[i]; + Oid inhrelid = oidarr[i]; if (lockmode != NoLock) { /* Get the lock to synchronize against concurrent drop */ - LockRelationOid(inhrelid, lockmode); + if (nowait) + { + if (!ConditionalLockRelationOid(inhrelid, lockmode)) + { + uint32 j; + + /* Unlock all previously locked children */ + for (j = 0; j < i; j++) + UnlockRelationOid(oidarr[j], lockmode); + + pfree(oidarr); + + /* We couldn't lock this child, retreat! 
*/ + return FCS_COULD_NOT_LOCK; + } + } + else LockRelationOid(inhrelid, lockmode); /* * Now that we have the lock, double-check to see if the relation @@ -534,38 +537,114 @@ find_inheritance_children_array(Oid parentrelId, LOCKMODE lockmode, uint32 *size { /* Release useless lock */ UnlockRelationOid(inhrelid, lockmode); + /* And ignore this relation */ continue; } } + + /* Alloc array if it's the first time */ + if (nresult == 0) + result = palloc(numoids * sizeof(Oid)); + + /* Save Oid of the existing relation */ + result[nresult++] = inhrelid; } - *size = numoids; - return oidarr; + /* Set return values */ + *children_size = nresult; + *children = result; + + pfree(oidarr); + + /* Do we have children? */ + return nresult > 0 ? FCS_FOUND : FCS_NO_CHILDREN; } + + /* * Generate check constraint name for a partition. - * - * This function does not perform sanity checks at all. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_check_constraint_name_relid_internal(Oid relid) +{ + Assert(OidIsValid(relid)); + return build_check_constraint_name_relname_internal(get_rel_name(relid)); +} + +/* + * Generate check constraint name for a partition. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_check_constraint_name_relname_internal(const char *relname) +{ + Assert(relname != NULL); + return psprintf("pathman_%s_check", relname); +} + +/* + * Generate part sequence name for a parent. + * NOTE: this function does not perform sanity checks at all. */ char * -build_check_constraint_name_internal(Oid relid, AttrNumber attno) +build_sequence_name_relid_internal(Oid relid) { - return psprintf("pathman_%s_%u_check", get_rel_name(relid), attno); + Assert(OidIsValid(relid)); + return build_sequence_name_relname_internal(get_rel_name(relid)); } +/* + * Generate part sequence name for a parent. + * NOTE: this function does not perform sanity checks at all. 
+ */ +char * +build_sequence_name_relname_internal(const char *relname) +{ + Assert(relname != NULL); + return psprintf("%s_seq", relname); +} + +/* + * Generate name for update trigger. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_update_trigger_name_internal(Oid relid) +{ + Assert(OidIsValid(relid)); + return psprintf("%s_upd_trig", get_rel_name(relid)); +} + +/* + * Generate name for update trigger's function. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_update_trigger_func_name_internal(Oid relid) +{ + Assert(OidIsValid(relid)); + return psprintf("%s_upd_trig_func", get_rel_name(relid)); +} + + + /* * Check that relation 'relid' is partitioned by pg_pathman. - * - * Extract tuple into 'values' and 'isnull' if they're provided. + * Extract tuple into 'values', 'isnull', 'xmin', 'iptr' if they're provided. */ bool pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, - TransactionId *xmin) + TransactionId *xmin, ItemPointerData* iptr) { Relation rel; +#if PG_VERSION_NUM >= 120000 + TableScanDesc scan; +#else HeapScanDesc scan; +#endif ScanKeyData key[1]; Snapshot snapshot; HeapTuple htup; @@ -577,18 +656,21 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, ObjectIdGetDatum(relid)); /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(), AccessShareLock); + rel = heap_open_compat(get_pathman_config_relid(false), AccessShareLock); - /* Check that 'partrel' column is if regclass type */ - Assert(RelationGetDescr(rel)-> - attrs[Anum_pathman_config_partrel - 1]-> - atttypid == REGCLASSOID); + /* Check that 'partrel' column is of regclass type */ + Assert(TupleDescAttr(RelationGetDescr(rel), + Anum_pathman_config_partrel - 1)->atttypid == REGCLASSOID); /* Check that number of columns == Natts_pathman_config */ Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); snapshot = 
RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + scan = table_beginscan(rel, snapshot, 1, key); +#else scan = heap_beginscan(rel, snapshot, 1, key); +#endif while ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { @@ -597,50 +679,52 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Extract data if necessary */ if (values && isnull) { + htup = heap_copytuple(htup); heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); /* Perform checks for non-NULL columns */ Assert(!isnull[Anum_pathman_config_partrel - 1]); - Assert(!isnull[Anum_pathman_config_attname - 1]); + Assert(!isnull[Anum_pathman_config_expr - 1]); Assert(!isnull[Anum_pathman_config_parttype - 1]); } /* Set xmin if necessary */ if (xmin) - { - Datum value; - bool isnull; - - value = heap_getsysattr(htup, - MinTransactionIdAttributeNumber, - RelationGetDescr(rel), - &isnull); + *xmin = HeapTupleGetXminCompat(htup); - Assert(!isnull); - *xmin = DatumGetTransactionId(value); - } + /* Set ItemPointer if necessary */ + if (iptr) + *iptr = htup->t_self; /* FIXME: callers should lock table beforehand */ } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(scan); +#else heap_endscan(scan); +#endif UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); - elog(DEBUG2, "PATHMAN_CONFIG table %s relation %u", + elog(DEBUG2, "PATHMAN_CONFIG %s relation %u", (contains_rel ? "contains" : "doesn't contain"), relid); return contains_rel; } /* - * Loads additional pathman parameters like 'enable_parent' or 'auto' - * from PATHMAN_CONFIG_PARAMS + * Loads additional pathman parameters like 'enable_parent' + * or 'auto' from PATHMAN_CONFIG_PARAMS. 
*/ bool read_pathman_params(Oid relid, Datum *values, bool *isnull) { Relation rel; +#if PG_VERSION_NUM >= 120000 + TableScanDesc scan; +#else HeapScanDesc scan; +#endif ScanKeyData key[1]; Snapshot snapshot; HeapTuple htup; @@ -651,271 +735,258 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - rel = heap_open(get_pathman_config_params_relid(), AccessShareLock); + rel = heap_open_compat(get_pathman_config_params_relid(false), AccessShareLock); snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + scan = table_beginscan(rel, snapshot, 1, key); +#else scan = heap_beginscan(rel, snapshot, 1, key); +#endif /* There should be just 1 row */ if ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { /* Extract data if necessary */ + htup = heap_copytuple(htup); heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); row_found = true; + + /* Perform checks for non-NULL columns */ + Assert(!isnull[Anum_pathman_config_params_partrel - 1]); + Assert(!isnull[Anum_pathman_config_params_enable_parent - 1]); + Assert(!isnull[Anum_pathman_config_params_auto - 1]); + Assert(!isnull[Anum_pathman_config_params_spawn_using_bgw - 1]); } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(scan); +#else heap_endscan(scan); +#endif UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); return row_found; } + /* - * Go through the PATHMAN_CONFIG table and create PartRelationInfo entries. + * Validates range constraint. It MUST have one of the following formats: + * 1) EXPRESSION >= CONST AND EXPRESSION < CONST + * 2) EXPRESSION >= CONST + * 3) EXPRESSION < CONST + * + * Writes 'lower' & 'upper' and 'lower_null' & 'upper_null' values on success. 
*/ -static void -read_pathman_config(void) +bool +validate_range_constraint(const Expr *expr, + const PartRelationInfo *prel, + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null) { - Relation rel; - HeapScanDesc scan; - Snapshot snapshot; - HeapTuple htup; + const TypeCacheEntry *tce; - /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(), AccessShareLock); - - /* Check that 'partrel' column is if regclass type */ - Assert(RelationGetDescr(rel)-> - attrs[Anum_pathman_config_partrel - 1]-> - atttypid == REGCLASSOID); + if (!expr) + return false; - /* Check that number of columns == Natts_pathman_config */ - Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); + /* Set default values */ + *lower_null = *upper_null = true; - snapshot = RegisterSnapshot(GetLatestSnapshot()); - scan = heap_beginscan(rel, snapshot, 0, NULL); + /* Find type cache entry for partitioned expression type */ + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); - /* Examine each row and create a PartRelationInfo in local cache */ - while((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) + /* Is it an AND clause? 
*/ + if (is_andclause_compat((Node *) expr)) { - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - Oid relid; /* partitioned table */ - PartType parttype; /* partitioning type */ - text *attname; /* partitioned column name */ - - /* Extract Datums from tuple 'htup' */ - heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); - - /* These attributes are marked as NOT NULL, check anyway */ - Assert(!isnull[Anum_pathman_config_partrel - 1]); - Assert(!isnull[Anum_pathman_config_parttype - 1]); - Assert(!isnull[Anum_pathman_config_attname - 1]); - - /* Extract values from Datums */ - relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - attname = DatumGetTextP(values[Anum_pathman_config_attname - 1]); + const BoolExpr *boolexpr = (const BoolExpr *) expr; + ListCell *lc; - /* Check that relation 'relid' exists */ - if (get_rel_type_id(relid) == InvalidOid) + /* Walk through boolexpr's args */ + foreach (lc, boolexpr->args) { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("Table \"%s\" contains nonexistent relation %u", - PATHMAN_CONFIG, relid), - errhint(INIT_ERROR_HINT))); - } - - /* Create or update PartRelationInfo for this partitioned table */ - refresh_pathman_relation_info(relid, parttype, text_to_cstring(attname)); - } - - /* Clean resources */ - heap_endscan(scan); - UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); -} - -/* - * Get constraint expression tree for a partition. - * - * build_check_constraint_name_internal() is used to build conname. 
- */ -static Expr * -get_partition_constraint_expr(Oid partition, AttrNumber part_attno) -{ - Oid conid; /* constraint Oid */ - char *conname; /* constraint name */ - HeapTuple con_tuple; - Datum conbin_datum; - bool conbin_isnull; - Expr *expr; /* expression tree for constraint */ - - conname = build_check_constraint_name_internal(partition, part_attno); - conid = get_relation_constraint_oid(partition, conname, true); - if (conid == InvalidOid) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("constraint \"%s\" for partition \"%s\" does not exist", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - } + const OpExpr *opexpr = (const OpExpr *) lfirst(lc); - con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); - conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, - Anum_pg_constraint_conbin, - &conbin_isnull); - if (conbin_isnull) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(WARNING, - (errmsg("constraint \"%s\" for partition \"%s\" has NULL conbin", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - pfree(conname); + /* Exit immediately if something is wrong */ + if (!validate_range_opexpr((const Expr *) opexpr, prel, tce, + lower, upper, lower_null, upper_null)) + return false; + } - return NULL; /* could not parse */ + /* Everything seems to be fine */ + return true; } - pfree(conname); - - /* Finally we get a constraint expression tree */ - expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); - - /* Don't foreget to release syscache tuple */ - ReleaseSysCache(con_tuple); - - return expr; -} - -/* qsort comparison function for RangeEntries */ -static int -cmp_range_entries(const void *p1, const void *p2, void *arg) -{ - const RangeEntry *v1 = (const RangeEntry *) p1; - const RangeEntry *v2 = (const RangeEntry *) p2; - - Oid cmp_proc_oid = *(Oid *) arg; - return OidFunctionCall2(cmp_proc_oid, v1->min, 
v2->min); + /* It might be just an OpExpr clause */ + else return validate_range_opexpr(expr, prel, tce, + lower, upper, lower_null, upper_null); } /* - * Validates range constraint. It MUST have this exact format: - * - * VARIABLE >= CONST AND VARIABLE < CONST - * - * Writes 'min' & 'max' values on success. + * Validates a single expression of kind: + * 1) EXPRESSION >= CONST + * 2) EXPRESSION < CONST */ static bool -validate_range_constraint(const Expr *expr, - const PartRelationInfo *prel, - Datum *min, - Datum *max) +validate_range_opexpr(const Expr *expr, + const PartRelationInfo *prel, + const TypeCacheEntry *tce, + Datum *lower, Datum *upper, + bool *lower_null, bool *upper_null) { - const TypeCacheEntry *tce; - const BoolExpr *boolexpr = (const BoolExpr *) expr; - const OpExpr *opexpr; + const OpExpr *opexpr; + Datum val; if (!expr) return false; - /* it should be an AND operator on top */ - if (!and_clause((Node *) expr)) + /* Fail fast if it's not an OpExpr node */ + if (!IsA(expr, OpExpr)) return false; - tce = lookup_type_cache(prel->atttype, TYPECACHE_BTREE_OPFAMILY); + /* Perform cast */ + opexpr = (const OpExpr *) expr; - /* check that left operand is >= operator */ - opexpr = (OpExpr *) linitial(boolexpr->args); - if (BTGreaterEqualStrategyNumber == get_op_opfamily_strategy(opexpr->opno, - tce->btree_opf)) - { - if (!read_opexpr_const(opexpr, prel, min)) - return false; - } - else + /* Try reading Const value */ + if (!read_opexpr_const(opexpr, prel, &val)) return false; - /* check that right operand is < operator */ - opexpr = (OpExpr *) lsecond(boolexpr->args); - if (BTLessStrategyNumber == get_op_opfamily_strategy(opexpr->opno, - tce->btree_opf)) + /* Examine the strategy (expect '>=' OR '<') */ + switch (get_op_opfamily_strategy(opexpr->opno, tce->btree_opf)) { - if (!read_opexpr_const(opexpr, prel, max)) + case BTGreaterEqualStrategyNumber: + { + /* Bound already exists */ + if (*lower_null == false) + return false; + + *lower_null = false; + 
*lower = val; + + return true; + } + + case BTLessStrategyNumber: + { + /* Bound already exists */ + if (*upper_null == false) + return false; + + *upper_null = false; + *upper = val; + + return true; + } + + default: return false; } - else - return false; - - return true; } /* - * Reads const value from expressions of kind: VAR >= CONST or VAR < CONST + * Reads const value from expressions of kind: + * 1) EXPRESSION >= CONST + * 2) EXPRESSION < CONST */ static bool read_opexpr_const(const OpExpr *opexpr, const PartRelationInfo *prel, - Datum *val) + Datum *value) { - const Node *left; const Node *right; - const Const *constant; + const Const *boundary; + bool cast_success; + /* There should be exactly 2 args */ if (list_length(opexpr->args) != 2) return false; - left = linitial(opexpr->args); + /* Fetch args of expression */ right = lsecond(opexpr->args); - if (!IsA(left, Var) || !IsA(right, Const)) - return false; - if (((Var *) left)->varoattno != prel->attnum) - return false; - if (((Const *) right)->constisnull) - return false; + /* Examine RIGHT argument */ + switch (nodeTag(right)) + { + case T_FuncExpr: + { + FuncExpr *func_expr = (FuncExpr *) right; + Const *constant; - constant = (Const *) right; + /* This node should represent a type cast */ + if (func_expr->funcformat != COERCE_EXPLICIT_CAST && + func_expr->funcformat != COERCE_IMPLICIT_CAST) + return false; - /* Check that types match */ - if (prel->atttype != constant->consttype) + /* This node should have exactly 1 argument */ + if (list_length(func_expr->args) != 1) + return false; + + /* Extract single argument */ + constant = linitial(func_expr->args); + + /* Argument should be a Const */ + if (!IsA(constant, Const)) + return false; + + /* Update RIGHT */ + right = (Node *) constant; + } + /* FALLTHROUGH */ + + case T_Const: + { + boundary = (Const *) right; + + /* CONST is NOT NULL */ + if (boundary->constisnull) + return false; + } + break; + + default: + return false; + } + + /* Cast Const to a 
proper type if needed */ + *value = perform_type_cast(boundary->constvalue, + getBaseType(boundary->consttype), + getBaseType(prel->ev_type), + &cast_success); + + if (!cast_success) { - elog(WARNING, "Constant type in some check constraint does " - "not match the partitioned column's type"); + elog(WARNING, "constant type in some check constraint " + "does not match the partitioned column's type"); + return false; } - *val = constant->constvalue; - return true; } /* * Validate hash constraint. It MUST have this exact format: * - * get_hash_part_idx(TYPE_HASH_PROC(VALUE), PARTITIONS_COUNT) = CUR_PARTITION_HASH + * get_hash_part_idx(TYPE_HASH_PROC(VALUE), PARTITIONS_COUNT) = CUR_PARTITION_IDX * - * Writes 'part_hash' hash value for this partition on success. + * Writes 'part_idx' hash value for this partition on success. */ -static bool +bool validate_hash_constraint(const Expr *expr, const PartRelationInfo *prel, - uint32 *part_hash) + uint32 *part_idx) { const TypeCacheEntry *tce; const OpExpr *eq_expr; const FuncExpr *get_hash_expr, *type_hash_proc_expr; - const Var *var; /* partitioned column */ if (!expr) return false; if (!IsA(expr, OpExpr)) return false; + eq_expr = (const OpExpr *) expr; /* Check that left expression is a function call */ @@ -932,31 +1003,21 @@ validate_hash_constraint(const Expr *expr, if (list_length(get_hash_expr->args) == 2) { - Node *first = linitial(get_hash_expr->args); /* arg #1: TYPE_HASH_PROC(VALUE) */ + Node *first = linitial(get_hash_expr->args); /* arg #1: TYPE_HASH_PROC(EXPRESSION) */ Node *second = lsecond(get_hash_expr->args); /* arg #2: PARTITIONS_COUNT */ - Const *cur_partition_hash; /* hash value for this partition */ + Const *cur_partition_idx; /* hash value for this partition */ if (!IsA(first, FuncExpr) || !IsA(second, Const)) return false; type_hash_proc_expr = (FuncExpr *) first; - /* Check that function is indeed TYPE_HASH_PROC */ - if (type_hash_proc_expr->funcid != prel->hash_proc || - 
!(IsA(linitial(type_hash_proc_expr->args), Var) || - IsA(linitial(type_hash_proc_expr->args), RelabelType))) - { + /* Check that function is indeed TYPE_HASH_PROC() */ + if (type_hash_proc_expr->funcid != prel->hash_proc) return false; - } - /* Extract argument into 'var' */ - if (IsA(linitial(type_hash_proc_expr->args), RelabelType)) - var = (Var *) ((RelabelType *) linitial(type_hash_proc_expr->args))->arg; - else - var = (Var *) linitial(type_hash_proc_expr->args); - - /* Check that 'var' is the partitioning key attribute */ - if (var->varoattno != prel->attnum) + /* There should be exactly 1 argument */ + if (list_length(type_hash_proc_expr->args) != 1) return false; /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ @@ -967,14 +1028,15 @@ validate_hash_constraint(const Expr *expr, if (!IsA(lsecond(eq_expr->args), Const)) return false; - cur_partition_hash = lsecond(eq_expr->args); + /* Fetch CUR_PARTITION_IDX */ + cur_partition_idx = lsecond(eq_expr->args); /* Check that CUR_PARTITION_HASH is NOT NULL */ - if (cur_partition_hash->constisnull) + if (cur_partition_idx->constisnull) return false; - *part_hash = DatumGetUInt32(cur_partition_hash->constvalue); - if (*part_hash >= PrelChildrenCount(prel)) + *part_idx = DatumGetUInt32(cur_partition_idx->constvalue); + if (*part_idx >= PrelChildrenCount(prel)) return false; return true; /* everything seems to be ok */ @@ -983,16 +1045,128 @@ validate_hash_constraint(const Expr *expr, return false; } -/* needed for find_inheritance_children_array() function */ -static int -oid_cmp(const void *p1, const void *p2) + +/* Parse cstring and build uint32 representing the version */ +static uint32 +build_semver_uint32(char *version_cstr) +{ + uint32 version = 0; + bool expect_num_token = false; + long max_dots = 2; + char *pos = version_cstr; + + while (*pos) + { + /* Invert expected token type */ + expect_num_token = !expect_num_token; + + if (expect_num_token) + { + char *end_pos; + long num; + long i; 
+ + /* Parse number */ + num = strtol(pos, &end_pos, 10); + + if (pos == end_pos || num > 99 || num < 0) + goto version_error; + + for (i = 0; i < max_dots; i++) + num *= 100; + + version += num; + + /* Move position */ + pos = end_pos; + } + else + { + /* Expect to see less dots */ + max_dots--; + + if (*pos != '.' || max_dots < 0) + goto version_error; + + /* Move position */ + pos++; + } + } + + if (!expect_num_token) + goto version_error; + + return version; + +version_error: + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, (errmsg("wrong version: \"%s\"", version_cstr), + errhint(INIT_ERROR_HINT))); + return 0; /* keep compiler happy */ +} + +/* Get version of pg_pathman's facade written in Pl/PgSQL */ +static uint32 +get_plpgsql_frontend_version(void) { - Oid v1 = *((const Oid *) p1); - Oid v2 = *((const Oid *) p2); - - if (v1 < v2) - return -1; - if (v1 > v2) - return 1; - return 0; + Relation pg_extension_rel; + ScanKeyData skey; + SysScanDesc scan; + HeapTuple htup; + + Datum datum; + bool isnull; + char *version_cstr; + + /* Look up the extension */ + pg_extension_rel = heap_open_compat(ExtensionRelationId, AccessShareLock); + + ScanKeyInit(&skey, + Anum_pg_extension_extname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum("pg_pathman")); + + scan = systable_beginscan(pg_extension_rel, + ExtensionNameIndexId, + true, NULL, 1, &skey); + + htup = systable_getnext(scan); + + /* Exit if pg_pathman's missing */ + if (!HeapTupleIsValid(htup)) + return 0; + + datum = heap_getattr(htup, Anum_pg_extension_extversion, + RelationGetDescr(pg_extension_rel), &isnull); + Assert(isnull == false); /* extversion should not be NULL */ + + /* Extract pg_pathman's version as cstring */ + version_cstr = text_to_cstring(DatumGetTextPP(datum)); + + systable_endscan(scan); + heap_close_compat(pg_extension_rel, AccessShareLock); + + return build_semver_uint32(version_cstr); +} + +/* Check that current Pl/PgSQL facade is compatible with 
internals */ +static void +validate_plpgsql_frontend_version(uint32 current_ver, uint32 compatible_ver) +{ + Assert(current_ver > 0); + Assert(compatible_ver > 0); + + /* Compare ver to 'lowest compatible frontend' version */ + if (current_ver < compatible_ver) + { + elog(DEBUG1, "current version: %x, lowest compatible: %x", + current_ver, compatible_ver); + + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("pg_pathman's Pl/PgSQL frontend is incompatible with " + "its shared library"), + errdetail("consider performing an update procedure"), + errhint(INIT_ERROR_HINT))); + } } diff --git a/src/init.h b/src/init.h deleted file mode 100644 index 9375976d..00000000 --- a/src/init.h +++ /dev/null @@ -1,119 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * init.h - * Initialization functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef PATHMAN_INIT_H -#define PATHMAN_INIT_H - -#include "relation_info.h" - -#include "postgres.h" -#include "storage/lmgr.h" -#include "utils/guc.h" -#include "utils/hsearch.h" -#include "utils/snapshot.h" - - -/* - * pg_pathman's initialization state structure. - */ -typedef struct -{ - bool pg_pathman_enable; /* GUC variable implementation */ - bool auto_partition; /* GUC variable for auto partition propagation */ - bool initialization_needed; /* do we need to perform init? */ -} PathmanInitState; - - -extern HTAB *partitioned_rels; -extern HTAB *parent_cache; - -/* pg_pathman's initialization state */ -extern PathmanInitState pg_pathman_init_state; - - -/* - * Check if pg_pathman is initialized. - */ -#define IsPathmanInitialized() ( !pg_pathman_init_state.initialization_needed ) - -/* - * Check if pg_pathman is enabled. 
- */ -#define IsPathmanEnabled() ( pg_pathman_init_state.pg_pathman_enable ) - -/* - * Check if pg_pathman is initialized & enabled. - */ -#define IsPathmanReady() ( IsPathmanInitialized() && IsPathmanEnabled() ) - -/* - * Check if auto partition propagation enabled - */ -#define IsAutoPartitionEnabled() ( pg_pathman_init_state.auto_partition ) - -/* - * Enable/disable auto partition propagation. Note that this only works if - * partitioned relation supports this. See enable_auto() and disable_auto() - * functions. - */ -#define SetAutoPartitionEnabled(value) \ - do { \ - pg_pathman_init_state.auto_partition = value; \ - } while (0) - -/* - * Emergency disable mechanism. - */ -#define DisablePathman() \ - do { \ - pg_pathman_init_state.pg_pathman_enable = false; \ - pg_pathman_init_state.initialization_needed = true; \ - } while (0) - - -/* - * Save and restore PathmanInitState. - */ -void save_pathman_init_state(PathmanInitState *temp_init_state); -void restore_pathman_init_state(const PathmanInitState *temp_init_state); - -/* - * Create main GUC variable. 
- */ -void init_main_pathman_toggle(void); - -Size estimate_pathman_shmem_size(void); -void init_shmem_config(void); - -bool load_config(void); -void unload_config(void); - - -void fill_prel_with_partitions(const Oid *partitions, - const uint32 parts_count, - PartRelationInfo *prel); - -Oid *find_inheritance_children_array(Oid parentrelId, - LOCKMODE lockmode, - uint32 *size); - -char *build_check_constraint_name_internal(Oid relid, - AttrNumber attno); - -bool pathman_config_contains_relation(Oid relid, - Datum *values, - bool *isnull, - TransactionId *xmin); - -bool read_pathman_params(Oid relid, - Datum *values, - bool *isnull); - -#endif diff --git a/src/nodes_common.c b/src/nodes_common.c index d092f625..f4ebc6b1 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -3,22 +3,32 @@ * nodes_common.c * Common code for custom nodes * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" +#include "init.h" #include "nodes_common.h" -#include "runtimeappend.h" +#include "runtime_append.h" #include "utils.h" -#include "optimizer/restrictinfo.h" +#include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else +#include "optimizer/clauses.h" +#include "optimizer/var.h" +#endif +#include "optimizer/tlist.h" +#include "rewrite/rewriteManip.h" #include "utils/memutils.h" +#include "utils/ruleutils.h" /* Allocation settings */ #define INITIAL_ALLOC_NUM 10 -#define ALLOC_EXP 2 /* Compare plans by 'original_order' */ @@ -46,9 +56,12 @@ transform_plans_into_states(RuntimeAppendState *scan_state, for (i = 0; i < n; i++) { - ChildScanCommon child = selected_plans[i]; + ChildScanCommon child; PlanState *ps; + Assert(selected_plans); + child = selected_plans[i]; + /* Create new node since this plan hasn't been used yet */ if (child->content_type != CHILD_PLAN_STATE) { @@ -82,12 +95,12 @@ 
transform_plans_into_states(RuntimeAppendState *scan_state, static ChildScanCommon * select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) { - int allocated = INITIAL_ALLOC_NUM; - int used = 0; + uint32 allocated, + used; ChildScanCommon *result; int i; - result = (ChildScanCommon *) palloc(allocated * sizeof(ChildScanCommon)); + ArrayAlloc(result, allocated, used, INITIAL_ALLOC_NUM); for (i = 0; i < nparts; i++) { @@ -97,78 +110,190 @@ select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) if (!child) continue; /* no plan for this partition */ - if (allocated <= used) - { - allocated *= ALLOC_EXP; - result = repalloc(result, allocated * sizeof(ChildScanCommon)); - } + ArrayPush(result, allocated, used, child); + } - result[used++] = child; + /* Get rid of useless array */ + if (used == 0) + { + pfree(result); + result = NULL; } *nres = used; return result; } -/* Replace Vars' varnos with the value provided by 'parent' */ +/* Adapt child's tlist for parent relation (change varnos and varattnos) */ static List * -replace_tlist_varnos(List *child_tlist, RelOptInfo *parent) +build_parent_tlist(List *tlist, AppendRelInfo *appinfo) { - ListCell *lc; - List *result = NIL; - int i = 1; /* resnos begin with 1 */ + List *temp_tlist, + *pulled_vars; + ListCell *lc1, + *lc2; + + temp_tlist = copyObject(tlist); + pulled_vars = pull_vars_of_level((Node *) temp_tlist, 0); - foreach (lc, child_tlist) + foreach (lc1, pulled_vars) { - Var *var = (Var *) ((TargetEntry *) lfirst(lc))->expr; - Var *newvar = (Var *) palloc(sizeof(Var)); + Var *tlist_var = (Var *) lfirst(lc1); + bool found_column = false; + AttrNumber attnum; - Assert(IsA(var, Var)); + /* Skip system attributes */ + if (tlist_var->varattno < InvalidAttrNumber) + continue; - *newvar = *var; - newvar->varno = parent->relid; - newvar->varnoold = parent->relid; + attnum = 0; + foreach (lc2, appinfo->translated_vars) + { + Var *translated_var = (Var *) lfirst(lc2); + + /* 
Don't forget to inc 'attunum'! */ + attnum++; + + /* Skip dropped columns */ + if (!translated_var) + continue; - result = lappend(result, makeTargetEntry((Expr *) newvar, - i++, /* item's index */ - NULL, false)); + /* Find this column in list of parent table columns */ + if (translated_var->varattno == tlist_var->varattno) + { + tlist_var->varattno = attnum; + found_column = true; /* successful mapping */ + break; + } + } + + /* Raise ERROR if mapping failed */ + if (!found_column) + elog(ERROR, + "table \"%s\" has no attribute %d of partition \"%s\"", + get_rel_name_or_relid(appinfo->parent_relid), + tlist_var->varattno, + get_rel_name_or_relid(appinfo->child_relid)); } - return result; + ChangeVarNodes((Node *) temp_tlist, + appinfo->child_relid, + appinfo->parent_relid, + 0); + + return temp_tlist; } -/* Append partition attribute in case it's not present in target list */ -static List * -append_part_attr_to_tlist(List *tlist, Index relno, const PartRelationInfo *prel) +#if PG_VERSION_NUM >= 140000 +/* + * Function "tlist_member_ignore_relabel" was removed in vanilla (375398244168) + * Function moved to pg_pathman. + */ +/* + * tlist_member_ignore_relabel + * Finds the (first) member of the given tlist whose expression is + * equal() to the given expression. Result is NULL if no such member. + * We ignore top-level RelabelType nodes + * while checking for a match. This is needed for some scenarios + * involving binary-compatible sort operations. 
+ */ +TargetEntry * +tlist_member_ignore_relabel(Expr *node, List *targetlist) { - ListCell *lc; - bool part_attr_found = false; + ListCell *temp; + + while (node && IsA(node, RelabelType)) + node = ((RelabelType *) node)->arg; - foreach (lc, tlist) + foreach(temp, targetlist) + { + TargetEntry *tlentry = (TargetEntry *) lfirst(temp); + Expr *tlexpr = tlentry->expr; + + while (tlexpr && IsA(tlexpr, RelabelType)) + tlexpr = ((RelabelType *) tlexpr)->arg; + + if (equal(node, tlexpr)) + return tlentry; + } + return NULL; +} +#endif + +/* Is tlist 'a' subset of tlist 'b'? (in terms of Vars) */ +static bool +tlist_is_var_subset(List *a, List *b) +{ + ListCell *lc; + + foreach (lc, b) { TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *var = (Var *) te->expr; - if (IsA(var, Var) && var->varoattno == prel->attnum) - part_attr_found = true; + if (!IsA(te->expr, Var) && !IsA(te->expr, RelabelType)) + continue; + + if (!tlist_member_ignore_relabel_compat(te->expr, a)) + return true; } - if (!part_attr_found) + return false; +} + +/* Append partition attribute in case it's not present in target list */ +static List * +append_part_attr_to_tlist(List *tlist, + AppendRelInfo *appinfo, + const PartRelationInfo *prel) +{ + ListCell *lc, + *lc_var; + List *vars_not_found = NIL; + + foreach (lc_var, prel->expr_vars) { - Var *newvar = makeVar(relno, - prel->attnum, - prel->atttype, - prel->atttypmod, - prel->attcollid, - 0); + bool part_attr_found = false; + Var *expr_var = (Var *) lfirst(lc_var), + *child_var; + + /* Get attribute number of partitioned column (may differ) */ + child_var = (Var *) list_nth(appinfo->translated_vars, + AttrNumberGetAttrOffset(expr_var->varattno)); + Assert(child_var); + foreach (lc, tlist) + { + TargetEntry *te = (TargetEntry *) lfirst(lc); + Var *var = (Var *) te->expr; + + if (IsA(var, Var) && var->varattno == child_var->varattno) + { + part_attr_found = true; + break; + } + } + + if (!part_attr_found) + vars_not_found = lappend(vars_not_found, 
child_var); + } + + foreach(lc, vars_not_found) + { Index last_item = list_length(tlist) + 1; + Var *newvar = (Var *) palloc(sizeof(Var)); + + /* copy Var */ + *newvar = *((Var *) lfirst(lc)); + + /* other fields except 'varno' should be correct */ + newvar->varno = appinfo->child_relid; tlist = lappend(tlist, makeTargetEntry((Expr *) newvar, last_item, NULL, false)); } + list_free(vars_not_found); return tlist; } @@ -249,17 +374,99 @@ unpack_runtimeappend_private(RuntimeAppendState *scan_state, CustomScan *cscan) } +/* Check that one of arguments of OpExpr is expression */ +static bool +clause_contains_prel_expr(Node *node, Node *prel_expr) +{ + if (node == NULL) + return false; + + if (match_expr_to_operand(prel_expr, node)) + return true; + + return expression_tree_walker(node, clause_contains_prel_expr, prel_expr); +} + + +/* Prepare CustomScan's custom expression for walk_expr_tree() */ +static Node * +canonicalize_custom_exprs_mutator(Node *node, void *cxt) +{ + if (node == NULL) + return NULL; + + if (IsA(node, Var)) + { + Var *var = palloc(sizeof(Var)); + *var = *(Var *) node; + +#if PG_VERSION_NUM >= 130000 +/* + * In >=13 (9ce77d75c5) varnoold and varoattno were changed to varnosyn and + * varattnosyn, and they are not consulted in _equalVar anymore. + */ + var->varattno = var->varattnosyn; +#else + /* Replace original 'varnoold' */ + var->varnoold = INDEX_VAR; + + /* Restore original 'varattno' */ + var->varattno = var->varoattno; +#endif + + return (Node *) var; + } + + return expression_tree_mutator_compat(node, canonicalize_custom_exprs_mutator, NULL); +} + +static List * +canonicalize_custom_exprs(List *custom_exps) +{ + return (List *) canonicalize_custom_exprs_mutator((Node *) custom_exps, NULL); +} + + +/* + * Filter all available clauses and extract relevant ones. 
+ */ +List * +get_partitioning_clauses(List *restrictinfo_list, + const PartRelationInfo *prel, + Index partitioned_rel) +{ + List *result = NIL; + ListCell *l; + + foreach(l, restrictinfo_list) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); + Node *prel_expr; + + Assert(IsA(rinfo, RestrictInfo)); + + prel_expr = PrelExpressionForRelid(prel, partitioned_rel); + + if (clause_contains_prel_expr((Node *) rinfo->clause, prel_expr)) + result = lappend(result, rinfo->clause); + } + return result; +} + + /* Transform partition ranges into plain array of partition Oids */ Oid * get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, bool include_parent) { ListCell *range_cell; - uint32 allocated = INITIAL_ALLOC_NUM; - uint32 used = 0; - Oid *result = (Oid *) palloc(allocated * sizeof(Oid)); + uint32 allocated, + used; + Oid *result; Oid *children = PrelGetChildrenArray(prel); + ArrayAlloc(result, allocated, used, INITIAL_ALLOC_NUM); + /* If required, add parent to result */ Assert(INITIAL_ALLOC_NUM >= 1); if (include_parent) @@ -269,19 +476,13 @@ get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, foreach (range_cell, ranges) { uint32 i; - uint32 a = lfirst_irange(range_cell).ir_lower, - b = lfirst_irange(range_cell).ir_upper; + uint32 a = irange_lower(lfirst_irange(range_cell)), + b = irange_upper(lfirst_irange(range_cell)); for (i = a; i <= b; i++) { - if (allocated <= used) - { - allocated *= ALLOC_EXP; - result = repalloc(result, allocated * sizeof(Oid)); - } - Assert(i < PrelChildrenCount(prel)); - result[used++] = children[i]; + ArrayPush(result, allocated, used, children[i]); } } @@ -327,14 +528,45 @@ create_append_path_common(PlannerInfo *root, result->nchildren = list_length(inner_append->subpaths); result->children = (ChildScanCommon *) - palloc(result->nchildren * sizeof(ChildScanCommon)); + palloc(result->nchildren * sizeof(ChildScanCommon)); + i = 0; foreach (lc, inner_append->subpaths) { - Path *path = lfirst(lc); - 
Index relindex = path->parent->relid; + Path *path = (Path *) lfirst(lc); + RelOptInfo *childrel = path->parent; ChildScanCommon child; + /* Do we have parameterization? */ + if (param_info) + { + Relids required_outer = param_info->ppi_req_outer; + + /* Rebuild path using new 'required_outer' */ + path = get_cheapest_parameterized_child_path(root, childrel, + required_outer); + } + + /* + * We were unable to re-parameterize child path, + * which means that we can't use Runtime[Merge]Append, + * since its children can't evaluate join quals. + */ + if (!path) + { + int j; + + for (j = 0; j < i; j++) + pfree(result->children[j]); + pfree(result->children); + + list_free_deep(result->cpath.custom_paths); + + pfree(result); + + return NULL; /* notify caller */ + } + child = (ChildScanCommon) palloc(sizeof(ChildScanCommonData)); result->cpath.path.startup_cost += path->startup_cost; @@ -342,7 +574,7 @@ create_append_path_common(PlannerInfo *root, child->content_type = CHILD_PATH; child->content.path = path; - child->relid = root->simple_rte_array[relindex]->relid; + child->relid = root->simple_rte_array[childrel->relid]->relid; Assert(child->relid != InvalidOid); result->cpath.custom_paths = lappend(result->cpath.custom_paths, @@ -364,40 +596,67 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, List *clauses, List *custom_plans, CustomScanMethods *scan_methods) { - RuntimeAppendPath *rpath = (RuntimeAppendPath *) best_path; - const PartRelationInfo *prel; - CustomScan *cscan; + RuntimeAppendPath *rpath = (RuntimeAppendPath *) best_path; + PartRelationInfo *prel; + CustomScan *cscan; prel = get_pathman_relation_info(rpath->relid); - Assert(prel); + if (!prel) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(rpath->relid)))); cscan = makeNode(CustomScan); cscan->custom_scan_tlist = NIL; /* initial value (empty list) */ - cscan->scan.plan.targetlist = NIL; if (custom_plans) { 
ListCell *lc1, *lc2; + bool processed_rel_tlist = false; + + Assert(list_length(rpath->cpath.custom_paths) == list_length(custom_plans)); forboth (lc1, rpath->cpath.custom_paths, lc2, custom_plans) { Plan *child_plan = (Plan *) lfirst(lc2); - RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; + RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; + AppendRelInfo *appinfo; - /* Replace rel's tlist with a matching one */ - if (!cscan->scan.plan.targetlist) - tlist = replace_tlist_varnos(child_plan->targetlist, rel); + appinfo = find_childrel_appendrelinfo_compat(root, child_rel); + + /* Replace rel's tlist with a matching one (for ExecQual()) */ + if (!processed_rel_tlist) + { + List *temp_tlist = build_parent_tlist(child_plan->targetlist, + appinfo); + + /* + * HACK: PostgreSQL may return a physical tlist, + * which is bad (we may have child IndexOnlyScans). + * If we find out that CustomScan's tlist is a + * Var-superset of child's tlist, we replace it + * with the latter, else we'll have a broken tlist + * labeling (Assert). + * + * NOTE: physical tlist may only be used if we're not + * asked to produce tuples of exact format (CP_EXACT_TLIST). 
+ */ + if (tlist_is_var_subset(temp_tlist, tlist)) + tlist = temp_tlist; + + /* Done, new target list has been built */ + processed_rel_tlist = true; + } /* Add partition attribute if necessary (for ExecQual()) */ child_plan->targetlist = append_part_attr_to_tlist(child_plan->targetlist, - child_rel->relid, - prel); + appinfo, prel); /* Now make custom_scan_tlist match child plans' targetlists */ if (!cscan->custom_scan_tlist) - cscan->custom_scan_tlist = replace_tlist_varnos(child_plan->targetlist, - rel); + cscan->custom_scan_tlist = build_parent_tlist(child_plan->targetlist, + appinfo); } } @@ -407,13 +666,16 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, /* Since we're not scanning any real table directly */ cscan->scan.scanrelid = 0; - cscan->custom_exprs = get_actual_clauses(clauses); + cscan->custom_exprs = get_partitioning_clauses(clauses, prel, rel->relid); cscan->custom_plans = custom_plans; cscan->methods = scan_methods; /* Cache 'prel->enable_parent' as well */ pack_runtimeappend_private(cscan, rpath, prel->enable_parent); + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + return &cscan->scan.plan; } @@ -445,23 +707,73 @@ begin_append_common(CustomScanState *node, EState *estate, int eflags) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - scan_state->custom_expr_states = - (List *) ExecInitExpr((Expr *) scan_state->custom_exprs, - (PlanState *) scan_state); - +#if PG_VERSION_NUM < 100000 node->ss.ps.ps_TupFromTlist = false; +#endif + + scan_state->prel = get_pathman_relation_info(scan_state->relid); + /* + * scan_state->prel can be NULL in case execution of prepared query that + * was prepared before DROP/CREATE EXTENSION pg_pathman or after + * pathman_config table truncation etc. 
+ */ + if (!scan_state->prel) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(scan_state->relid)))); + + /* Prepare expression according to set_set_customscan_references() */ + scan_state->prel_expr = PrelExpressionForRelid(scan_state->prel, INDEX_VAR); + + /* Prepare custom expression according to set_set_customscan_references() */ + scan_state->canon_custom_exprs = + canonicalize_custom_exprs(scan_state->custom_exprs); } TupleTableSlot * exec_append_common(CustomScanState *node, void (*fetch_next_tuple) (CustomScanState *node)) { - RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + TupleTableSlot *result; /* ReScan if no plans are selected */ if (scan_state->ncur_plans == 0) ExecReScan(&node->ss.ps); +#if PG_VERSION_NUM >= 100000 + fetch_next_tuple(node); /* use specific callback */ + + if (TupIsNull(scan_state->slot)) + return NULL; + + if (!node->ss.ps.ps_ProjInfo) + { + /* + * ExecInitCustomScan carelessly promises that it will always (resultopsfixed) + * return TTSOpsVirtual slot. To keep the promise, convert raw + * BufferHeapTupleSlot to virtual even if we don't have any projection. + * + * BTW, why original code decided to invent its own scan_state->slot + * instead of using ss.ss_ScanTupleSlot? + */ +#if PG_VERSION_NUM >= 120000 + return ExecCopySlot(node->ss.ps.ps_ResultTupleSlot, scan_state->slot); +#else + return scan_state->slot; +#endif + } + + /* + * Assuming that current projection doesn't involve SRF. + * NOTE: Any SFR functions since 69f4b9c are evaluated in ProjectSet node. 
+ */ + ResetExprContext(node->ss.ps.ps_ExprContext); + node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = scan_state->slot; + result = ExecProject(node->ss.ps.ps_ProjInfo); + + return result; +#elif PG_VERSION_NUM >= 90500 for (;;) { /* Fetch next tuple if we're done with Projections */ @@ -476,11 +788,11 @@ exec_append_common(CustomScanState *node, if (node->ss.ps.ps_ProjInfo) { ExprDoneCond isDone; - TupleTableSlot *result; ResetExprContext(node->ss.ps.ps_ExprContext); - node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = scan_state->slot; + node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = + scan_state->slot; result = ExecProject(node->ss.ps.ps_ProjInfo, &isDone); if (isDone != ExprEndResult) @@ -495,6 +807,7 @@ exec_append_common(CustomScanState *node, else return scan_state->slot; } +#endif } void @@ -504,34 +817,32 @@ end_append_common(CustomScanState *node) clear_plan_states(&scan_state->css); hash_destroy(scan_state->children_table); + close_pathman_relation_info(scan_state->prel); } void rescan_append_common(CustomScanState *node) { - RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - ExprContext *econtext = node->ss.ps.ps_ExprContext; - const PartRelationInfo *prel; - List *ranges; - ListCell *lc; - WalkerContext wcxt; - Oid *parts; - int nparts; - - prel = get_pathman_relation_info(scan_state->relid); - Assert(prel); + RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + ExprContext *econtext = node->ss.ps.ps_ExprContext; + PartRelationInfo *prel = scan_state->prel; + List *ranges; + ListCell *lc; + WalkerContext wcxt; + Oid *parts; + int nparts; /* First we select all available partitions... 
*/ - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); + ranges = list_make1_irange_full(prel, IR_COMPLETE); - InitWalkerContext(&wcxt, prel, econtext, false); - foreach (lc, scan_state->custom_exprs) + InitWalkerContext(&wcxt, scan_state->prel_expr, prel, econtext); + foreach (lc, scan_state->canon_custom_exprs) { - WrapperNode *wn; + WrapperNode *wrap; /* ... then we cut off irrelevant ones using the provided clauses */ - wn = walk_expr_tree((Expr *) lfirst(lc), &wcxt); - ranges = irange_list_intersect(ranges, wn->rangeset); + wrap = walk_expr_tree((Expr *) lfirst(lc), &wcxt); + ranges = irange_list_intersection(ranges, wrap->rangeset); } /* Get Oids of the required partitions */ @@ -556,19 +867,47 @@ rescan_append_common(CustomScanState *node) } void -explain_append_common(CustomScanState *node, HTAB *children_table, ExplainState *es) +explain_append_common(CustomScanState *node, + List *ancestors, + ExplainState *es, + HTAB *children_table, + List *custom_exprs) { + List *deparse_context; + char *exprstr; + + /* Set up deparsing context */ +#if PG_VERSION_NUM >= 130000 +/* + * Since 6ef77cf46e8 + */ + deparse_context = set_deparse_context_plan(es->deparse_cxt, + node->ss.ps.plan, + ancestors); +#else + deparse_context = set_deparse_context_planstate(es->deparse_cxt, + (Node *) node, + ancestors); +#endif + + /* Deparse the expression */ + exprstr = deparse_expression((Node *) make_ands_explicit(custom_exprs), + deparse_context, true, false); + + /* And add to es->str */ + ExplainPropertyText("Prune by", exprstr, es); + /* Construct excess PlanStates */ if (!es->analyze) { - int allocated = INITIAL_ALLOC_NUM; - int used = 0; - ChildScanCommon *custom_ps; - ChildScanCommon child; + uint32 allocated, + used; + ChildScanCommon *custom_ps, + child; HASH_SEQ_STATUS seqstat; int i; - custom_ps = (ChildScanCommon *) palloc(allocated * sizeof(ChildScanCommon)); + ArrayAlloc(custom_ps, allocated, used, INITIAL_ALLOC_NUM); /* There can't be any nodes 
since we're not scanning anything */ Assert(!node->custom_ps); @@ -578,13 +917,7 @@ explain_append_common(CustomScanState *node, HTAB *children_table, ExplainState while ((child = (ChildScanCommon) hash_seq_search(&seqstat))) { - if (allocated <= used) - { - allocated *= ALLOC_EXP; - custom_ps = repalloc(custom_ps, allocated * sizeof(ChildScanCommon)); - } - - custom_ps[used++] = child; + ArrayPush(custom_ps, allocated, used, child); } /* diff --git a/src/partition_creation.c b/src/partition_creation.c new file mode 100644 index 00000000..d6080c85 --- /dev/null +++ b/src/partition_creation.c @@ -0,0 +1,2057 @@ +/*------------------------------------------------------------------------- + * + * partition_creation.c + * Various functions for partition creation. + * + * Copyright (c) 2016-2020, Postgres Professional + * + *------------------------------------------------------------------------- + */ + +#include "init.h" +#include "partition_creation.h" +#include "partition_filter.h" +#include "pathman.h" +#include "pathman_workers.h" +#include "compat/pg_compat.h" +#include "xact_handling.h" + +#include "access/htup_details.h" +#include "access/reloptions.h" +#include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif +#include "access/xact.h" +#include "catalog/heap.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_trigger.h" +#include "catalog/pg_type.h" +#include "catalog/toasting.h" +#include "commands/defrem.h" +#include "commands/event_trigger.h" +#include "commands/sequence.h" +#include "commands/tablecmds.h" +#include "commands/tablespace.h" +#include "commands/trigger.h" +#include "executor/spi.h" +#include "miscadmin.h" +#include "nodes/nodeFuncs.h" +#include "parser/parse_func.h" +#include "parser/parse_utilcmd.h" +#include "parser/parse_relation.h" +#include "tcop/utility.h" +#if PG_VERSION_NUM >= 130000 +#include "utils/acl.h" +#endif +#include "utils/builtins.h" +#include 
"utils/datum.h" +#include "utils/fmgroids.h" +#include "utils/inval.h" +#include "utils/jsonb.h" +#include "utils/snapmgr.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "utils/typcache.h" + +#if PG_VERSION_NUM >= 100000 +#include "utils/regproc.h" +#endif + +static Oid spawn_partitions_val(Oid parent_relid, + const Bound *range_bound_min, + const Bound *range_bound_max, + Oid range_bound_type, + Datum interval_binary, + Oid interval_type, + Datum value, + Oid value_type, + Oid collid); + +static void create_single_partition_common(Oid parent_relid, + Oid partition_relid, + Constraint *check_constraint, + init_callback_params *callback_params, + List *trigger_columns); + +static Oid create_single_partition_internal(Oid parent_relid, + RangeVar *partition_rv, + char *tablespace); + +static char *choose_range_partition_name(Oid parent_relid, Oid parent_nsp); +static char *choose_hash_partition_name(Oid parent_relid, uint32 part_idx); + +static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, + Oid relowner); + +static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); +static void copy_rel_options(Oid parent_relid, Oid partition_relid); +static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); + +static Oid text_to_regprocedure(text *proname_args); + +static Constraint *make_constraint_common(char *name, Node *raw_expr); +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ +static String make_string_value_struct(char *str); +static Integer make_int_value_struct(int int_val); +#else +static Value make_string_value_struct(char* str); +static Value make_int_value_struct(int int_val); +#endif + +static Node *build_partitioning_expression(Oid parent_relid, + Oid *expr_type, + List **columns); + +/* + * --------------------------------------- + * Public interface (partition creation) + * --------------------------------------- + */ + +/* Create one RANGE partition [start_value, end_value) */ 
+Oid +create_single_range_partition_internal(Oid parent_relid, + const Bound *start_value, + const Bound *end_value, + Oid value_type, + RangeVar *partition_rv, + char *tablespace) +{ + Oid partition_relid; + Constraint *check_constr; + init_callback_params callback_params; + List *trigger_columns = NIL; + Node *expr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + + /* + * Sanity check. Probably needed only if some absurd init_callback + * decides to drop the table while we are creating partitions. + * It seems much better to use prel cache here, but this doesn't work + * because it regards tables with no partitions as not partitioned at all + * (build_pathman_relation_info returns NULL), and if I comment out that, + * tests fail for not immediately obvious reasons. Don't want to dig + * into this now. + */ + if (!pathman_config_contains_relation(parent_relid, values, isnull, NULL, NULL)) + { + elog(ERROR, "Can't create range partition: relid %u doesn't exist or not partitioned", parent_relid); + } + + /* Generate a name if asked to */ + if (!partition_rv) + { + Oid parent_nsp = get_rel_namespace(parent_relid); + char *parent_nsp_name = get_namespace_name(parent_nsp); + char *partition_name; + + partition_name = choose_range_partition_name(parent_relid, parent_nsp); + + partition_rv = makeRangeVar(parent_nsp_name, partition_name, -1); + } + + /* Check pathman config anld fill variables */ + expr = build_partitioning_expression(parent_relid, NULL, &trigger_columns); + + /* Create a partition & get 'partitioning expression' */ + partition_relid = create_single_partition_internal(parent_relid, + partition_rv, + tablespace); + + /* Build check constraint for RANGE partition */ + check_constr = build_range_check_constraint(partition_relid, + expr, + start_value, + end_value, + value_type); + + /* Cook args for init_callback */ + MakeInitCallbackRangeParams(&callback_params, + DEFAULT_PATHMAN_INIT_CALLBACK, + parent_relid, partition_relid, 
+ *start_value, *end_value, value_type); + + /* Add constraint & execute init_callback */ + create_single_partition_common(parent_relid, + partition_relid, + check_constr, + &callback_params, + trigger_columns); + + /* Return the Oid */ + return partition_relid; +} + +/* Create one HASH partition */ +Oid +create_single_hash_partition_internal(Oid parent_relid, + uint32 part_idx, + uint32 part_count, + RangeVar *partition_rv, + char *tablespace) +{ + Oid partition_relid, + expr_type; + Constraint *check_constr; + init_callback_params callback_params; + List *trigger_columns = NIL; + Node *expr; + + /* Generate a name if asked to */ + if (!partition_rv) + { + Oid parent_nsp = get_rel_namespace(parent_relid); + char *parent_nsp_name = get_namespace_name(parent_nsp); + char *partition_name; + + partition_name = choose_hash_partition_name(parent_relid, part_idx); + + partition_rv = makeRangeVar(parent_nsp_name, partition_name, -1); + } + + /* Create a partition & get 'partitionining expression' */ + partition_relid = create_single_partition_internal(parent_relid, + partition_rv, + tablespace); + + /* check pathman config and fill variables */ + expr = build_partitioning_expression(parent_relid, &expr_type, &trigger_columns); + + /* Build check constraint for HASH partition */ + check_constr = build_hash_check_constraint(partition_relid, + expr, + part_idx, + part_count, + expr_type); + + /* Cook args for init_callback */ + MakeInitCallbackHashParams(&callback_params, + DEFAULT_PATHMAN_INIT_CALLBACK, + parent_relid, partition_relid); + + /* Add constraint & execute init_callback */ + create_single_partition_common(parent_relid, + partition_relid, + check_constr, + &callback_params, + trigger_columns); + + /* Return the Oid */ + return partition_relid; +} + +/* Add constraint & execute init_callback */ +void +create_single_partition_common(Oid parent_relid, + Oid partition_relid, + Constraint *check_constraint, + init_callback_params *callback_params, + List 
*trigger_columns) +{ + Relation child_relation; + + /* Open the relation and add new check constraint & fkeys */ + child_relation = heap_open_compat(partition_relid, AccessExclusiveLock); + AddRelationNewConstraintsCompat(child_relation, NIL, + list_make1(check_constraint), + false, true, true); + heap_close_compat(child_relation, NoLock); + + /* Make constraint visible */ + CommandCounterIncrement(); + + /* Make trigger visible */ + CommandCounterIncrement(); + + /* Finally invoke 'init_callback' */ + invoke_part_callback(callback_params); + + /* Make possible changes visible */ + CommandCounterIncrement(); +} + +/* + * Create RANGE partitions (if needed) using either BGW or current backend. + * + * Returns Oid of the partition to store 'value'. + */ +Oid +create_partitions_for_value(Oid relid, Datum value, Oid value_type) +{ + TransactionId rel_xmin; + Oid last_partition = InvalidOid; + + /* Check that table is partitioned and fetch xmin */ + if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin, NULL)) + { + /* Take default values */ + bool spawn_using_bgw = DEFAULT_PATHMAN_SPAWN_USING_BGW, + enable_auto = DEFAULT_PATHMAN_AUTO; + + /* Values to be extracted from PATHMAN_CONFIG_PARAMS */ + Datum values[Natts_pathman_config_params]; + bool isnull[Natts_pathman_config_params]; + + /* Try fetching options from PATHMAN_CONFIG_PARAMS */ + if (read_pathman_params(relid, values, isnull)) + { + enable_auto = values[Anum_pathman_config_params_auto - 1]; + spawn_using_bgw = values[Anum_pathman_config_params_spawn_using_bgw - 1]; + } + + /* Emit ERROR if automatic partition creation is disabled */ + if (!enable_auto || !IsAutoPartitionEnabled()) + elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); + + /* + * If table has been partitioned in some previous xact AND + * we don't hold any conflicting locks, run BGWorker. 
+ */ + if (spawn_using_bgw && + xact_object_is_visible(rel_xmin) && + !xact_bgw_conflicting_lock_exists(relid)) + { + elog(DEBUG2, "create_partitions(): chose BGWorker [%u]", MyProcPid); + last_partition = create_partitions_for_value_bg_worker(relid, + value, + value_type); + } + /* Else it'd be better for the current backend to create partitions */ + else + { + elog(DEBUG2, "create_partitions(): chose backend [%u]", MyProcPid); + last_partition = create_partitions_for_value_internal(relid, + value, + value_type); + } + } + else + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(relid)); + + /* Check that 'last_partition' is valid */ + if (last_partition == InvalidOid) + elog(ERROR, "could not create new partitions for relation \"%s\"", + get_rel_name_or_relid(relid)); + + /* Make changes visible */ + AcceptInvalidationMessages(); + + return last_partition; +} + + +/* + * -------------------- + * Partition creation + * -------------------- + */ + +/* + * Create partitions (if needed) and return Oid of the partition to store 'value'. + * + * NB: This function should not be called directly, + * use create_partitions_for_value() instead. + */ +Oid +create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) +{ + Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) + { + PartRelationInfo *prel; + LockAcquireResult lock_result; /* could we lock the parent? 
*/ + Oid base_bound_type; /* base type of prel->ev_type */ + Oid base_value_type; /* base type of value_type */ + + /* Prevent modifications of partitioning scheme */ + lock_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); + + /* Fetch PartRelationInfo by 'relid' */ + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_RANGE); + + /* Fetch base types of prel->ev_type & value_type */ + base_bound_type = getBaseType(prel->ev_type); + base_value_type = getBaseType(value_type); + + /* + * Search for a suitable partition if we didn't hold it, + * since somebody might have just created it for us. + * + * If the table is locked, it means that we've + * already failed to find a suitable partition + * and called this function to do the job. + */ + Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); + if (lock_result == LOCKACQUIRE_OK) + { + Oid *parts; + int nparts; + + /* Search for matching partitions */ + parts = find_partitions_for_value(value, value_type, prel, &nparts); + + /* Shout if there's more than one */ + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); + + /* It seems that we got a partition! 
*/ + else if (nparts == 1) + { + /* Unlock the parent (we're not going to spawn) */ + UnlockRelationOid(relid, ShareUpdateExclusiveLock); + + /* Simply return the suitable partition */ + partid = parts[0]; + } + + /* Don't forget to free */ + pfree(parts); + } + + /* Else spawn a new one (we hold a lock on the parent) */ + if (partid == InvalidOid) + { + RangeEntry *ranges = PrelGetRangesArray(prel); + Bound bound_min, /* absolute MIN */ + bound_max; /* absolute MAX */ + + Oid interval_type = InvalidOid; + Datum interval_binary, /* assigned 'width' of one partition */ + interval_text; + + /* Copy datums in order to protect them from cache invalidation */ + bound_min = CopyBound(&ranges[0].min, + prel->ev_byval, + prel->ev_len); + + bound_max = CopyBound(&ranges[PrelLastChild(prel)].max, + prel->ev_byval, + prel->ev_len); + + /* Check if interval is set */ + if (isnull[Anum_pathman_config_range_interval - 1]) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot spawn new partition for key '%s'", + datum_to_cstring(value, value_type)), + errdetail("default range interval is NULL"))); + } + + /* Retrieve interval as TEXT from tuple */ + interval_text = values[Anum_pathman_config_range_interval - 1]; + + /* Convert interval to binary representation */ + interval_binary = extract_binary_interval_from_text(interval_text, + base_bound_type, + &interval_type); + + /* At last, spawn partitions to store the value */ + partid = spawn_partitions_val(PrelParentRelid(prel), + &bound_min, &bound_max, base_bound_type, + interval_binary, interval_type, + value, base_value_type, + prel->ev_collid); + } + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + } + else + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(relid)); + + return partid; +} + +/* + * Append\prepend partitions if there's no partition to store 'value'. + * NOTE: Used by create_partitions_for_value_internal(). 
+ */ +static Oid +spawn_partitions_val(Oid parent_relid, /* parent's Oid */ + const Bound *range_bound_min, /* parent's MIN boundary */ + const Bound *range_bound_max, /* parent's MAX boundary */ + Oid range_bound_type, /* type of boundary's value */ + Datum interval_binary, /* interval in binary form */ + Oid interval_type, /* INTERVALOID or prel->ev_type */ + Datum value, /* value to be INSERTed */ + Oid value_type, /* type of value */ + Oid collid) /* collation id */ +{ + bool should_append; /* append or prepend? */ + + Oid move_bound_op_func, /* operator's function */ + move_bound_op_ret_type; /* operator's ret type */ + + FmgrInfo cmp_value_bound_finfo, /* exec 'value (>=|<) bound' */ + move_bound_finfo; /* exec 'bound + interval' */ + + Datum cur_leading_bound, /* boundaries of a new partition */ + cur_following_bound; + + Bound value_bound = MakeBound(value); + + Oid last_partition = InvalidOid; + + + fill_type_cmp_fmgr_info(&cmp_value_bound_finfo, value_type, range_bound_type); + + /* Is it possible to append\prepend a partition? */ + if (IsInfinite(range_bound_min) && IsInfinite(range_bound_max)) + ereport(ERROR, (errmsg("cannot spawn a partition"), + errdetail("both bounds are infinite"))); + + /* value >= MAX_BOUNDARY */ + else if (cmp_bounds(&cmp_value_bound_finfo, collid, + &value_bound, range_bound_max) >= 0) + { + should_append = true; + cur_leading_bound = BoundGetValue(range_bound_max); + } + + /* value < MIN_BOUNDARY */ + else if (cmp_bounds(&cmp_value_bound_finfo, collid, + &value_bound, range_bound_min) < 0) + { + should_append = false; + cur_leading_bound = BoundGetValue(range_bound_min); + } + + /* There's a gap, halt and emit ERROR */ + else ereport(ERROR, (errmsg("cannot spawn a partition"), + errdetail("there is a gap"))); + + /* Fetch operator's underlying function and ret type */ + extract_op_func_and_ret_type(should_append ? 
"+" : "-", + range_bound_type, + interval_type, + &move_bound_op_func, + &move_bound_op_ret_type); + + /* Perform casts if types don't match (e.g. date + interval = timestamp) */ + if (move_bound_op_ret_type != range_bound_type) + { + /* Cast 'cur_leading_bound' to 'move_bound_op_ret_type' */ + cur_leading_bound = perform_type_cast(cur_leading_bound, + range_bound_type, + move_bound_op_ret_type, + NULL); /* might emit ERROR */ + + /* Update 'range_bound_type' */ + range_bound_type = move_bound_op_ret_type; + + /* Fetch new comparison function */ + fill_type_cmp_fmgr_info(&cmp_value_bound_finfo, + value_type, + range_bound_type); + + /* Since type has changed, fetch another operator */ + extract_op_func_and_ret_type(should_append ? "+" : "-", + range_bound_type, + interval_type, + &move_bound_op_func, + &move_bound_op_ret_type); + + /* What, again? Don't want to deal with this nightmare */ + if (move_bound_op_ret_type != range_bound_type) + elog(ERROR, "error in function " CppAsString(spawn_partitions_val)); + } + + /* Get operator's underlying function */ + fmgr_info(move_bound_op_func, &move_bound_finfo); + + /* Execute comparison function cmp(value, cur_leading_bound) */ + while (should_append ? 
+ check_ge(&cmp_value_bound_finfo, collid, value, cur_leading_bound) : + check_lt(&cmp_value_bound_finfo, collid, value, cur_leading_bound)) + { + Bound bounds[2]; + int rc; + bool isnull; + char *create_sql; + HeapTuple typeTuple; + char *typname; + Oid parent_nsp = get_rel_namespace(parent_relid); + char *parent_nsp_name = get_namespace_name(parent_nsp); + char *partition_name = choose_range_partition_name(parent_relid, parent_nsp); + char *pathman_schema; + + /* Assign the 'following' boundary to current 'leading' value */ + cur_following_bound = cur_leading_bound; + + /* Move leading bound by interval (exec 'leading (+|-) INTERVAL') */ + cur_leading_bound = FunctionCall2(&move_bound_finfo, + cur_leading_bound, + interval_binary); + + bounds[0] = MakeBound(should_append ? cur_following_bound : cur_leading_bound); + bounds[1] = MakeBound(should_append ? cur_leading_bound : cur_following_bound); + + /* + * Instead of directly calling create_single_range_partition_internal() + * we are going to call it through SPI, to make it possible for various + * DDL-replicating extensions to catch that call and do something about + * it. --sk + */ + + /* Get typname of range_bound_type to perform cast */ + typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(range_bound_type)); + if (!HeapTupleIsValid(typeTuple)) + elog(ERROR, "cache lookup failed for type %u", range_bound_type); + typname = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname)); + ReleaseSysCache(typeTuple); + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + + /* Construct call to create_single_range_partition() */ + create_sql = psprintf( + "select %s.create_single_range_partition('%s.%s'::regclass, '%s'::%s, '%s'::%s, '%s.%s', NULL::text)", + quote_identifier(pathman_schema), + quote_identifier(parent_nsp_name), + quote_identifier(get_rel_name(parent_relid)), + IsInfinite(&bounds[0]) ? 
"NULL" : datum_to_cstring(bounds[0].value, range_bound_type), + typname, + IsInfinite(&bounds[1]) ? "NULL" : datum_to_cstring(bounds[1].value, range_bound_type), + typname, + quote_identifier(parent_nsp_name), + quote_identifier(partition_name) + ); + + /* ...and call it. */ + SPI_connect(); + PushActiveSnapshot(GetTransactionSnapshot()); + rc = SPI_execute(create_sql, false, 0); + if (rc <= 0 || SPI_processed != 1) + elog(ERROR, "Failed to create range partition"); + last_partition = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], + SPI_tuptable->tupdesc, + 1, &isnull)); + Assert(!isnull); + SPI_finish(); + PopActiveSnapshot(); + +#ifdef USE_ASSERT_CHECKING + elog(DEBUG2, "%s partition with following='%s' & leading='%s' [%u]", + (should_append ? "Appending" : "Prepending"), + DebugPrintDatum(cur_following_bound, range_bound_type), + DebugPrintDatum(cur_leading_bound, range_bound_type), + MyProcPid); +#endif + } + + return last_partition; +} + +/* Choose a good name for a RANGE partition */ +static char * +choose_range_partition_name(Oid parent_relid, Oid parent_nsp) +{ + Datum part_num; + Oid part_seq_relid; + char *part_seq_nspname, + *part_seq_relname; + RangeVar *part_seq_rv; + Oid save_userid; + int save_sec_context; + bool need_priv_escalation = !superuser(); /* we might be a SU */ + char *relname; + int attempts_cnt = 1000; + + /* Dispatch sequence and lock it using AccessShareLock */ + part_seq_nspname = get_namespace_name(get_rel_namespace(parent_relid)); + part_seq_relname = build_sequence_name_relid_internal(parent_relid); + part_seq_rv = makeRangeVar(part_seq_nspname, part_seq_relname, -1); + part_seq_relid = RangeVarGetRelid(part_seq_rv, AccessShareLock, true); + + /* Could not find part number generating sequence */ + if (!OidIsValid(part_seq_relid)) + elog(ERROR, "auto naming sequence \"%s\" does not exist", part_seq_relname); + + pfree(part_seq_nspname); + pfree(part_seq_relname); + pfree(part_seq_rv); + + /* Do we have to escalate privileges? 
*/ + if (need_priv_escalation) + { + /* Get current user's Oid and security context */ + GetUserIdAndSecContext(&save_userid, &save_sec_context); + + /* Become superuser in order to bypass sequence ACL checks */ + SetUserIdAndSecContext(BOOTSTRAP_SUPERUSERID, + save_sec_context | SECURITY_LOCAL_USERID_CHANGE); + } + + /* Generate unique name */ + while (true) + { + /* Get next integer for partition name */ + part_num = DirectFunctionCall1(nextval_oid, ObjectIdGetDatum(part_seq_relid)); + + relname = psprintf("%s_" UINT64_FORMAT, + get_rel_name(parent_relid), + (uint64) DatumGetInt64(part_num)); /* can't use UInt64 on 9.5 */ + + /* + * If we found a unique name or attempts number exceeds some reasonable + * value then we quit + * + * XXX Should we throw an exception if max attempts number is reached? + */ + if (get_relname_relid(relname, parent_nsp) == InvalidOid || attempts_cnt < 0) + break; + + pfree(relname); + attempts_cnt--; + } + + /* Restore user's privileges */ + if (need_priv_escalation) + SetUserIdAndSecContext(save_userid, save_sec_context); + + return relname; +} + +/* Choose a good name for a HASH partition */ +static char * +choose_hash_partition_name(Oid parent_relid, uint32 part_idx) +{ + return psprintf("%s_%u", get_rel_name(parent_relid), part_idx); +} + +/* Create a partition-like table (no constraints yet) */ +static Oid +create_single_partition_internal(Oid parent_relid, + RangeVar *partition_rv, + char *tablespace) +{ + /* Value to be returned */ + Oid partition_relid = InvalidOid; /* safety */ + + /* Parent's namespace and name */ + Oid parent_nsp; + char *parent_name, + *parent_nsp_name; + + /* Elements of the "CREATE TABLE" query tree */ + RangeVar *parent_rv; + TableLikeClause like_clause; + CreateStmt create_stmt; + List *create_stmts; + ListCell *lc; + + /* Current user and security context */ + Oid save_userid; + int save_sec_context; + bool need_priv_escalation = !superuser(); /* we might be a SU */ + + /* Lock parent and check if it 
exists */ + LockRelationOid(parent_relid, ShareUpdateExclusiveLock); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) + elog(ERROR, "relation %u does not exist", parent_relid); + + /* Check that table is registered in PATHMAN_CONFIG */ + if (!pathman_config_contains_relation(parent_relid, NULL, NULL, NULL, NULL)) + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)); + + /* Do we have to escalate privileges? */ + if (need_priv_escalation) + { + /* Get current user's Oid and security context */ + GetUserIdAndSecContext(&save_userid, &save_sec_context); + + /* Check that user's allowed to spawn partitions */ + if (ACLCHECK_OK != pg_class_aclcheck(parent_relid, save_userid, + ACL_SPAWN_PARTITIONS)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied for parent relation \"%s\"", + get_rel_name_or_relid(parent_relid)), + errdetail("user is not allowed to create new partitions"), + errhint("consider granting INSERT privilege"))); + + /* Become superuser in order to bypass various ACL checks */ + SetUserIdAndSecContext(BOOTSTRAP_SUPERUSERID, + save_sec_context | SECURITY_LOCAL_USERID_CHANGE); + } + + /* Cache parent's namespace and name */ + parent_name = get_rel_name(parent_relid); + parent_nsp = get_rel_namespace(parent_relid); + parent_nsp_name = get_namespace_name(parent_nsp); + + /* Make up parent's RangeVar */ + parent_rv = makeRangeVar(parent_nsp_name, parent_name, -1); + + /* If no 'tablespace' is provided, get parent's tablespace */ + if (!tablespace) + tablespace = get_tablespace_name(get_rel_tablespace(parent_relid)); + + /* Initialize TableLikeClause structure */ + NodeSetTag(&like_clause, T_TableLikeClause); + like_clause.relation = copyObject(parent_rv); + like_clause.options = CREATE_TABLE_LIKE_DEFAULTS | + CREATE_TABLE_LIKE_INDEXES | + CREATE_TABLE_LIKE_STORAGE; + + /* Initialize CreateStmt structure */ + NodeSetTag(&create_stmt, T_CreateStmt); + 
create_stmt.relation = copyObject(partition_rv); + create_stmt.tableElts = list_make1(copyObject(&like_clause)); + create_stmt.inhRelations = list_make1(copyObject(parent_rv)); + create_stmt.ofTypename = NULL; + create_stmt.constraints = NIL; + create_stmt.options = NIL; + create_stmt.oncommit = ONCOMMIT_NOOP; + create_stmt.tablespacename = tablespace; + create_stmt.if_not_exists = false; +#if PG_VERSION_NUM >= 100000 + create_stmt.partbound = NULL; + create_stmt.partspec = NULL; +#endif +#if defined(PGPRO_EE) && PG_VERSION_NUM < 100000 + create_stmt.partition_info = NULL; +#endif +#if PG_VERSION_NUM >= 120000 + create_stmt.accessMethod = NULL; +#endif + + /* Obtain the sequence of Stmts to create partition and link it to parent */ + create_stmts = transformCreateStmt(&create_stmt, NULL); + + /* Create the partition and all required relations */ + foreach (lc, create_stmts) + { + Node *cur_stmt; + + /* Fetch current CreateStmt */ + cur_stmt = (Node *) lfirst(lc); + + if (IsA(cur_stmt, CreateStmt)) + { + Oid child_relowner; + + /* Partition should have the same owner as the parent */ + child_relowner = get_rel_owner(parent_relid); + + /* Create a partition and save its Oid */ + partition_relid = create_table_using_stmt((CreateStmt *) cur_stmt, + child_relowner).objectId; + + /* Copy attributes to partition */ + copy_rel_options(parent_relid, partition_relid); + + /* Copy FOREIGN KEYS of the parent table */ + copy_foreign_keys(parent_relid, partition_relid); + + /* Make changes visible */ + CommandCounterIncrement(); + + /* Copy ACL privileges of the parent table and set "attislocal" */ + postprocess_child_table_and_atts(parent_relid, partition_relid); + } + else if (IsA(cur_stmt, CreateForeignTableStmt)) + { + elog(ERROR, "FDW partition creation is not implemented yet"); + } + /* + * 3737965249cd fix (since 12.5, 11.10, etc) reworked LIKE handling + * to process it after DefineRelation. 
+ */ +#if (PG_VERSION_NUM >= 130000) || \ + ((PG_VERSION_NUM < 130000) && (PG_VERSION_NUM >= 120005)) || \ + ((PG_VERSION_NUM < 120000) && (PG_VERSION_NUM >= 110010)) || \ + ((PG_VERSION_NUM < 110000) && (PG_VERSION_NUM >= 100015)) || \ + ((PG_VERSION_NUM < 100000) && (PG_VERSION_NUM >= 90620)) || \ + ((PG_VERSION_NUM < 90600) && (PG_VERSION_NUM >= 90524)) + else if (IsA(cur_stmt, TableLikeClause)) + { + /* + * Do delayed processing of LIKE options. This + * will result in additional sub-statements for us + * to process. We can just tack those onto the + * to-do list. + */ + TableLikeClause *like = (TableLikeClause *) cur_stmt; + RangeVar *rv = create_stmt.relation; + List *morestmts; + + morestmts = expandTableLikeClause(rv, like); + create_stmts = list_concat(create_stmts, morestmts); + + /* + * We don't need a CCI now + */ + continue; + } +#endif + else + { + /* + * Recurse for anything else. Note the recursive + * call will stash the objects so created into our + * event trigger context. + */ + ProcessUtilityCompat(cur_stmt, + "we have to provide a query string", + PROCESS_UTILITY_SUBCOMMAND, + NULL, + None_Receiver, + NULL); + } + + /* Update config one more time */ + CommandCounterIncrement(); + } + + /* Restore user's privileges */ + if (need_priv_escalation) + SetUserIdAndSecContext(save_userid, save_sec_context); + + return partition_relid; +} + +/* Create a new table using cooked CreateStmt */ +static ObjectAddress +create_table_using_stmt(CreateStmt *create_stmt, Oid relowner) +{ + ObjectAddress table_addr; + Datum toast_options; + static char *validnsps[] = HEAP_RELOPT_NAMESPACES; + int guc_level; + + /* Create new GUC level... */ + guc_level = NewGUCNestLevel(); + + /* ... 
and set client_min_messages = warning */ + (void) set_config_option(CppAsString(client_min_messages), "WARNING", + PGC_USERSET, PGC_S_SESSION, + GUC_ACTION_SAVE, true, 0, false); + + /* Create new partition owned by parent's posessor */ + table_addr = DefineRelationCompat(create_stmt, RELKIND_RELATION, relowner, + NULL); + + /* Save data about a simple DDL command that was just executed */ + EventTriggerCollectSimpleCommand(table_addr, + InvalidObjectAddress, + (Node *) create_stmt); + + /* + * Let NewRelationCreateToastTable decide if this + * one needs a secondary relation too. + */ + CommandCounterIncrement(); + + /* Parse and validate reloptions for the toast table */ + toast_options = transformRelOptions((Datum) 0, create_stmt->options, + "toast", validnsps, true, false); + + /* Parse options for a new toast table */ + (void) heap_reloptions(RELKIND_TOASTVALUE, toast_options, true); + + /* Now create the toast table if needed */ + NewRelationCreateToastTable(table_addr.objectId, toast_options); + + /* Restore original GUC values */ + AtEOXact_GUC(true, guc_level); + + /* Return the address */ + return table_addr; +} + +/* Copy ACL privileges of parent table and set "attislocal" = true */ +static void +postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) +{ + Relation parent_rel, + partition_rel, + pg_class_rel, + pg_attribute_rel; + + TupleDesc pg_class_desc, + pg_attribute_desc; + + List *translated_vars; + + HeapTuple htup; + ScanKeyData skey[2]; + SysScanDesc scan; + + Datum acl_datum; + bool acl_null; + + Snapshot snapshot; + + /* Both parent & partition have already been locked */ + parent_rel = heap_open_compat(parent_relid, NoLock); + partition_rel = heap_open_compat(partition_relid, NoLock); + + make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars, NULL); + + heap_close_compat(parent_rel, NoLock); + heap_close_compat(partition_rel, NoLock); + + /* Open catalog's relations */ + pg_class_rel = 
heap_open_compat(RelationRelationId, RowExclusiveLock); + pg_attribute_rel = heap_open_compat(AttributeRelationId, RowExclusiveLock); + + /* Get most recent snapshot */ + snapshot = RegisterSnapshot(GetLatestSnapshot()); + + pg_class_desc = RelationGetDescr(pg_class_rel); + pg_attribute_desc = RelationGetDescr(pg_attribute_rel); + + htup = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); + if (!HeapTupleIsValid(htup)) + elog(ERROR, "cache lookup failed for relation %u", parent_relid); + + /* Get parent's ACL */ + acl_datum = heap_getattr(htup, Anum_pg_class_relacl, pg_class_desc, &acl_null); + + /* Copy datum if it's not NULL */ + if (!acl_null) + { + Form_pg_attribute acl_column; + + acl_column = TupleDescAttr(pg_class_desc, Anum_pg_class_relacl - 1); + acl_datum = datumCopy(acl_datum, acl_column->attbyval, acl_column->attlen); + } + + /* Release 'htup' */ + ReleaseSysCache(htup); + + /* Search for 'partition_relid' */ + ScanKeyInit(&skey[0], +#if PG_VERSION_NUM >= 120000 + Anum_pg_class_oid, +#else + ObjectIdAttributeNumber, +#endif + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(partition_relid)); + + scan = systable_beginscan(pg_class_rel, ClassOidIndexId, + true, snapshot, 1, skey); + + /* There should be exactly one tuple (our child) */ + if (HeapTupleIsValid(htup = systable_getnext(scan))) + { + ItemPointerData iptr; + Datum values[Natts_pg_class] = { (Datum) 0 }; + bool nulls[Natts_pg_class] = { false }; + bool replaces[Natts_pg_class] = { false }; + + /* Copy ItemPointer of this tuple */ + iptr = htup->t_self; + + values[Anum_pg_class_relacl - 1] = acl_datum; /* ACL array */ + nulls[Anum_pg_class_relacl - 1] = acl_null; /* do we have ACL? 
*/ + replaces[Anum_pg_class_relacl - 1] = true; + + /* Build new tuple with parent's ACL */ + htup = heap_modify_tuple(htup, pg_class_desc, values, nulls, replaces); + + /* Update child's tuple with related indexes */ + CatalogTupleUpdate(pg_class_rel, &iptr, htup); + } + + systable_endscan(scan); + + + /* Search for 'parent_relid's columns */ + ScanKeyInit(&skey[0], + Anum_pg_attribute_attrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(parent_relid)); + + /* Consider only user-defined columns (>0) */ + ScanKeyInit(&skey[1], + Anum_pg_attribute_attnum, + BTEqualStrategyNumber, F_INT2GT, + Int16GetDatum(InvalidAttrNumber)); + + scan = systable_beginscan(pg_attribute_rel, AttributeRelidNumIndexId, + true, snapshot, lengthof(skey), skey); + + /* Go through the list of parent's columns */ + while (HeapTupleIsValid(htup = systable_getnext(scan))) + { + ScanKeyData subskey[2]; + SysScanDesc subscan; + HeapTuple subhtup; + + AttrNumber cur_attnum; + bool cur_attnum_null; + Var *cur_var; + + /* Get parent column's ACL */ + acl_datum = heap_getattr(htup, Anum_pg_attribute_attacl, + pg_attribute_desc, &acl_null); + + /* Copy datum if it's not NULL */ + if (!acl_null) + { + Form_pg_attribute acl_column; + + acl_column = TupleDescAttr(pg_attribute_desc, Anum_pg_attribute_attacl - 1); + + acl_datum = datumCopy(acl_datum, + acl_column->attbyval, + acl_column->attlen); + } + + /* Fetch number of current column (parent) */ + cur_attnum = DatumGetInt16(heap_getattr(htup, Anum_pg_attribute_attnum, + pg_attribute_desc, &cur_attnum_null)); + Assert(cur_attnum_null == false); /* must not be NULL! 
*/ + + /* Fetch Var of partition's corresponding column */ + cur_var = (Var *) list_nth(translated_vars, cur_attnum - 1); + if (!cur_var) + continue; /* column is dropped */ + + Assert(cur_var->varattno != InvalidAttrNumber); + + /* Search for 'partition_relid' */ + ScanKeyInit(&subskey[0], + Anum_pg_attribute_attrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(partition_relid)); + + /* Search for 'partition_relid's columns */ + ScanKeyInit(&subskey[1], + Anum_pg_attribute_attnum, + BTEqualStrategyNumber, F_INT2EQ, + Int16GetDatum(cur_var->varattno)); /* partition's column */ + + subscan = systable_beginscan(pg_attribute_rel, AttributeRelidNumIndexId, + true, snapshot, lengthof(subskey), subskey); + + /* There should be exactly one tuple (our child's column) */ + if (HeapTupleIsValid(subhtup = systable_getnext(subscan))) + { + ItemPointerData iptr; + Datum values[Natts_pg_attribute] = { (Datum) 0 }; + bool nulls[Natts_pg_attribute] = { false }; + bool replaces[Natts_pg_attribute] = { false }; + + /* Copy ItemPointer of this tuple */ + iptr = subhtup->t_self; + + /* Change ACL of this column */ + values[Anum_pg_attribute_attacl - 1] = acl_datum; /* ACL array */ + nulls[Anum_pg_attribute_attacl - 1] = acl_null; /* do we have ACL? 
*/ + replaces[Anum_pg_attribute_attacl - 1] = true; + + /* Change 'attislocal' for DROP COLUMN */ + values[Anum_pg_attribute_attislocal - 1] = false; /* should not be local */ + nulls[Anum_pg_attribute_attislocal - 1] = false; /* NOT NULL */ + replaces[Anum_pg_attribute_attislocal - 1] = true; + + /* Build new tuple with parent's ACL */ + subhtup = heap_modify_tuple(subhtup, pg_attribute_desc, + values, nulls, replaces); + + /* Update child's tuple and related indexes */ + CatalogTupleUpdate(pg_attribute_rel, &iptr, subhtup); + } + + systable_endscan(subscan); + } + + systable_endscan(scan); + + /* Don't forget to free snapshot */ + UnregisterSnapshot(snapshot); + + heap_close_compat(pg_class_rel, RowExclusiveLock); + heap_close_compat(pg_attribute_rel, RowExclusiveLock); +} + +/* Copy foreign keys of parent table (updates pg_class) */ +static void +copy_foreign_keys(Oid parent_relid, Oid partition_oid) +{ + Oid copy_fkeys_proc_args[] = { REGCLASSOID, REGCLASSOID }; + List *copy_fkeys_proc_name; + FmgrInfo copy_fkeys_proc_flinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(copy_fkeys_proc_fcinfo, 2); +#else + FunctionCallInfoData copy_fkeys_proc_fcinfo_data; + FunctionCallInfo copy_fkeys_proc_fcinfo = ©_fkeys_proc_fcinfo_data; +#endif + char *pathman_schema; + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + + /* Build function's name */ + copy_fkeys_proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(copy_foreign_keys))); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(copy_fkeys_proc_name, 2, + copy_fkeys_proc_args, false), + ©_fkeys_proc_flinfo); + + InitFunctionCallInfoData(*copy_fkeys_proc_fcinfo, ©_fkeys_proc_flinfo, + 2, InvalidOid, NULL, NULL); +#if PG_VERSION_NUM >= 120000 + copy_fkeys_proc_fcinfo->args[0].value = ObjectIdGetDatum(parent_relid); + 
copy_fkeys_proc_fcinfo->args[0].isnull = false; + copy_fkeys_proc_fcinfo->args[1].value = ObjectIdGetDatum(partition_oid); + copy_fkeys_proc_fcinfo->args[1].isnull = false; +#else + copy_fkeys_proc_fcinfo->arg[0] = ObjectIdGetDatum(parent_relid); + copy_fkeys_proc_fcinfo->argnull[0] = false; + copy_fkeys_proc_fcinfo->arg[1] = ObjectIdGetDatum(partition_oid); + copy_fkeys_proc_fcinfo->argnull[1] = false; +#endif + + /* Invoke the callback */ + FunctionCallInvoke(copy_fkeys_proc_fcinfo); + + /* Make changes visible */ + CommandCounterIncrement(); +} + +/* Copy reloptions of foreign table (updates pg_class) */ +static void +copy_rel_options(Oid parent_relid, Oid partition_relid) +{ + Relation pg_class_rel; + + HeapTuple parent_htup, + partition_htup, + new_htup; + + Datum reloptions; + bool reloptions_null; + Datum relpersistence; + + Datum values[Natts_pg_class]; + bool isnull[Natts_pg_class], + replace[Natts_pg_class] = { false }; + + pg_class_rel = heap_open_compat(RelationRelationId, RowExclusiveLock); + + parent_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); + partition_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(partition_relid)); + + if (!HeapTupleIsValid(parent_htup)) + elog(ERROR, "cache lookup failed for relation %u", parent_relid); + + if (!HeapTupleIsValid(partition_htup)) + elog(ERROR, "cache lookup failed for relation %u", partition_relid); + + /* Extract parent's reloptions */ + reloptions = SysCacheGetAttr(RELOID, parent_htup, + Anum_pg_class_reloptions, + &reloptions_null); + + /* Extract parent's relpersistence */ + relpersistence = ((Form_pg_class) GETSTRUCT(parent_htup))->relpersistence; + + /* Fill in reloptions */ + values[Anum_pg_class_reloptions - 1] = reloptions; + isnull[Anum_pg_class_reloptions - 1] = reloptions_null; + replace[Anum_pg_class_reloptions - 1] = true; + + /* Fill in relpersistence */ + values[Anum_pg_class_relpersistence - 1] = relpersistence; + isnull[Anum_pg_class_relpersistence - 1] = false; + 
replace[Anum_pg_class_relpersistence - 1] = true; + + new_htup = heap_modify_tuple(partition_htup, + RelationGetDescr(pg_class_rel), + values, isnull, replace); + CatalogTupleUpdate(pg_class_rel, &new_htup->t_self, new_htup); + heap_freetuple(new_htup); + + ReleaseSysCache(parent_htup); + ReleaseSysCache(partition_htup); + + heap_close_compat(pg_class_rel, RowExclusiveLock); + + /* Make changes visible */ + CommandCounterIncrement(); +} + + +/* + * ----------------------------- + * Check constraint generation + * ----------------------------- + */ + +/* Drop pg_pathman's check constraint by 'relid' */ +void +drop_pathman_check_constraint(Oid relid) +{ + char *constr_name; +#if PG_VERSION_NUM >= 130000 + List *cmds; +#else + AlterTableStmt *stmt; +#endif + AlterTableCmd *cmd; + + /* Build a correct name for this constraint */ + constr_name = build_check_constraint_name_relid_internal(relid); + +#if PG_VERSION_NUM < 130000 + stmt = makeNode(AlterTableStmt); + stmt->relation = makeRangeVarFromRelid(relid); + stmt->relkind = OBJECT_TABLE; +#endif + + cmd = makeNode(AlterTableCmd); + cmd->subtype = AT_DropConstraint; + cmd->name = constr_name; + cmd->behavior = DROP_RESTRICT; + cmd->missing_ok = true; + +#if PG_VERSION_NUM >= 130000 + cmds = list_make1(cmd); + + /* + * Since 1281a5c907b AlterTable() was changed. + * recurse = true (see stmt->relation->inh makeRangeVarFromRelid() makeRangeVar()) + * Dropping constraint won't do parse analyze, so AlterTableInternal + * is enough. 
+ */ + AlterTableInternal(relid, cmds, true); +#else + stmt->cmds = list_make1(cmd); + + /* See function AlterTableGetLockLevel() */ + AlterTable(relid, AccessExclusiveLock, stmt); +#endif +} + +/* Add pg_pathman's check constraint using 'relid' */ +void +add_pathman_check_constraint(Oid relid, Constraint *constraint) +{ + Relation part_rel = heap_open_compat(relid, AccessExclusiveLock); + + AddRelationNewConstraintsCompat(part_rel, NIL, + list_make1(constraint), + false, true, true); + + heap_close_compat(part_rel, NoLock); +} + + + +/* Build RANGE check constraint expression tree */ +Node * +build_raw_range_check_tree(Node *raw_expression, + const Bound *start_value, + const Bound *end_value, + Oid value_type) +{ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ +#define BuildConstExpr(node, value, value_type) \ + do { \ + (node)->val.sval = make_string_value_struct( \ + datum_to_cstring((value), (value_type))); \ + (node)->location = -1; \ + } while (0) +#else +#define BuildConstExpr(node, value, value_type) \ + do { \ + (node)->val = make_string_value_struct( \ + datum_to_cstring((value), (value_type))); \ + (node)->location = -1; \ + } while (0) +#endif + +#define BuildCmpExpr(node, opname, expr, c) \ + do { \ + (node)->name = list_make1(makeString(opname)); \ + (node)->kind = AEXPR_OP; \ + (node)->lexpr = (Node *) (expr); \ + (node)->rexpr = (Node *) (c); \ + (node)->location = -1; \ + } while (0) + +#define CopyTypeCastExpr(node, src, argument) \ + do { \ + memcpy((node), (src), sizeof(TypeCast)); \ + (node)->arg = (Node *) (argument); \ + (node)->typeName = (TypeName *) copyObject((node)->typeName); \ + } while (0) + + BoolExpr *and_oper = makeNode(BoolExpr); + A_Expr *left_arg = makeNode(A_Expr), + *right_arg = makeNode(A_Expr); + A_Const *left_const = makeNode(A_Const), + *right_const = makeNode(A_Const); + + and_oper->boolop = AND_EXPR; + and_oper->args = NIL; + and_oper->location = -1; + + /* Left comparison (VAR >= start_value) */ + if 
(!IsInfinite(start_value)) + { + /* Build left boundary */ + BuildConstExpr(left_const, BoundGetValue(start_value), value_type); + + /* Build ">=" clause */ + BuildCmpExpr(left_arg, ">=", raw_expression, left_const); + + /* Cast const to expression's type (e.g. composite key, row type) */ + if (IsA(raw_expression, TypeCast)) + { + TypeCast *cast = makeNode(TypeCast); + + /* Copy cast to expression's type */ + CopyTypeCastExpr(cast, raw_expression, left_const); + + left_arg->rexpr = (Node *) cast; + } + + and_oper->args = lappend(and_oper->args, left_arg); + } + + /* Right comparison (VAR < end_value) */ + if (!IsInfinite(end_value)) + { + /* Build right boundary */ + BuildConstExpr(right_const, BoundGetValue(end_value), value_type); + + /* Build "<" clause */ + BuildCmpExpr(right_arg, "<", raw_expression, right_const); + + /* Cast const to expression's type (e.g. composite key, row type) */ + if (IsA(raw_expression, TypeCast)) + { + TypeCast *cast = makeNode(TypeCast); + + /* Copy cast to expression's type */ + CopyTypeCastExpr(cast, raw_expression, right_const); + + right_arg->rexpr = (Node *) cast; + } + + and_oper->args = lappend(and_oper->args, right_arg); + } + + /* (-inf, +inf) */ + if (and_oper->args == NIL) + elog(ERROR, "cannot create partition with range (-inf, +inf)"); + + return (Node *) and_oper; + +#undef BuildConstExpr +#undef BuildCmpExpr +#undef CopyTypeCastExpr +} + +/* Build complete RANGE check constraint */ +Constraint * +build_range_check_constraint(Oid child_relid, + Node *raw_expression, + const Bound *start_value, + const Bound *end_value, + Oid value_type) +{ + Constraint *range_constr; + char *range_constr_name; + + /* Build a correct name for this constraint */ + range_constr_name = build_check_constraint_name_relid_internal(child_relid); + + /* Initialize basic properties of a CHECK constraint */ + range_constr = make_constraint_common(range_constr_name, + build_raw_range_check_tree(raw_expression, + start_value, + end_value, + 
value_type)); + /* Everything seems to be fine */ + return range_constr; +} + +/* Check if range overlaps with any partitions */ +bool +check_range_available(Oid parent_relid, + const Bound *start, + const Bound *end, + Oid value_type, + bool raise_error) +{ + PartRelationInfo *prel; + bool result = true; + + /* Try fetching the PartRelationInfo structure */ + if ((prel = get_pathman_relation_info(parent_relid)) != NULL) + { + RangeEntry *ranges; + FmgrInfo cmp_func; + uint32 i; + + /* Emit an error if it is not partitioned by RANGE */ + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + + /* Fetch comparison function */ + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(value_type), + getBaseType(prel->ev_type)); + + ranges = PrelGetRangesArray(prel); + for (i = 0; i < PrelChildrenCount(prel); i++) + { + int c1, c2; + + c1 = cmp_bounds(&cmp_func, prel->ev_collid, start, &ranges[i].max); + c2 = cmp_bounds(&cmp_func, prel->ev_collid, end, &ranges[i].min); + + /* There's something! */ + if (c1 < 0 && c2 > 0) + { + if (raise_error) + { + elog(ERROR, "specified range [%s, %s) overlaps " + "with existing partitions", + BoundToCString(start, value_type), + BoundToCString(end, value_type)); + } + /* Too bad, so sad */ + else result = false; + } + } + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + } + else + { + ereport(WARNING, (errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)))); + } + + return result; +} + +/* Build HASH check constraint expression tree */ +Node * +build_raw_hash_check_tree(Node *raw_expression, + uint32 part_idx, + uint32 part_count, + Oid relid, + Oid value_type) +{ + A_Expr *eq_oper = makeNode(A_Expr); + FuncCall *part_idx_call = makeNode(FuncCall), + *hash_call = makeNode(FuncCall); + A_Const *part_idx_c = makeNode(A_Const), + *part_count_c = makeNode(A_Const); + + List *get_hash_part_idx_proc; + + Oid hash_proc; + TypeCacheEntry *tce; + char *pathman_schema; + + tce = lookup_type_cache(value_type, TYPECACHE_HASH_PROC); + hash_proc = tce->hash_proc; + + /* Total amount of partitions */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ + part_count_c->val.ival = make_int_value_struct(part_count); +#else + part_count_c->val = make_int_value_struct(part_count); +#endif + part_count_c->location = -1; + + /* Index of this partition (hash % total amount) */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ + part_idx_c->val.ival = make_int_value_struct(part_idx); +#else + part_idx_c->val = make_int_value_struct(part_idx); +#endif + part_idx_c->location = -1; + + /* Call hash_proc() */ + hash_call->funcname = list_make1(makeString(get_func_name(hash_proc))); + hash_call->args = list_make1(raw_expression); + hash_call->agg_order = NIL; + hash_call->agg_filter = NULL; + hash_call->agg_within_group = false; + hash_call->agg_star = false; + hash_call->agg_distinct = false; + hash_call->func_variadic = false; + hash_call->over = NULL; + hash_call->location = -1; + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + + /* Build schema-qualified name of function get_hash_part_idx() */ + get_hash_part_idx_proc = + list_make2(makeString(pathman_schema), + 
makeString("get_hash_part_idx")); + + /* Call get_hash_part_idx() */ + part_idx_call->funcname = get_hash_part_idx_proc; + part_idx_call->args = list_make2(hash_call, part_count_c); + part_idx_call->agg_order = NIL; + part_idx_call->agg_filter = NULL; + part_idx_call->agg_within_group = false; + part_idx_call->agg_star = false; + part_idx_call->agg_distinct = false; + part_idx_call->func_variadic = false; + part_idx_call->over = NULL; + part_idx_call->location = -1; + + /* Construct equality operator */ + eq_oper->kind = AEXPR_OP; + eq_oper->name = list_make1(makeString("=")); + eq_oper->lexpr = (Node *) part_idx_call; + eq_oper->rexpr = (Node *) part_idx_c; + eq_oper->location = -1; + + return (Node *) eq_oper; +} + +/* Build complete HASH check constraint */ +Constraint * +build_hash_check_constraint(Oid child_relid, + Node *raw_expression, + uint32 part_idx, + uint32 part_count, + Oid value_type) +{ + Constraint *hash_constr; + char *hash_constr_name; + + /* Build a correct name for this constraint */ + hash_constr_name = build_check_constraint_name_relid_internal(child_relid); + + /* Initialize basic properties of a CHECK constraint */ + hash_constr = make_constraint_common(hash_constr_name, + build_raw_hash_check_tree(raw_expression, + part_idx, + part_count, + child_relid, + value_type)); + /* Everything seems to be fine */ + return hash_constr; +} + +static Constraint * +make_constraint_common(char *name, Node *raw_expr) +{ + Constraint *constraint; + + /* Initialize basic properties of a CHECK constraint */ + constraint = makeNode(Constraint); + constraint->conname = name; + constraint->deferrable = false; + constraint->initdeferred = false; + constraint->location = -1; + constraint->contype = CONSTR_CHECK; + constraint->is_no_inherit = false; + + /* Validate existing data using this constraint */ + constraint->skip_validation = false; + constraint->initially_valid = true; + + /* Finally we should build an expression tree */ + constraint->raw_expr = 
raw_expr; + + return constraint; +} + +#if PG_VERSION_NUM >= 150000 /* for commits 639a86e36aae, c4cc2850f4d1 */ +static String +make_string_value_struct(char* str) +{ + String val; + + val.type = T_String; + val.sval = str; + + return val; +} + +static Integer +make_int_value_struct(int int_val) +{ + Integer val; + + val.type = T_Integer; + val.ival = int_val; + + return val; +} +#else +static Value +make_string_value_struct(char *str) +{ + Value val; + + val.type = T_String; + val.val.str = str; + + return val; +} + +static Value +make_int_value_struct(int int_val) +{ + Value val; + + val.type = T_Integer; + val.val.ival = int_val; + + return val; +} +#endif /* PG_VERSION_NUM >= 150000 */ + +/* + * --------------------- + * Callback invocation + * --------------------- + */ + +/* Invoke 'init_callback' for a partition */ +static void +invoke_init_callback_internal(init_callback_params *cb_params) +{ +#define JSB_INIT_VAL(value, val_type, val_cstring) \ + do { \ + if ((val_cstring) != NULL) \ + { \ + (value)->type = jbvString; \ + (value)->val.string.len = strlen(val_cstring); \ + (value)->val.string.val = val_cstring; \ + } \ + else \ + { \ + (value)->type = jbvNull; \ + Assert((val_type) != WJB_KEY); \ + } \ + \ + pushJsonbValue(&jsonb_state, val_type, (value)); \ + } while (0) + + Oid parent_oid = cb_params->parent_relid; + Oid partition_oid = cb_params->partition_relid; + + FmgrInfo cb_flinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(cb_fcinfo, 1); +#else + FunctionCallInfoData cb_fcinfo_data; + FunctionCallInfo cb_fcinfo = &cb_fcinfo_data; +#endif + + JsonbParseState *jsonb_state = NULL; + JsonbValue *result, + key, + val; + + char *parent_name, + *parent_namespace, + *partition_name, + *partition_namespace; + + + /* Fetch & cache callback's Oid if needed */ + if (!cb_params->callback_is_cached) + { + Datum param_values[Natts_pathman_config_params]; + bool param_isnull[Natts_pathman_config_params]; + + /* Search for init_callback entry in 
PATHMAN_CONFIG_PARAMS */ + if (read_pathman_params(parent_oid, param_values, param_isnull)) + { + Datum init_cb_datum; /* signature of init_callback */ + AttrNumber init_cb_attno = Anum_pathman_config_params_init_callback; + + /* Extract Datum storing callback's signature */ + init_cb_datum = param_values[init_cb_attno - 1]; + + /* Cache init_callback's Oid */ + if (init_cb_datum) + { + /* Try fetching callback's Oid */ + cb_params->callback = text_to_regprocedure(DatumGetTextP(init_cb_datum)); + + if (!RegProcedureIsValid(cb_params->callback)) + ereport(ERROR, + (errcode(ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION), + errmsg("callback function \"%s\" does not exist", + TextDatumGetCString(init_cb_datum)))); + } + /* There's no callback */ + else cb_params->callback = InvalidOid; + + /* We've made a lookup */ + cb_params->callback_is_cached = true; + } + } + + /* No callback is set, exit */ + if (!OidIsValid(cb_params->callback)) + return; + + /* Validate the callback's signature */ + validate_part_callback(cb_params->callback, true); + + parent_name = get_rel_name(parent_oid); + parent_namespace = get_namespace_name(get_rel_namespace(parent_oid)); + + partition_name = get_rel_name(partition_oid); + partition_namespace = get_namespace_name(get_rel_namespace(partition_oid)); + + /* Generate JSONB we're going to pass to callback */ + switch (cb_params->parttype) + { + case PT_HASH: + { + pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); + + JSB_INIT_VAL(&key, WJB_KEY, "parent"); + JSB_INIT_VAL(&val, WJB_VALUE, parent_name); + JSB_INIT_VAL(&key, WJB_KEY, "parent_schema"); + JSB_INIT_VAL(&val, WJB_VALUE, parent_namespace); + JSB_INIT_VAL(&key, WJB_KEY, "partition"); + JSB_INIT_VAL(&val, WJB_VALUE, partition_name); + JSB_INIT_VAL(&key, WJB_KEY, "partition_schema"); + JSB_INIT_VAL(&val, WJB_VALUE, partition_namespace); + JSB_INIT_VAL(&key, WJB_KEY, "parttype"); + JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_HASH)); + + result = pushJsonbValue(&jsonb_state, 
WJB_END_OBJECT, NULL); + } + break; + + case PT_RANGE: + { + char *start_value = NULL, + *end_value = NULL; + Bound sv_datum = cb_params->params.range_params.start_value, + ev_datum = cb_params->params.range_params.end_value; + Oid value_type = cb_params->params.range_params.value_type; + + /* Convert min to CSTRING */ + if (!IsInfinite(&sv_datum)) + start_value = BoundToCString(&sv_datum, value_type); + + /* Convert max to CSTRING */ + if (!IsInfinite(&ev_datum)) + end_value = BoundToCString(&ev_datum, value_type); + + pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); + + JSB_INIT_VAL(&key, WJB_KEY, "parent"); + JSB_INIT_VAL(&val, WJB_VALUE, parent_name); + JSB_INIT_VAL(&key, WJB_KEY, "parent_schema"); + JSB_INIT_VAL(&val, WJB_VALUE, parent_namespace); + JSB_INIT_VAL(&key, WJB_KEY, "partition"); + JSB_INIT_VAL(&val, WJB_VALUE, partition_name); + JSB_INIT_VAL(&key, WJB_KEY, "partition_schema"); + JSB_INIT_VAL(&val, WJB_VALUE, partition_namespace); + JSB_INIT_VAL(&key, WJB_KEY, "parttype"); + JSB_INIT_VAL(&val, WJB_VALUE, PartTypeToCString(PT_RANGE)); + + /* Lower bound */ + JSB_INIT_VAL(&key, WJB_KEY, "range_min"); + JSB_INIT_VAL(&val, WJB_VALUE, start_value); + + /* Upper bound */ + JSB_INIT_VAL(&key, WJB_KEY, "range_max"); + JSB_INIT_VAL(&val, WJB_VALUE, end_value); + + result = pushJsonbValue(&jsonb_state, WJB_END_OBJECT, NULL); + } + break; + + default: + WrongPartType(cb_params->parttype); + result = NULL; /* keep compiler happy */ + } + + /* Fetch function call data */ + fmgr_info(cb_params->callback, &cb_flinfo); + + InitFunctionCallInfoData(*cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); +#if PG_VERSION_NUM >= 120000 + cb_fcinfo->args[0].value = PointerGetDatum(JsonbValueToJsonb(result)); + cb_fcinfo->args[0].isnull = false; +#else + cb_fcinfo->arg[0] = PointerGetDatum(JsonbValueToJsonb(result)); + cb_fcinfo->argnull[0] = false; +#endif + + /* Invoke the callback */ + FunctionCallInvoke(cb_fcinfo); +} + +/* Invoke a callback of a specified type */ 
+void +invoke_part_callback(init_callback_params *cb_params) +{ + switch (cb_params->cb_type) + { + case PT_INIT_CALLBACK: + invoke_init_callback_internal(cb_params); + break; + + default: + elog(ERROR, "Unknown callback type: %u", cb_params->cb_type); + } +} + +/* + * Checks that callback function meets specific requirements. + * It must have the only JSONB argument and BOOL return type. + */ +bool +validate_part_callback(Oid procid, bool emit_error) +{ + HeapTuple tp; + Form_pg_proc functup; + bool is_ok = true; + + if (procid == DEFAULT_PATHMAN_INIT_CALLBACK) + return true; + + tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(procid)); + if (!HeapTupleIsValid(tp)) + elog(ERROR, "callback function %u does not exist", procid); + + functup = (Form_pg_proc) GETSTRUCT(tp); + + if (functup->pronargs != 1 || + functup->proargtypes.values[0] != JSONBOID || + functup->prorettype != VOIDOID) + is_ok = false; + + ReleaseSysCache(tp); + + if (emit_error && !is_ok) + elog(ERROR, + "callback function must have the following signature: " + "callback(arg JSONB) RETURNS VOID"); + + return is_ok; +} + +/* + * Utility function that converts signature of procedure into regprocedure. + * + * Precondition: proc_signature != NULL. + * + * Returns InvalidOid if proname_args is not found. + * Raise error if it's incorrect. 
+ */ +static Oid +text_to_regprocedure(text *proc_signature) +{ +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(fcinfo, 1); +#else + FunctionCallInfoData fcinfo_data; + FunctionCallInfo fcinfo = &fcinfo_data; +#endif + Datum result; + + InitFunctionCallInfoData(*fcinfo, NULL, 1, InvalidOid, NULL, NULL); + +#if PG_VERSION_NUM >= 120000 + fcinfo->args[0].value = PointerGetDatum(proc_signature); + fcinfo->args[0].isnull = false; +#elif PG_VERSION_NUM >= 90600 + fcinfo->arg[0] = PointerGetDatum(proc_signature); + fcinfo->argnull[0] = false; +#else + fcinfo->arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); + fcinfo->argnull[0] = false; +#endif + + result = to_regprocedure(fcinfo); + + return DatumGetObjectId(result); +} + +/* Extract column names from raw expression */ +static bool +extract_column_names(Node *node, List **columns) +{ + if (node == NULL) + return false; + + if (IsA(node, ColumnRef)) + { + ListCell *lc; + + foreach(lc, ((ColumnRef *) node)->fields) + if (IsA(lfirst(lc), String)) + *columns = lappend(*columns, lfirst(lc)); + } + + return raw_expression_tree_walker(node, extract_column_names, columns); +} + +/* Returns raw partitioning expression + expr_type + columns */ +static Node * +build_partitioning_expression(Oid parent_relid, + Oid *expr_type, /* ret val #1 */ + List **columns) /* ret val #2 */ +{ + /* Values extracted from PATHMAN_CONFIG */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + char *expr_cstr; + Node *expr; + + /* Check that table is registered in PATHMAN_CONFIG */ + if (!pathman_config_contains_relation(parent_relid, values, isnull, NULL, NULL)) + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)); + + expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); + expr = parse_partitioning_expression(parent_relid, expr_cstr, NULL, NULL); + + /* We need expression type for hash functions */ + if (expr_type) + { + /* Finally return expression type */ + 
*expr_type = exprType( + cook_partitioning_expression(parent_relid, expr_cstr, NULL)); + } + + if (columns) + { + /* Column list should be empty */ + Assert(*columns == NIL); + extract_column_names(expr, columns); + } + + pfree(expr_cstr); + return expr; +} diff --git a/src/partition_filter.c b/src/partition_filter.c index 71e1b894..3d5e4bd3 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -3,38 +3,110 @@ * partition_filter.c * Select partition for INSERT operation * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ -#include "partition_filter.h" +#include "compat/pg_compat.h" +#include "init.h" #include "nodes_common.h" +#include "pathman.h" +#include "partition_creation.h" +#include "partition_filter.h" +#include "partition_router.h" #include "utils.h" -#include "init.h" +#include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif +#include "access/xact.h" +#include "catalog/pg_class.h" +#include "catalog/pg_type.h" +#include "foreign/fdwapi.h" +#include "foreign/foreign.h" +#include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif +#include "rewrite/rewriteManip.h" #include "utils/guc.h" #include "utils/memutils.h" -#include "nodes/nodeFuncs.h" -#include "utils/lsyscache.h" +#include "utils/syscache.h" + + +#define ALLOC_EXP 2 + + +/* + * HACK: 'estate->es_query_cxt' as data storage + * + * We use this struct as an argument for fake + * MemoryContextCallback pf_memcxt_callback() + * in order to attach some additional info to + * EState (estate->es_query_cxt is involved). + */ +typedef struct +{ + int estate_alloc_result_rels; /* number of allocated result rels */ + bool estate_not_modified; /* did we modify EState somehow? 
*/ +} estate_mod_data; + +/* + * Allow INSERTs into any FDW \ postgres_fdw \ no FDWs at all. + */ +typedef enum +{ + PF_FDW_INSERT_DISABLED = 0, /* INSERTs into FDWs are prohibited */ + PF_FDW_INSERT_POSTGRES, /* INSERTs into postgres_fdw are OK */ + PF_FDW_INSERT_ANY_FDW /* INSERTs into any FDWs are OK */ +} PF_insert_fdw_mode; + +static const struct config_enum_entry pg_pathman_insert_into_fdw_options[] = { + { "disabled", PF_FDW_INSERT_DISABLED, false }, + { "postgres", PF_FDW_INSERT_POSTGRES, false }, + { "any_fdw", PF_FDW_INSERT_ANY_FDW, false }, + { NULL, 0, false } +}; bool pg_pathman_enable_partition_filter = true; +int pg_pathman_insert_into_fdw = PF_FDW_INSERT_POSTGRES; CustomScanMethods partition_filter_plan_methods; CustomExecMethods partition_filter_exec_methods; -static List * pfilter_build_tlist(List *tlist); -static ResultRelInfo * getResultRelInfo(Oid partid, PartitionFilterState *state); +static ExprState *prepare_expr_state(const PartRelationInfo *prel, + Relation source_rel, + EState *estate); + +static void prepare_rri_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + +static void prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + +static void prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + +static Node *fix_returning_list_mutator(Node *node, void *state); + +static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel); +static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); + +static void pf_memcxt_callback(void *arg); +static estate_mod_data * fetch_estate_mod_data(EState *estate); + void init_partition_filter_static_data(void) { - partition_filter_plan_methods.CustomName = "PartitionFilter"; + partition_filter_plan_methods.CustomName = INSERT_NODE_NAME; partition_filter_plan_methods.CreateCustomScanState = partition_filter_create_scan_state; - 
partition_filter_exec_methods.CustomName = "PartitionFilter"; + partition_filter_exec_methods.CustomName = INSERT_NODE_NAME; partition_filter_exec_methods.BeginCustomScan = partition_filter_begin; partition_filter_exec_methods.ExecCustomScan = partition_filter_exec; partition_filter_exec_methods.EndCustomScan = partition_filter_end; @@ -44,7 +116,7 @@ init_partition_filter_static_data(void) partition_filter_exec_methods.ExplainCustomScan = partition_filter_explain; DefineCustomBoolVariable("pg_pathman.enable_partitionfilter", - "Enables the planner's use of PartitionFilter custom node.", + "Enables the planner's use of " INSERT_NODE_NAME " custom node.", NULL, &pg_pathman_enable_partition_filter, true, @@ -53,31 +125,708 @@ init_partition_filter_static_data(void) NULL, NULL, NULL); + + DefineCustomEnumVariable("pg_pathman.insert_into_fdw", + "Allow INSERTS into FDW partitions.", + NULL, + &pg_pathman_insert_into_fdw, + PF_FDW_INSERT_POSTGRES, + pg_pathman_insert_into_fdw_options, + PGC_SUSET, + 0, + NULL, + NULL, + NULL); + + RegisterCustomScanMethods(&partition_filter_plan_methods); +} + + +/* + * --------------------------- + * Partition Storage (cache) + * --------------------------- + */ + +/* Initialize ResultPartsStorage (hash table etc) */ +void +init_result_parts_storage(ResultPartsStorage *parts_storage, + Oid parent_relid, + ResultRelInfo *current_rri, + EState *estate, + CmdType cmd_type, + bool close_relations, + bool speculative_inserts, + rri_holder_cb init_rri_holder_cb, + void *init_rri_holder_cb_arg, + rri_holder_cb fini_rri_holder_cb, + void *fini_rri_holder_cb_arg) +{ + HASHCTL *result_rels_table_config = &parts_storage->result_rels_table_config; + + memset(result_rels_table_config, 0, sizeof(HASHCTL)); + result_rels_table_config->keysize = sizeof(Oid); + result_rels_table_config->entrysize = sizeof(ResultPartsStorage); + + parts_storage->result_rels_table = hash_create("ResultRelInfo storage", 10, + result_rels_table_config, + HASH_ELEM | 
HASH_BLOBS); + Assert(current_rri); + parts_storage->base_rri = current_rri; + + Assert(estate); + parts_storage->estate = estate; + + /* ResultRelInfoHolder initialization callback */ + parts_storage->init_rri_holder_cb = init_rri_holder_cb; + parts_storage->init_rri_holder_cb_arg = init_rri_holder_cb_arg; + + /* ResultRelInfoHolder finalization callback */ + parts_storage->fini_rri_holder_cb = fini_rri_holder_cb; + parts_storage->fini_rri_holder_cb_arg = fini_rri_holder_cb_arg; + + Assert(cmd_type == CMD_INSERT || cmd_type == CMD_UPDATE); + parts_storage->command_type = cmd_type; + parts_storage->speculative_inserts = speculative_inserts; + + /* + * Should ResultPartsStorage do ExecCloseIndices and heap_close on + * finalization? + */ + parts_storage->close_relations = close_relations; + parts_storage->head_open_lock_mode = RowExclusiveLock; + + /* Fetch PartRelationInfo for this partitioned relation */ + parts_storage->prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, parts_storage->prel, PT_ANY); + + /* Build a partitioning expression state */ + parts_storage->prel_expr_state = prepare_expr_state(parts_storage->prel, + parts_storage->base_rri->ri_RelationDesc, + parts_storage->estate); + + /* Build expression context */ + parts_storage->prel_econtext = CreateExprContext(parts_storage->estate); +} + +/* Free ResultPartsStorage (close relations etc) */ +void +fini_result_parts_storage(ResultPartsStorage *parts_storage) +{ + HASH_SEQ_STATUS stat; + ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ + + hash_seq_init(&stat, parts_storage->result_rels_table); + while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + { + /* Call finalization callback if needed */ + if (parts_storage->fini_rri_holder_cb) + parts_storage->fini_rri_holder_cb(rri_holder, parts_storage); + + /* + * Close indices, unless ExecEndPlan won't do that for us (this is + * is CopyFrom which misses it, not usual executor run, 
essentially). + * Otherwise, it is always automatically closed; in <= 11, relcache + * refs of rris managed heap_open/close on their own, and ExecEndPlan + * closed them directly. Since 9ddef3, relcache management + * of executor was centralized; now rri refs are copies of ones in + * estate->es_relations, which are closed in ExecEndPlan. + * So we push our rel there, and it is also automatically closed. + */ + if (parts_storage->close_relations) + { + ExecCloseIndices(rri_holder->result_rel_info); + /* And relation itself */ + heap_close_compat(rri_holder->result_rel_info->ri_RelationDesc, + NoLock); + } + + /* Free conversion-related stuff */ + destroy_tuple_map(rri_holder->tuple_map); + + destroy_tuple_map(rri_holder->tuple_map_child); + + /* Don't forget to close 'prel'! */ + if (rri_holder->prel) + close_pathman_relation_info(rri_holder->prel); + } + + /* Finally destroy hash table */ + hash_destroy(parts_storage->result_rels_table); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(parts_storage->prel); +} + +/* Find a ResultRelInfo for the partition using ResultPartsStorage */ +ResultRelInfoHolder * +scan_result_parts_storage(EState *estate, ResultPartsStorage *parts_storage, + Oid partid) +{ +#define CopyToResultRelInfo(field_name) \ + ( child_result_rel_info->field_name = parts_storage->base_rri->field_name ) + + ResultRelInfoHolder *rri_holder; + bool found; + + rri_holder = hash_search(parts_storage->result_rels_table, + (const void *) &partid, + HASH_FIND, &found); + + /* If not found, create & cache new ResultRelInfo */ + if (!found) + { + Relation child_rel, + base_rel; + RangeTblEntry *child_rte, + *parent_rte; + Index child_rte_idx; + ResultRelInfo *child_result_rel_info; + List *translated_vars; + MemoryContext old_mcxt; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *parent_perminfo, + *child_perminfo; + /* ResultRelInfo of partitioned table. 
*/ + RangeTblEntry *init_rte; +#endif + + /* Lock partition and check if it exists */ + LockRelationOid(partid, parts_storage->head_open_lock_mode); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) + { + UnlockRelationOid(partid, parts_storage->head_open_lock_mode); + return NULL; + } + + /* Switch to query-level mcxt for allocations */ + old_mcxt = MemoryContextSwitchTo(parts_storage->estate->es_query_cxt); + + /* Create a new cache entry for this partition */ + rri_holder = hash_search(parts_storage->result_rels_table, + (const void *) &partid, + HASH_ENTER, NULL); + + parent_rte = rt_fetch(parts_storage->base_rri->ri_RangeTableIndex, + parts_storage->estate->es_range_table); + + /* Get base relation */ + base_rel = parts_storage->base_rri->ri_RelationDesc; + + /* Open child relation and check if it is a valid target */ + child_rel = heap_open_compat(partid, NoLock); + + /* Create RangeTblEntry for partition */ + child_rte = makeNode(RangeTblEntry); + child_rte->rtekind = RTE_RELATION; + child_rte->relid = partid; + child_rte->relkind = child_rel->rd_rel->relkind; + child_rte->eref = parent_rte->eref; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* Build Var translation list for 'inserted_cols' */ + make_inh_translation_list(parts_storage->init_rri->ri_RelationDesc, + child_rel, 0, &translated_vars, NULL); + + /* + * Need to use ResultRelInfo of partitioned table 'init_rri' because + * 'base_rri' can be ResultRelInfo of partition without any + * ResultRelInfo, see expand_single_inheritance_child(). 
+ */ + init_rte = rt_fetch(parts_storage->init_rri->ri_RangeTableIndex, + parts_storage->estate->es_range_table); + parent_perminfo = getRTEPermissionInfo(estate->es_rteperminfos, init_rte); + + child_rte->perminfoindex = 0; /* expected by addRTEPermissionInfo() */ + child_perminfo = addRTEPermissionInfo(&estate->es_rteperminfos, child_rte); + child_perminfo->requiredPerms = parent_perminfo->requiredPerms; + child_perminfo->checkAsUser = parent_perminfo->checkAsUser; + child_perminfo->insertedCols = translate_col_privs(parent_perminfo->insertedCols, + translated_vars); + child_perminfo->updatedCols = translate_col_privs(parent_perminfo->updatedCols, + translated_vars); + + /* Check permissions for one partition */ + ExecCheckOneRtePermissions(child_rte, child_perminfo, true); +#else + /* Build Var translation list for 'inserted_cols' */ + make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); + + child_rte->requiredPerms = parent_rte->requiredPerms; + child_rte->checkAsUser = parent_rte->checkAsUser; + child_rte->insertedCols = translate_col_privs(parent_rte->insertedCols, + translated_vars); + child_rte->updatedCols = translate_col_privs(parent_rte->updatedCols, + translated_vars); + + /* Check permissions for partition */ + ExecCheckRTPerms(list_make1(child_rte), true); +#endif + + /* Append RangeTblEntry to estate->es_range_table */ + child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte, child_rel); + + /* Create ResultRelInfo for partition */ + child_result_rel_info = makeNode(ResultRelInfo); + + InitResultRelInfoCompat(child_result_rel_info, + child_rel, + child_rte_idx, + parts_storage->estate->es_instrument); + + if (parts_storage->command_type != CMD_DELETE) + ExecOpenIndices(child_result_rel_info, parts_storage->speculative_inserts); + + /* Copy necessary fields from saved ResultRelInfo */ + CopyToResultRelInfo(ri_WithCheckOptions); + CopyToResultRelInfo(ri_WithCheckOptionExprs); + 
CopyToResultRelInfo(ri_projectReturning); +#if PG_VERSION_NUM >= 110000 + CopyToResultRelInfo(ri_onConflict); +#else + CopyToResultRelInfo(ri_onConflictSetProj); + CopyToResultRelInfo(ri_onConflictSetWhere); +#endif + +#if PG_VERSION_NUM < 140000 + /* field "ri_junkFilter" removed in 86dc90056dfd */ + if (parts_storage->command_type != CMD_UPDATE) + CopyToResultRelInfo(ri_junkFilter); + else + child_result_rel_info->ri_junkFilter = NULL; +#endif + + /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ + child_result_rel_info->ri_ConstraintExprs = NULL; + + /* Check that this partition is a valid result relation */ + CheckValidResultRelCompat(child_result_rel_info, + parts_storage->command_type); + + /* Fill the ResultRelInfo holder */ + rri_holder->partid = partid; + rri_holder->result_rel_info = child_result_rel_info; + + /* + * Generate parent->child tuple transformation map. We need to + * convert tuples because e.g. parent's TupleDesc might have dropped + * columns which child doesn't have at all because it was created after + * the drop. + */ + rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); + + /* + * Field for child->child tuple transformation map. We need to + * convert tuples because child TupleDesc might have extra + * columns ('ctid' etc.) and need remove them. + */ + rri_holder->tuple_map_child = NULL; + + /* Default values */ + rri_holder->prel = NULL; + rri_holder->prel_expr_state = NULL; + + if ((rri_holder->prel = get_pathman_relation_info(partid)) != NULL) + { + rri_holder->prel_expr_state = + prepare_expr_state(rri_holder->prel, /* NOTE: this prel! 
*/ + parts_storage->base_rri->ri_RelationDesc, + parts_storage->estate); + } + + /* Call initialization callback if needed */ + if (parts_storage->init_rri_holder_cb) + parts_storage->init_rri_holder_cb(rri_holder, parts_storage); + + /* Append ResultRelInfo to storage->es_alloc_result_rels */ + append_rri_to_estate(parts_storage->estate, child_result_rel_info); + + /* Don't forget to switch back! */ + MemoryContextSwitchTo(old_mcxt); + } + + return rri_holder; +} + +/* Refresh PartRelationInfo for the partition in storage */ +PartRelationInfo * +refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) +{ + if (partid == PrelParentRelid(parts_storage->prel)) + { + close_pathman_relation_info(parts_storage->prel); + parts_storage->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, parts_storage->prel, PT_ANY); + + return parts_storage->prel; + } + else + { + ResultRelInfoHolder *rri_holder; + + rri_holder = hash_search(parts_storage->result_rels_table, + (const void *) &partid, + HASH_FIND, NULL); + + /* We must have entry (since we got 'prel' from it) */ + Assert(rri_holder && rri_holder->prel); + + close_pathman_relation_info(rri_holder->prel); + rri_holder->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, rri_holder->prel, PT_ANY); + + return rri_holder->prel; + } +} + +/* Build tuple conversion map (e.g. 
parent has a dropped column) */ +TupleConversionMap * +build_part_tuple_map(Relation base_rel, Relation child_rel) +{ + TupleConversionMap *tuple_map; + TupleDesc child_tupdesc, + parent_tupdesc; + + /* HACK: use fake 'tdtypeid' in order to fool convert_tuples_by_name() */ + child_tupdesc = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc->tdtypeid = InvalidOid; + + parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(base_rel)); + parent_tupdesc->tdtypeid = InvalidOid; + + /* Generate tuple transformation map and some other stuff */ + tuple_map = convert_tuples_by_name_compat(parent_tupdesc, + child_tupdesc, + ERR_PART_DESC_CONVERT); + + /* If map is one-to-one, free unused TupleDescs */ + if (!tuple_map) + { + FreeTupleDesc(child_tupdesc); + FreeTupleDesc(parent_tupdesc); + } + + return tuple_map; +} + +/* + * Build tuple conversion map (e.g. partition tuple has extra column(s)). + * We create a special tuple map (tuple_map_child), which, when applied to the + * tuple of partition, translates the tuple attributes into the tuple + * attributes of the same partition, discarding service attributes like "ctid" + * (i.e. working like junkFilter). 
+ */ +TupleConversionMap * +build_part_tuple_map_child(Relation child_rel) +{ + TupleConversionMap *tuple_map; + TupleDesc child_tupdesc1; + TupleDesc child_tupdesc2; + int n; +#if PG_VERSION_NUM >= 130000 + AttrMap *attrMap; +#else + AttrNumber *attrMap; +#endif + + child_tupdesc1 = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc1->tdtypeid = InvalidOid; + + child_tupdesc2 = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc2->tdtypeid = InvalidOid; + + /* Generate tuple transformation map */ +#if PG_VERSION_NUM >= 160000 /* for commit ad86d159b6ab */ + attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2, false); +#elif PG_VERSION_NUM >= 130000 + attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2); +#else + attrMap = convert_tuples_by_name_map(child_tupdesc1, child_tupdesc2, + ERR_PART_DESC_CONVERT); +#endif + + /* Prepare the map structure */ + tuple_map = (TupleConversionMap *) palloc(sizeof(TupleConversionMap)); + tuple_map->indesc = child_tupdesc1; + tuple_map->outdesc = child_tupdesc2; + tuple_map->attrMap = attrMap; + + /* preallocate workspace for Datum arrays */ + n = child_tupdesc1->natts; + tuple_map->outvalues = (Datum *) palloc(n * sizeof(Datum)); + tuple_map->outisnull = (bool *) palloc(n * sizeof(bool)); + + n = child_tupdesc1->natts + 1; /* +1 for NULL */ + tuple_map->invalues = (Datum *) palloc(n * sizeof(Datum)); + tuple_map->inisnull = (bool *) palloc(n * sizeof(bool)); + + tuple_map->invalues[0] = (Datum) 0; /* set up the NULL entry */ + tuple_map->inisnull[0] = true; + + return tuple_map; +} + +/* Destroy tuple conversion map */ +void +destroy_tuple_map(TupleConversionMap *tuple_map) +{ + if (tuple_map) + { + FreeTupleDesc(tuple_map->indesc); + FreeTupleDesc(tuple_map->outdesc); + + free_conversion_map(tuple_map); + } +} + +/* + * ----------------------------------- + * Partition search helper functions + * ----------------------------------- + */ + +/* + * Find matching partitions for 
'value' using PartRelationInfo. + */ +Oid * +find_partitions_for_value(Datum value, Oid value_type, + const PartRelationInfo *prel, + int *nparts) +{ +#define CopyToTempConst(const_field, attr_field) \ + ( temp_const.const_field = prel->attr_field ) + + Const temp_const; /* temporary const for expr walker */ + WalkerContext wcxt; + List *ranges = NIL; + + /* Prepare dummy Const node */ + NodeSetTag(&temp_const, T_Const); + temp_const.location = -1; + + /* Fill const with value ... */ + temp_const.constvalue = value; + temp_const.consttype = value_type; + temp_const.constisnull = false; + + /* ... and some other important data */ + CopyToTempConst(consttypmod, ev_typmod); + CopyToTempConst(constcollid, ev_collid); + CopyToTempConst(constlen, ev_len); + CopyToTempConst(constbyval, ev_byval); + + /* We use 0 since varno doesn't matter for Const */ + InitWalkerContext(&wcxt, 0, prel, NULL); + ranges = walk_expr_tree((Expr *) &temp_const, &wcxt)->rangeset; + + return get_partition_oids(ranges, nparts, prel, false); +} + +/* + * Smart wrapper for scan_result_parts_storage(). 
+ */ +ResultRelInfoHolder * +select_partition_for_insert(EState *estate, + ResultPartsStorage *parts_storage, + TupleTableSlot *slot) +{ + PartRelationInfo *prel = parts_storage->prel; + ExprState *expr_state = parts_storage->prel_expr_state; + ExprContext *expr_context = parts_storage->prel_econtext; + + Oid parent_relid = PrelParentRelid(prel), + partition_relid = InvalidOid; + + Datum value; + bool isnull; + bool compute_value = true; + + Oid *parts; + int nparts; + ResultRelInfoHolder *result; + + do + { + if (compute_value) + { + /* Prepare expression context */ + ResetExprContext(expr_context); + expr_context->ecxt_scantuple = slot; + + /* Execute expression */ + value = ExecEvalExprCompat(expr_state, expr_context, &isnull); + + if (isnull) + elog(ERROR, ERR_PART_ATTR_NULL); + + /* Ok, we have a value */ + compute_value = false; + } + + /* Search for matching partitions */ + parts = find_partitions_for_value(value, prel->ev_type, prel, &nparts); + + if (nparts > 1) + { + elog(ERROR, ERR_PART_ATTR_MULTIPLE); + } + else if (nparts == 0) + { + partition_relid = create_partitions_for_value(parent_relid, + value, prel->ev_type); + } + else partition_relid = parts[0]; + + /* Get ResultRelationInfo holder for the selected partition */ + result = scan_result_parts_storage(estate, parts_storage, partition_relid); + + /* Somebody has dropped or created partitions */ + if ((nparts == 0 || result == NULL) && !PrelIsFresh(prel)) + { + /* Try building a new 'prel' for this relation */ + prel = refresh_result_parts_storage(parts_storage, parent_relid); + } + + /* This partition is a parent itself */ + if (result && result->prel) + { + prel = result->prel; + expr_state = result->prel_expr_state; + parent_relid = result->partid; + compute_value = true; + + /* Repeat with a new dispatch */ + result = NULL; + } + + Assert(prel); + } + /* Loop until we get some result */ + while (result == NULL); + + return result; } +/* + * Since 13 (e1551f96e64) AttrNumber[] and map_length was 
combined + * into one struct AttrMap + */ +static ExprState * +prepare_expr_state(const PartRelationInfo *prel, + Relation source_rel, + EState *estate) +{ + ExprState *expr_state; + MemoryContext old_mcxt; + Node *expr; + + /* Make sure we use query memory context */ + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + + /* Fetch partitioning expression (we don't care about varno) */ + expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); + + /* Should we try using map? */ + if (PrelParentRelid(prel) != RelationGetRelid(source_rel)) + { +#if PG_VERSION_NUM >= 130000 + AttrMap *map; +#else + AttrNumber *map; + int map_length; +#endif + TupleDesc source_tupdesc = RelationGetDescr(source_rel); + + /* Remap expression attributes for source relation */ +#if PG_VERSION_NUM >= 130000 + map = PrelExpressionAttributesMap(prel, source_tupdesc); +#else + map = PrelExpressionAttributesMap(prel, source_tupdesc, &map_length); +#endif + + if (map) + { + bool found_whole_row; + +#if PG_VERSION_NUM >= 130000 + expr = map_variable_attnos(expr, PART_EXPR_VARNO, 0, map, + InvalidOid, + &found_whole_row); +#else + expr = map_variable_attnos_compat(expr, PART_EXPR_VARNO, 0, map, + map_length, InvalidOid, + &found_whole_row); +#endif + + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference" + " found in partition key"); + +#if PG_VERSION_NUM >= 130000 + free_attrmap(map); +#else + pfree(map); +#endif + } + } + + /* Prepare state for expression execution */ + expr_state = ExecInitExpr((Expr *) expr, NULL); + MemoryContextSwitchTo(old_mcxt); + + return expr_state; +} + +/* + * -------------------------------- + * PartitionFilter implementation + * -------------------------------- + */ + Plan * -make_partition_filter(Plan *subplan, Oid partitioned_table, - OnConflictAction conflict_action) +make_partition_filter(Plan *subplan, + Oid parent_relid, + Index parent_rti, + OnConflictAction conflict_action, + CmdType command_type, + List *returning_list) { CustomScan *cscan 
= makeNode(CustomScan); - cscan->scan.plan.startup_cost = subplan->startup_cost; - cscan->scan.plan.total_cost = subplan->total_cost; - cscan->scan.plan.plan_rows = subplan->plan_rows; - cscan->scan.plan.plan_width = subplan->plan_width; + /* Currently we don't support ON CONFLICT clauses */ + if (conflict_action != ONCONFLICT_NONE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ON CONFLICT clause is not supported with partitioned tables"))); + /* Copy costs etc */ + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; + + /* Setup methods and child plan */ cscan->methods = &partition_filter_plan_methods; cscan->custom_plans = list_make1(subplan); - cscan->scan.plan.targetlist = pfilter_build_tlist(subplan->targetlist); - - /* No relation will be scanned */ + /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; - cscan->custom_scan_tlist = subplan->targetlist; + + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti); /* Pack partitioned table's Oid and conflict_action */ - cscan->custom_private = list_make2_int(partitioned_table, - conflict_action); +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + cscan->custom_private = list_make5(makeInteger(parent_relid), + makeInteger(conflict_action), + returning_list, + makeInteger(command_type), + makeInteger(parent_rti)); +#else + cscan->custom_private = list_make4(makeInteger(parent_relid), + makeInteger(conflict_action), + returning_list, + makeInteger(command_type)); +#endif return &cscan->scan.plan; } @@ -85,26 +834,31 @@ make_partition_filter(Plan *subplan, Oid partitioned_table, Node * partition_filter_create_scan_state(CustomScan *node) { - PartitionFilterState *state; + PartitionFilterState *state; state = (PartitionFilterState *) 
palloc0(sizeof(PartitionFilterState)); NodeSetTag(state, T_CustomScanState); - state->css.flags = node->flags; - state->css.methods = &partition_filter_exec_methods; + /* Initialize base CustomScanState */ + state->css.flags = node->flags; + state->css.methods = &partition_filter_exec_methods; /* Extract necessary variables */ - state->subplan = (Plan *) linitial(node->custom_plans); - state->partitioned_table = linitial_int(node->custom_private); - state->onConflictAction = lsecond_int(node->custom_private); + state->subplan = (Plan *) linitial(node->custom_plans); + state->partitioned_table = (Oid) intVal(linitial(node->custom_private)); + state->on_conflict_action = intVal(lsecond(node->custom_private)); + state->returning_list = (List *) lthird(node->custom_private); + state->command_type = (CmdType) intVal(lfourth(node->custom_private)); +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + state->parent_rti = (Index) intVal(lfirst(list_nth_cell(node->custom_private, 4))); +#endif /* Check boundaries */ - Assert(state->onConflictAction >= ONCONFLICT_NONE || - state->onConflictAction <= ONCONFLICT_UPDATE); + Assert(state->on_conflict_action >= ONCONFLICT_NONE || + state->on_conflict_action <= ONCONFLICT_UPDATE); - /* Prepare dummy Const node */ - NodeSetTag(&state->temp_const, T_Const); - state->temp_const.location = -1; + /* There should be exactly one subplan */ + Assert(list_length(node->custom_plans) == 1); return (Node *) state; } @@ -113,32 +867,75 @@ void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { PartitionFilterState *state = (PartitionFilterState *) node; + Oid parent_relid = state->partitioned_table; + ResultRelInfo *current_rri; - HTAB *result_rels_table; - HASHCTL *result_rels_table_config = &state->result_rels_table_config; - + /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); - state->savedRelInfo = NULL; - 
memset(result_rels_table_config, 0, sizeof(HASHCTL)); - result_rels_table_config->keysize = sizeof(Oid); - result_rels_table_config->entrysize = sizeof(ResultRelInfoHolder); + /* Fetch current result relation (rri + rel) */ + current_rri = estate->es_result_relation_info; + + /* Init ResultRelInfo cache */ + init_result_parts_storage(&state->result_parts, + parent_relid, current_rri, + estate, state->command_type, + RPS_SKIP_RELATIONS, + state->on_conflict_action != ONCONFLICT_NONE, + RPS_RRI_CB(prepare_rri_for_insert, state), + RPS_RRI_CB(NULL, NULL)); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* ResultRelInfo of partitioned table. */ + { + RangeTblEntry *rte = rt_fetch(current_rri->ri_RangeTableIndex, estate->es_range_table); + + if (rte->perminfoindex > 0) + state->result_parts.init_rri = current_rri; + else + { + /* + * Additional changes for 178ee1d858d: we cannot use current_rri + * because RTE for this ResultRelInfo has perminfoindex = 0. Need + * to use parent_rti (modify_table->nominalRelation) instead. 
+ */ + Assert(state->parent_rti > 0); + state->result_parts.init_rri = estate->es_result_relations[state->parent_rti - 1]; + if (!state->result_parts.init_rri) + elog(ERROR, "cannot determine result info for partitioned table"); + } + } +#endif +} - result_rels_table = hash_create("ResultRelInfo storage", 10, - result_rels_table_config, - HASH_ELEM | HASH_BLOBS); +#if PG_VERSION_NUM >= 140000 +/* + * Re-initialization of PartitionFilterState for using new partition with new + * "current_rri" + */ +static void +reint_partition_filter_state(PartitionFilterState *state, ResultRelInfo *current_rri) +{ + Oid parent_relid = state->partitioned_table; + EState *estate = state->result_parts.estate; + + fini_result_parts_storage(&state->result_parts); + + state->returning_list = current_rri->ri_returningList; - state->result_rels_table = result_rels_table; - state->warning_triggered = false; + /* Init ResultRelInfo cache */ + init_result_parts_storage(&state->result_parts, + parent_relid, current_rri, + estate, state->command_type, + RPS_SKIP_RELATIONS, + state->on_conflict_action != ONCONFLICT_NONE, + RPS_RRI_CB(prepare_rri_for_insert, state), + RPS_RRI_CB(NULL, NULL)); } +#endif TupleTableSlot * partition_filter_exec(CustomScanState *node) { -#define CopyToTempConst(const_field, attr_field) \ - ( state->temp_const.const_field = \ - slot->tts_tupleDescriptor->attrs[prel->attnum - 1]->attr_field ) - PartitionFilterState *state = (PartitionFilterState *) node; ExprContext *econtext = node->ss.ps.ps_ExprContext; @@ -148,91 +945,119 @@ partition_filter_exec(CustomScanState *node) slot = ExecProcNode(child_ps); - /* Save original ResultRelInfo */ - if (!state->savedRelInfo) - state->savedRelInfo = estate->es_result_relation_info; - if (!TupIsNull(slot)) { - const PartRelationInfo *prel; - - MemoryContext old_cxt; - - List *ranges; - int nparts; - Oid *parts; - Oid selected_partid; - - WalkerContext wcxt; - bool isnull; - Datum value; - - /* Fetch PartRelationInfo for this 
partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); - if (!prel) - { - if (!state->warning_triggered) - elog(WARNING, "Relation \"%s\" is not partitioned, " - "PartitionFilter will behave as a normal INSERT", - get_rel_name_or_relid(state->partitioned_table)); - - return slot; + MemoryContext old_mcxt; + ResultRelInfoHolder *rri_holder; + ResultRelInfo *rri; + JunkFilter *junkfilter = NULL; +#if PG_VERSION_NUM >= 140000 + PartitionRouterState *pr_state = linitial(node->custom_ps); + + /* + * For 14: in case of an UPDATE command, we can scan several partitions + * in one plan. Need to switch context each time partition is switched. + */ + if (IsPartitionRouterState(pr_state) && + state->result_parts.base_rri != pr_state->current_rri) + { /* + * Slot switched to new partition: need to + * reinitialize some PartitionFilterState variables + */ + reint_partition_filter_state(state, pr_state->current_rri); } - - /* Extract partitioned column value */ - value = slot_getattr(slot, prel->attnum, &isnull); - - /* Fill const with value ... */ - state->temp_const.constvalue = value; - state->temp_const.constisnull = isnull; - - /* ... 
and some other important data */ - CopyToTempConst(consttype, atttypid); - CopyToTempConst(consttypmod, atttypmod); - CopyToTempConst(constcollid, attcollation); - CopyToTempConst(constlen, attlen); - CopyToTempConst(constbyval, attbyval); - - InitWalkerContext(&wcxt, prel, econtext, true); +#else + junkfilter = estate->es_result_relation_info->ri_junkFilter; +#endif /* Switch to per-tuple context */ - old_cxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - ranges = walk_expr_tree((Expr *) &state->temp_const, &wcxt)->rangeset; - parts = get_partition_oids(ranges, &nparts, prel, false); + /* Search for a matching partition */ + rri_holder = select_partition_for_insert(estate, &state->result_parts, slot); - if (nparts > 1) - elog(ERROR, "PartitionFilter selected more than one partition"); - else if (nparts == 0) + /* Switch back and clean up per-tuple context */ + MemoryContextSwitchTo(old_mcxt); + ResetExprContext(econtext); + + rri = rri_holder->result_rel_info; + + /* Magic: replace parent's ResultRelInfo with ours */ + estate->es_result_relation_info = rri; + + /* + * Besides 'transform map' we should process two cases: + * 1) CMD_UPDATE, row moved to other partition, junkfilter == NULL + * (filled in router_set_slot() for SELECT + INSERT); + * we should clear attribute 'ctid' (do not insert it into database); + * 2) CMD_INSERT/CMD_UPDATE operations for partitions with deleted column(s), + * junkfilter == NULL. 
+ */ + /* If there's a transform map, rebuild the tuple */ + if (rri_holder->tuple_map || + (!junkfilter && + (state->command_type == CMD_INSERT || state->command_type == CMD_UPDATE) && + (slot->tts_tupleDescriptor->natts > rri->ri_RelationDesc->rd_att->natts /* extra fields */ +#if PG_VERSION_NUM < 120000 + /* + * If we have a regular physical tuple 'slot->tts_tuple' and + * it's locally palloc'd => we will use this tuple in + * ExecMaterializeSlot() instead of materialize the slot, so + * need to check number of attributes for this tuple: + */ + || (slot->tts_tuple && slot->tts_shouldFree && + HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) > + rri->ri_RelationDesc->rd_att->natts /* extra fields */) +#endif + ))) { - /* - * If auto partition propagation is enabled then try to create - * new partitions for the key - */ - if (prel->auto_partition && IsAutoPartitionEnabled()) +#if PG_VERSION_NUM < 120000 + HeapTuple htup_old, + htup_new; +#endif + Relation child_rel = rri->ri_RelationDesc; + TupleConversionMap *tuple_map; + + if (rri_holder->tuple_map) + tuple_map = rri_holder->tuple_map; + else { - selected_partid = create_partitions(state->partitioned_table, - state->temp_const.constvalue, - state->temp_const.consttype); - - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(state->partitioned_table, NULL); + if (!rri_holder->tuple_map_child) + { /* + * Generate child->child tuple transformation map. We need to + * convert tuples because child TupleDesc has extra + * columns ('ctid' etc.) and we need to remove them. 
+ */ + rri_holder->tuple_map_child = build_part_tuple_map_child(child_rel); + } + tuple_map = rri_holder->tuple_map_child; } - else - elog(ERROR, - "There is no suitable partition for key '%s'", - datum_to_cstring(state->temp_const.constvalue, - state->temp_const.consttype)); - } - else - selected_partid = parts[0]; - /* Switch back and clean up per-tuple context */ - MemoryContextSwitchTo(old_cxt); - ResetExprContext(econtext); + /* xxx why old code decided to materialize it? */ +#if PG_VERSION_NUM < 120000 + htup_old = ExecMaterializeSlot(slot); + htup_new = do_convert_tuple(htup_old, tuple_map); + ExecClearTuple(slot); +#endif - /* Replace parent table with a suitable partition */ - estate->es_result_relation_info = getResultRelInfo(selected_partid, state); + /* + * Allocate new slot if needed. + * For 12, it is sort of important to create BufferHeapTuple, + * though we will store virtual one there. Otherwise, ModifyTable + * decides to copy it to mt_scans slot which has tupledesc of + * parent. + */ + if (!state->tup_convert_slot) + state->tup_convert_slot = MakeTupleTableSlotCompat(&TTSOpsBufferHeapTuple); + + /* TODO: why should we *always* set a new slot descriptor? 
*/ + ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); +#if PG_VERSION_NUM >= 120000 + slot = execute_attr_map_slot(tuple_map->attrMap, slot, state->tup_convert_slot); +#else + slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); +#endif + } return slot; } @@ -245,28 +1070,21 @@ partition_filter_end(CustomScanState *node) { PartitionFilterState *state = (PartitionFilterState *) node; - HASH_SEQ_STATUS stat; - ResultRelInfoHolder *rri_handle; /* ResultRelInfo holder */ - - hash_seq_init(&stat, state->result_rels_table); - while ((rri_handle = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) - { - /* FIXME: add ResultRelInfos to estate->es_result_relations to fix triggers */ - ExecCloseIndices(rri_handle->resultRelInfo); - heap_close(rri_handle->resultRelInfo->ri_RelationDesc, - RowExclusiveLock); - } - hash_destroy(state->result_rels_table); + /* Executor will close rels via estate->es_result_relations */ + fini_result_parts_storage(&state->result_parts); Assert(list_length(node->custom_ps) == 1); ExecEndNode((PlanState *) linitial(node->custom_ps)); + + /* Free slot for tuple conversion */ + if (state->tup_convert_slot) + ExecDropSingleTupleTableSlot(state->tup_convert_slot); } void partition_filter_rescan(CustomScanState *node) { - Assert(list_length(node->custom_ps) == 1); - ExecReScan((PlanState *) linitial(node->custom_ps)); + elog(ERROR, "partition_filter_rescan is not implemented"); } void @@ -276,152 +1094,521 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e } + /* - * Construct ResultRelInfo for a partition. + * Build partition filter's target list pointing to subplan tuple's elements. 
*/ -static ResultRelInfo * -getResultRelInfo(Oid partid, PartitionFilterState *state) +List * +pfilter_build_tlist(Plan *subplan, Index varno) { -#define CopyToResultRelInfo(field_name) \ - ( resultRelInfo->field_name = state->savedRelInfo->field_name ) + List *result_tlist = NIL; + ListCell *lc; -#define ResizeTriggerField(field_name, field_type) \ - do { \ - if (resultRelInfo->field_name) \ - pfree(resultRelInfo->field_name); \ - resultRelInfo->field_name = (field_type *) \ - palloc0(resultRelInfo->ri_TrigDesc->numtriggers * sizeof(field_type)); \ - } while (0) + foreach (lc, subplan->targetlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(lc), + *newtle = NULL; - ResultRelInfoHolder *resultRelInfoHandle; - bool found; + if (IsA(tle->expr, Const)) + { + /* TODO: maybe we should use copyObject(tle->expr)? */ + newtle = makeTargetEntry(tle->expr, + tle->resno, + tle->resname, + tle->resjunk); + } + else + { + Var *var = makeVar(varno, /* point to subplan's elements */ + tle->resno, + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + + newtle = makeTargetEntry((Expr *) var, + tle->resno, + tle->resname, + tle->resjunk); + } - resultRelInfoHandle = hash_search(state->result_rels_table, - (const void *) &partid, - HASH_ENTER, &found); + result_tlist = lappend(result_tlist, newtle); + } - if (!found) + return result_tlist; +} + +/* + * ---------------------------------------------- + * Additional init steps for ResultPartsStorage + * ---------------------------------------------- + */ + +/* Main trigger */ +static void +prepare_rri_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) +{ + prepare_rri_returning_for_insert(rri_holder, rps_storage); + prepare_rri_fdw_for_insert(rri_holder, rps_storage); +} + +/* Prepare 'RETURNING *' tlist & projection */ +static void +prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) +{ + 
PartitionFilterState *pfstate; + List *returning_list; + ResultRelInfo *child_rri, + *parent_rri; + Index parent_rt_idx; + TupleTableSlot *result_slot; + + /* We don't need to do anything ff there's no map */ + if (!rri_holder->tuple_map) + return; + + pfstate = (PartitionFilterState *) rps_storage->init_rri_holder_cb_arg; + returning_list = pfstate->returning_list; + + /* Exit if there's no RETURNING list */ + if (!returning_list) + return; + + child_rri = rri_holder->result_rel_info; + parent_rri = rps_storage->base_rri; + parent_rt_idx = parent_rri->ri_RangeTableIndex; + + /* Replace parent's varattnos with child's */ + returning_list = (List *) + fix_returning_list_mutator((Node *) returning_list, + list_make2(makeInteger(parent_rt_idx), + rri_holder)); + + /* Specify tuple slot where will be place projection result in */ +#if PG_VERSION_NUM >= 100000 + result_slot = parent_rri->ri_projectReturning->pi_state.resultslot; +#elif PG_VERSION_NUM >= 90500 + result_slot = parent_rri->ri_projectReturning->pi_slot; +#endif + + /* Build new projection info */ + child_rri->ri_projectReturning = + ExecBuildProjectionInfoCompat(returning_list, pfstate->css.ss.ps.ps_ExprContext, + result_slot, NULL /* HACK: no PlanState */, + RelationGetDescr(child_rri->ri_RelationDesc)); +} + +/* Prepare FDW access structs */ +static void +prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) +{ + ResultRelInfo *rri = rri_holder->result_rel_info; + FdwRoutine *fdw_routine = rri->ri_FdwRoutine; + Oid partid; + EState *estate; + + estate = rps_storage->estate; + + /* Nothing to do if not FDW */ + if (fdw_routine == NULL) + return; + + partid = RelationGetRelid(rri->ri_RelationDesc); + + /* Perform some checks according to 'pg_pathman_insert_into_fdw' */ + switch (pg_pathman_insert_into_fdw) { - bool grown_up; - ResultRelInfo *resultRelInfo = (ResultRelInfo *) palloc(sizeof(ResultRelInfo)); + case PF_FDW_INSERT_DISABLED: + elog(ERROR, "INSERTs 
into FDW partitions are disabled"); + break; - InitResultRelInfo(resultRelInfo, - heap_open(partid, RowExclusiveLock), - 0, - state->css.ss.ps.state->es_instrument); + case PF_FDW_INSERT_POSTGRES: + case PF_FDW_INSERT_ANY_FDW: + { + ForeignDataWrapper *fdw; + ForeignServer *fserver; + + /* Check if it's PostgreSQL FDW */ + fserver = GetForeignServer(GetForeignTable(partid)->serverid); + fdw = GetForeignDataWrapper(fserver->fdwid); + + /* Show message if not postgres_fdw */ + if (strcmp("postgres_fdw", fdw->fdwname) != 0) + switch (pg_pathman_insert_into_fdw) + { + case PF_FDW_INSERT_POSTGRES: + elog(ERROR, + "FDWs other than postgres_fdw are restricted"); + + break; + case PF_FDW_INSERT_ANY_FDW: + elog(WARNING, + "unrestricted FDW mode may lead to crashes"); + } + } + break; - ExecOpenIndices(resultRelInfo, state->onConflictAction != ONCONFLICT_NONE); + default: + elog(ERROR, "Mode is not implemented yet"); + break; + } - resultRelInfo->ri_TrigDesc = append_trigger_descs(resultRelInfo->ri_TrigDesc, - state->savedRelInfo->ri_TrigDesc, - &grown_up); - if (grown_up) + if (fdw_routine->PlanForeignModify) + { + RangeTblEntry *rte; + Query query; + PlanState pstate, + *pstate_ptr; + ModifyTableState mtstate; + PlannedStmt *plan; + + /* This is the value we'd like to get */ + List *fdw_private; + + TupleDesc tupdesc; + int target_attr, + i; + + /* Fetch RangeTblEntry for partition */ + rte = rt_fetch(rri->ri_RangeTableIndex, estate->es_range_table); + + /* Fetch tuple descriptor */ + tupdesc = RelationGetDescr(rri->ri_RelationDesc); + + /* Create fake Query node */ + memset((void *) &query, 0, sizeof(Query)); + NodeSetTag(&query, T_Query); + + query.commandType = CMD_INSERT; + query.querySource = QSRC_ORIGINAL; + query.resultRelation = 1; + query.rtable = list_make1(copyObject(rte)); + query.jointree = makeNode(FromExpr); + + query.targetList = NIL; + query.returningList = NIL; + +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* + * Copy the 
RTEPermissionInfos into query as well, so that + * add_rte_to_flat_rtable() will work correctly. + */ + query.rteperminfos = estate->es_rteperminfos; +#endif + + /* Generate 'query.targetList' using 'tupdesc' */ + target_attr = 1; + for (i = 0; i < tupdesc->natts; i++) { - ResizeTriggerField(ri_TrigFunctions, FmgrInfo); - ResizeTriggerField(ri_TrigWhenExprs, List *); + Form_pg_attribute attr; + TargetEntry *te; + Param *param; - if (resultRelInfo->ri_TrigInstrument) - { - pfree(resultRelInfo->ri_TrigInstrument); + attr = TupleDescAttr(tupdesc, i); - resultRelInfo->ri_TrigInstrument = - InstrAlloc(resultRelInfo->ri_TrigDesc->numtriggers, - state->css.ss.ps.state->es_instrument); - } - } + if (attr->attisdropped) + continue; - /* Copy necessary fields from saved ResultRelInfo */ - CopyToResultRelInfo(ri_WithCheckOptions); - CopyToResultRelInfo(ri_WithCheckOptionExprs); - CopyToResultRelInfo(ri_junkFilter); - CopyToResultRelInfo(ri_projectReturning); - CopyToResultRelInfo(ri_onConflictSetProj); - CopyToResultRelInfo(ri_onConflictSetWhere); + param = makeNode(Param); + param->paramkind = PARAM_EXTERN; + param->paramid = target_attr; + param->paramtype = attr->atttypid; + param->paramtypmod = attr->atttypmod; + param->paramcollid = attr->attcollation; + param->location = -1; - /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ - resultRelInfo->ri_ConstraintExprs = NULL; + te = makeTargetEntry((Expr *) param, target_attr, + pstrdup(NameStr(attr->attname)), + false); - resultRelInfoHandle->partid = partid; - resultRelInfoHandle->resultRelInfo = resultRelInfo; + query.targetList = lappend(query.targetList, te); - /* Make 'range table index' point to the parent relation */ - resultRelInfo->ri_RangeTableIndex = state->savedRelInfo->ri_RangeTableIndex; - } + target_attr++; + } - return resultRelInfoHandle->resultRelInfo; + /* HACK: plan a fake query for FDW access to be planned as well */ + elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); +#if 
PG_VERSION_NUM >= 130000 + plan = standard_planner(&query, NULL, 0, NULL); +#else + plan = standard_planner(&query, 0, NULL); +#endif + + /* HACK: create a fake PlanState */ + memset(&pstate, 0, sizeof(PlanState)); + pstate.plan = plan->planTree; + pstate_ptr = &pstate; + + /* HACK: create a fake ModifyTableState */ + memset(&mtstate, 0, sizeof(ModifyTableState)); + NodeSetTag(&mtstate, T_ModifyTableState); + mtstate.ps.state = estate; + mtstate.operation = CMD_INSERT; +#if PG_VERSION_NUM >= 140000 + /* + * Some fields ("mt_plans", "mt_nplans", "mt_whichplan") removed + * in 86dc90056dfd + */ + outerPlanState(&mtstate.ps) = pstate_ptr; + mtstate.mt_nrels = 1; +#else + mtstate.mt_plans = &pstate_ptr; + mtstate.mt_nplans = 1; + mtstate.mt_whichplan = 0; +#endif + mtstate.resultRelInfo = rri; +#if PG_VERSION_NUM < 110000 + mtstate.mt_onconflict = ONCONFLICT_NONE; +#endif + + /* Extract fdw_private from useless plan */ + elog(DEBUG1, "FDW(%u): extract fdw_private", partid); + fdw_private = linitial(((ModifyTable *) plan->planTree)->fdwPrivLists); + + /* HACK: call BeginForeignModify on 'rri' */ + elog(DEBUG1, "FDW(%u): call BeginForeignModify on a fake INSERT node", partid); + fdw_routine->BeginForeignModify(&mtstate, rri, fdw_private, 0, 0); + + /* Report success */ + elog(DEBUG1, "FDW(%u): success", partid); + } } -/* - * Build partition filter's target list pointing to subplan tuple's elements - */ -static List * -pfilter_build_tlist(List *tlist) +/* Make parent's Vars of returninig list point to child's tuple */ +static Node * +fix_returning_list_mutator(Node *node, void *state) { - List *result_tlist = NIL; - ListCell *lc; - int i = 1; + if (node == NULL) + return NULL; - foreach (lc, tlist) + if (IsA(node, Var)) { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - - Var *var = makeVar(INDEX_VAR, /* point to subplan's elements */ - i, /* direct attribute mapping */ - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) 
tle->expr), - 0); - - result_tlist = lappend(result_tlist, - makeTargetEntry((Expr *) var, - i, - NULL, - tle->resjunk)); - i++; /* next resno */ + /* Extract packed args */ + List *state_args = (List *) state; + Index parent_idx = intVal(linitial(state_args)); + ResultRelInfoHolder *rri_holder = (ResultRelInfoHolder *) lsecond(state_args); + Var *var; + + /* Copy base fields of Var */ + var = (Var *) palloc(sizeof(Var)); + *var = *(Var *) node; + + /* Make Var point to child's attribute */ + if (var->varno == parent_idx && + var->varattno >= 0) /* don't change sysattrs! */ + { + int i; + bool found_mapping = false; + + /* WHOLEROW reference, change row type */ + if (var->varattno == 0) + { + Relation child_rel = rri_holder->result_rel_info->ri_RelationDesc; + + /* Assign var->vartype a TupleDesc's type */ + var->vartype = RelationGetDescr(child_rel)->tdtypeid; + + return (Node *) var; + } + + /* Map: child_att => parent_att, so we have to run through it */ + for (i = 0; i < rri_holder->tuple_map->outdesc->natts; i++) + { + /* Good, 'varattno' of parent is child's 'i+1' */ +#if PG_VERSION_NUM >= 130000 + if (var->varattno == rri_holder->tuple_map->attrMap->attnums[i]) +#else + if (var->varattno == rri_holder->tuple_map->attrMap[i]) +#endif + { + var->varattno = i + 1; /* attnos begin with 1 */ + found_mapping = true; + break; + } + } + + /* Swear if we couldn't find mapping for this attribute */ + if (!found_mapping) + elog(ERROR, "could not bind attribute %d for returning statement", + var->varattno); + } + + return (Node *) var; } - return result_tlist; + return expression_tree_mutator_compat(node, fix_returning_list_mutator, state); } + /* - * Add partition filters to ModifyTable node's children - * - * 'context' should point to the PlannedStmt->rtable + * ------------------------------------- + * ExecutorState-related modifications + * ------------------------------------- */ -static void -partition_filter_visitor(Plan *plan, void *context) + +/* Append 
RangeTblEntry 'rte' to estate->es_range_table */ +static Index +append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel) { - List *rtable = (List *) context; - ModifyTable *modify_table = (ModifyTable *) plan; - ListCell *lc1, - *lc2; + estate_mod_data *emd_struct = fetch_estate_mod_data(estate); + + /* Copy estate->es_range_table if it's first time expansion */ + if (emd_struct->estate_not_modified) + estate->es_range_table = list_copy(estate->es_range_table); + + estate->es_range_table = lappend(estate->es_range_table, rte); + + /* Update estate_mod_data */ + emd_struct->estate_not_modified = false; + +#if PG_VERSION_NUM >= 120000 + estate->es_range_table_size = list_length(estate->es_range_table); +#endif +#if PG_VERSION_NUM >= 120000 && PG_VERSION_NUM < 130000 + /* + * On PG = 12, also add rte to es_range_table_array. This is horribly + * inefficient, yes. + * In 12 es_range_table_array ptr is not saved anywhere in + * core, so it is safe to repalloc. + * + * In >= 13 (3c92658) es_range_table_array was removed + */ + estate->es_range_table_array = (RangeTblEntry **) + repalloc(estate->es_range_table_array, + estate->es_range_table_size * sizeof(RangeTblEntry *)); + estate->es_range_table_array[estate->es_range_table_size - 1] = rte; +#endif + +#if PG_VERSION_NUM >= 120000 + /* + * Also reallocate es_relations, because es_range_table_size defines its + * len. This also ensures ExecEndPlan will close the rel. 
+ */ + estate->es_relations = (Relation *) + repalloc(estate->es_relations, estate->es_range_table_size * sizeof(Relation)); + estate->es_relations[estate->es_range_table_size - 1] = child_rel; +#endif + + return list_length(estate->es_range_table); +} - /* Skip if not ModifyTable with 'INSERT' command */ - if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) - return; +/* Append ResultRelInfo 'rri' to estate->es_result_relations */ +static int +append_rri_to_estate(EState *estate, ResultRelInfo *rri) +{ + estate_mod_data *emd_struct = fetch_estate_mod_data(estate); + int result_rels_allocated = emd_struct->estate_alloc_result_rels; +#if PG_VERSION_NUM >= 140000 /* reworked in commit a04daa97a433 */ + ResultRelInfo **rri_array = estate->es_result_relations; + + /* + * We already increased variable "estate->es_range_table_size" in previous + * call append_rte_to_estate(): see + * "estate->es_range_table_size = list_length(estate->es_range_table)" + * after "lappend(estate->es_range_table, rte)". So we should append + * new value in "estate->es_result_relations" only. 
+ */ + + /* Reallocate estate->es_result_relations if needed */ + if (result_rels_allocated < estate->es_range_table_size) + { + result_rels_allocated = result_rels_allocated * ALLOC_EXP + 1; + estate->es_result_relations = palloc(result_rels_allocated * + sizeof(ResultRelInfo *)); + memcpy(estate->es_result_relations, + rri_array, + (estate->es_range_table_size - 1) * sizeof(ResultRelInfo *)); + } - Assert(rtable && IsA(rtable, List)); + estate->es_result_relations[estate->es_range_table_size - 1] = rri; - forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) + estate->es_opened_result_relations = lappend(estate->es_opened_result_relations, rri); + + /* Update estate_mod_data */ + emd_struct->estate_alloc_result_rels = result_rels_allocated; + emd_struct->estate_not_modified = false; + + return estate->es_range_table_size; +#else + /* Reallocate estate->es_result_relations if needed */ + if (result_rels_allocated <= estate->es_num_result_relations) { - Index rindex = lfirst_int(lc2); - Oid relid = getrelid(rindex, rtable); - const PartRelationInfo *prel = get_pathman_relation_info(relid); - - /* Check that table is partitioned */ - if (prel) - lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), - relid, - modify_table->onConflictAction); + ResultRelInfo *rri_array = estate->es_result_relations; + + /* HACK: we can't repalloc or free previous array (there might be users) */ + result_rels_allocated = result_rels_allocated * ALLOC_EXP + 1; + estate->es_result_relations = palloc(result_rels_allocated * + sizeof(ResultRelInfo)); + memcpy(estate->es_result_relations, + rri_array, + estate->es_num_result_relations * sizeof(ResultRelInfo)); } + + /* + * Append ResultRelInfo to 'es_result_relations' array. + * NOTE: this is probably safe since ResultRelInfo + * contains nothing but pointers to various structs. 
+ */ + estate->es_result_relations[estate->es_num_result_relations] = *rri; + + /* Update estate_mod_data */ + emd_struct->estate_alloc_result_rels = result_rels_allocated; + emd_struct->estate_not_modified = false; + + return estate->es_num_result_relations++; +#endif } + /* - * Add PartitionFilter nodes to the plan tree + * -------------------------------------- + * Store data in 'estate->es_query_cxt' + * -------------------------------------- */ -void -add_partition_filters(List *rtable, Plan *plan) + +/* Used by fetch_estate_mod_data() to find estate_mod_data */ +static void +pf_memcxt_callback(void *arg) { elog(DEBUG1, "EState is destroyed"); } + +/* Fetch (or create) a estate_mod_data structure we've hidden inside es_query_cxt */ +static estate_mod_data * +fetch_estate_mod_data(EState *estate) { - if (pg_pathman_enable_partition_filter) - plan_tree_walker(plan, partition_filter_visitor, rtable); + MemoryContext estate_mcxt = estate->es_query_cxt; + estate_mod_data *emd_struct; + MemoryContextCallback *cb = estate_mcxt->reset_cbs; + + /* Go through callback list */ + while (cb != NULL) + { + /* This is the dummy callback we're looking for! 
*/ + if (cb->func == pf_memcxt_callback) + return (estate_mod_data *) cb->arg; + + cb = cb->next; + } + + /* Have to create a new one */ + emd_struct = MemoryContextAlloc(estate_mcxt, sizeof(estate_mod_data)); + emd_struct->estate_not_modified = true; +#if PG_VERSION_NUM >= 140000 + /* + * Reworked in commit a04daa97a433: field "es_num_result_relations" + * removed + */ + emd_struct->estate_alloc_result_rels = estate->es_range_table_size; +#else + emd_struct->estate_alloc_result_rels = estate->es_num_result_relations; +#endif + + cb = MemoryContextAlloc(estate_mcxt, sizeof(MemoryContextCallback)); + cb->func = pf_memcxt_callback; + cb->arg = emd_struct; + + MemoryContextRegisterResetCallback(estate_mcxt, cb); + + return emd_struct; } diff --git a/src/partition_filter.h b/src/partition_filter.h deleted file mode 100644 index d16cb0c0..00000000 --- a/src/partition_filter.h +++ /dev/null @@ -1,80 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * partition_filter.h - * Select partition for INSERT operation - * - * Copyright (c) 2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef RUNTIME_INSERT_H -#define RUNTIME_INSERT_H - -#include "relation_info.h" -#include "pathman.h" - -#include "postgres.h" -#include "commands/explain.h" -#include "optimizer/planner.h" - - -typedef struct -{ - Oid partid; - ResultRelInfo *resultRelInfo; -} ResultRelInfoHolder; - -typedef struct -{ - CustomScanState css; - - Oid partitioned_table; - OnConflictAction onConflictAction; - ResultRelInfo *savedRelInfo; - - Plan *subplan; - Const temp_const; /* temporary const for expr walker */ - - HTAB *result_rels_table; - HASHCTL result_rels_table_config; - - bool warning_triggered; -} PartitionFilterState; - - -extern bool pg_pathman_enable_partition_filter; - -extern CustomScanMethods partition_filter_plan_methods; -extern CustomExecMethods partition_filter_exec_methods; - - -void 
rowmark_add_tableoids(Query *parse); - -void postprocess_lock_rows(List *rtable, Plan *plan); - -void add_partition_filters(List *rtable, Plan *plan); - -void init_partition_filter_static_data(void); - -Plan * make_partition_filter(Plan *subplan, - Oid partitioned_table, - OnConflictAction conflict_action); - -Node * partition_filter_create_scan_state(CustomScan *node); - -void partition_filter_begin(CustomScanState *node, - EState *estate, - int eflags); - -TupleTableSlot * partition_filter_exec(CustomScanState *node); - -void partition_filter_end(CustomScanState *node); - -void partition_filter_rescan(CustomScanState *node); - -void partition_filter_explain(CustomScanState *node, - List *ancestors, - ExplainState *es); - -#endif diff --git a/src/partition_overseer.c b/src/partition_overseer.c new file mode 100644 index 00000000..d858374a --- /dev/null +++ b/src/partition_overseer.c @@ -0,0 +1,189 @@ +#include "postgres.h" + +#include "partition_filter.h" +#include "partition_overseer.h" +#include "partition_router.h" +#include "planner_tree_modification.h" + +CustomScanMethods partition_overseer_plan_methods; +CustomExecMethods partition_overseer_exec_methods; + +void +init_partition_overseer_static_data(void) +{ + partition_overseer_plan_methods.CustomName = OVERSEER_NODE_NAME; + partition_overseer_plan_methods.CreateCustomScanState = partition_overseer_create_scan_state; + + partition_overseer_exec_methods.CustomName = OVERSEER_NODE_NAME; + partition_overseer_exec_methods.BeginCustomScan = partition_overseer_begin; + partition_overseer_exec_methods.ExecCustomScan = partition_overseer_exec; + partition_overseer_exec_methods.EndCustomScan = partition_overseer_end; + partition_overseer_exec_methods.ReScanCustomScan = partition_overseer_rescan; + partition_overseer_exec_methods.MarkPosCustomScan = NULL; + partition_overseer_exec_methods.RestrPosCustomScan = NULL; + partition_overseer_exec_methods.ExplainCustomScan = partition_overseer_explain; + + 
RegisterCustomScanMethods(&partition_overseer_plan_methods); +} + +Plan * +make_partition_overseer(Plan *subplan) +{ + CustomScan *cscan = makeNode(CustomScan); + + /* Copy costs etc */ + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; + + /* Setup methods, child plan and param number for EPQ */ + cscan->methods = &partition_overseer_plan_methods; + cscan->custom_plans = list_make1(subplan); + cscan->custom_private = NIL; + + /* No physical relation will be scanned */ + cscan->scan.scanrelid = 0; + + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, INDEX_VAR); + cscan->custom_scan_tlist = subplan->targetlist; + + return &cscan->scan.plan; +} + + +Node * +partition_overseer_create_scan_state(CustomScan *node) +{ + CustomScanState *state = palloc0(sizeof(CustomScanState)); + NodeSetTag(state, T_CustomScanState); + + state->flags = node->flags; + state->methods = &partition_overseer_exec_methods; + + return (Node *) state; +} + +static void +set_mt_state_for_router(PlanState *state, void *context) +{ +#if PG_VERSION_NUM < 140000 + int i; +#endif + ModifyTableState *mt_state = (ModifyTableState *) state; + + if (!IsA(state, ModifyTableState)) + return; + +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_plans", "mt_nplans" removed in 86dc90056dfd */ + { + CustomScanState *pf_state = (CustomScanState *) outerPlanState(mt_state); +#else + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; +#endif + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pf_state)) + { + PartitionRouterState *pr_state = linitial(pf_state->custom_ps); + if (IsPartitionRouterState(pr_state)) + { + /* HACK: point to ModifyTable in PartitionRouter */ + pr_state->mt_state = 
mt_state; + } + } + } +} + +void +partition_overseer_begin(CustomScanState *node, + EState *estate, + int eflags) +{ + CustomScan *css = (CustomScan *) node->ss.ps.plan; + Plan *plan = linitial(css->custom_plans); + + /* It's convenient to store PlanState in 'custom_ps' */ + node->custom_ps = list_make1(ExecInitNode(plan, estate, eflags)); + + /* Save ModifyTableState in PartitionRouterState structs */ + state_tree_visitor((PlanState *) linitial(node->custom_ps), + set_mt_state_for_router, + NULL); +} + +TupleTableSlot * +partition_overseer_exec(CustomScanState *node) +{ + ModifyTableState *mt_state = linitial(node->custom_ps); + + TupleTableSlot *slot; + int mt_plans_old, + mt_plans_new; + + /* Get initial signal */ +#if PG_VERSION_NUM >= 140000 /* field "mt_nplans" removed in 86dc90056dfd */ + mt_plans_old = mt_state->mt_nrels; +#else + mt_plans_old = mt_state->mt_nplans; +#endif + +restart: + /* Run ModifyTable */ + slot = ExecProcNode((PlanState *) mt_state); + + /* Get current signal */ +#if PG_VERSION_NUM >= 140000 /* field "mt_nplans" removed in 86dc90056dfd */ + mt_plans_new = MTHackField(mt_state, mt_nrels); +#else + mt_plans_new = MTHackField(mt_state, mt_nplans); +#endif + + /* Did PartitionRouter ask us to restart? 
*/ + if (mt_plans_new != mt_plans_old) + { + /* Signal points to current plan */ +#if PG_VERSION_NUM < 140000 + int state_idx = -mt_plans_new; +#endif + + /* HACK: partially restore ModifyTable's state */ + MTHackField(mt_state, mt_done) = false; +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_nplans", "mt_whichplan" removed in 86dc90056dfd */ + MTHackField(mt_state, mt_nrels) = mt_plans_old; +#else + MTHackField(mt_state, mt_nplans) = mt_plans_old; + MTHackField(mt_state, mt_whichplan) = state_idx; +#endif + + /* Rerun ModifyTable */ + goto restart; + } + + return slot; +} + +void +partition_overseer_end(CustomScanState *node) +{ + Assert(list_length(node->custom_ps) == 1); + ExecEndNode((PlanState *) linitial(node->custom_ps)); +} + +void +partition_overseer_rescan(CustomScanState *node) +{ + elog(ERROR, "partition_overseer_rescan is not implemented"); +} + +void +partition_overseer_explain(CustomScanState *node, + List *ancestors, + ExplainState *es) +{ + /* Nothing to do here now */ +} diff --git a/src/partition_router.c b/src/partition_router.c new file mode 100644 index 00000000..5f00e9b1 --- /dev/null +++ b/src/partition_router.c @@ -0,0 +1,746 @@ +/* ------------------------------------------------------------------------ + * + * partition_router.c + * Route row to a right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * ------------------------------------------------------------------------ + */ + +#include "partition_filter.h" +#include "partition_router.h" +#include "compat/pg_compat.h" + +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#include "access/tableam.h" +#endif +#include "access/xact.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" /* direct heap_delete, no-no */ +#endif +#include "access/htup_details.h" +#include "catalog/pg_class.h" 
+#include "commands/trigger.h" +#include "executor/nodeModifyTable.h" +#include "foreign/fdwapi.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/makefuncs.h" /* make_ands_explicit */ +#include "optimizer/optimizer.h" +#endif +#include "optimizer/clauses.h" +#include "storage/bufmgr.h" +#include "utils/guc.h" +#include "utils/rel.h" + + +#define MTDisableStmtTriggers(mt_state, pr_state) \ + do { \ + TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ + \ + if (triggers) \ + { \ + (pr_state)->insert_stmt_triggers |= triggers->trig_insert_after_statement; \ + (pr_state)->update_stmt_triggers |= triggers->trig_update_after_statement; \ + triggers->trig_insert_after_statement = false; \ + triggers->trig_update_after_statement = false; \ + } \ + } while (0) + +#define MTEnableStmtTriggers(mt_state, pr_state) \ + do { \ + TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ + \ + if (triggers) \ + { \ + triggers->trig_insert_after_statement = (pr_state)->insert_stmt_triggers; \ + triggers->trig_update_after_statement = (pr_state)->update_stmt_triggers; \ + } \ + } while (0) + + + +bool pg_pathman_enable_partition_router = false; + +CustomScanMethods partition_router_plan_methods; +CustomExecMethods partition_router_exec_methods; + +static TupleTableSlot *router_set_slot(PartitionRouterState *state, + TupleTableSlot *slot, + CmdType operation); +static TupleTableSlot *router_get_slot(PartitionRouterState *state, + EState *estate, + bool *should_process); + +static void router_lazy_init_constraint(PartitionRouterState *state, bool recreate); + +static ItemPointerData router_extract_ctid(PartitionRouterState *state, + TupleTableSlot *slot); + +static TupleTableSlot *router_lock_or_delete_tuple(PartitionRouterState *state, + TupleTableSlot *slot, + ItemPointer tupleid, + bool *deleted); + +void +init_partition_router_static_data(void) +{ + partition_router_plan_methods.CustomName = UPDATE_NODE_NAME; + 
partition_router_plan_methods.CreateCustomScanState = partition_router_create_scan_state; + + partition_router_exec_methods.CustomName = UPDATE_NODE_NAME; + partition_router_exec_methods.BeginCustomScan = partition_router_begin; + partition_router_exec_methods.ExecCustomScan = partition_router_exec; + partition_router_exec_methods.EndCustomScan = partition_router_end; + partition_router_exec_methods.ReScanCustomScan = partition_router_rescan; + partition_router_exec_methods.MarkPosCustomScan = NULL; + partition_router_exec_methods.RestrPosCustomScan = NULL; + partition_router_exec_methods.ExplainCustomScan = partition_router_explain; + + DefineCustomBoolVariable("pg_pathman.enable_partitionrouter", + "Enables the planner's use of " UPDATE_NODE_NAME " custom node.", + NULL, + &pg_pathman_enable_partition_router, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + + RegisterCustomScanMethods(&partition_router_plan_methods); +} + +Plan * +make_partition_router(Plan *subplan, int epq_param, Index parent_rti) +{ + CustomScan *cscan = makeNode(CustomScan); + + /* Copy costs etc */ + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; + + /* Setup methods, child plan and param number for EPQ */ + cscan->methods = &partition_router_plan_methods; + cscan->custom_plans = list_make1(subplan); + cscan->custom_private = list_make1(makeInteger(epq_param)); + + /* No physical relation will be scanned */ + cscan->scan.scanrelid = 0; + + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti); + + return &cscan->scan.plan; +} + +Node * +partition_router_create_scan_state(CustomScan *node) +{ + PartitionRouterState *state; + + state = (PartitionRouterState *) palloc0(sizeof(PartitionRouterState)); + NodeSetTag(state, T_CustomScanState); + + state->css.flags = 
node->flags; + state->css.methods = &partition_router_exec_methods; + + /* Extract necessary variables */ + state->epqparam = intVal(linitial(node->custom_private)); + state->subplan = (Plan *) linitial(node->custom_plans); + + return (Node *) state; +} + +void +partition_router_begin(CustomScanState *node, EState *estate, int eflags) +{ + PartitionRouterState *state = (PartitionRouterState *) node; + + /* Remember current relation we're going to delete from */ + state->current_rri = estate->es_result_relation_info; + + EvalPlanQualInit_compat(&state->epqstate, estate, + state->subplan, NIL, + state->epqparam); + + /* It's convenient to store PlanState in 'custom_ps' */ + node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); +} + +TupleTableSlot * +partition_router_exec(CustomScanState *node) +{ + EState *estate = node->ss.ps.state; + PartitionRouterState *state = (PartitionRouterState *) node; + TupleTableSlot *slot; + bool should_process; + +take_next_tuple: + /* Get next tuple for processing */ + slot = router_get_slot(state, estate, &should_process); + + if (should_process) + { + CmdType new_cmd; + bool deleted; + ItemPointerData ctid; + /* Variables for prepare a full "new" tuple, after 86dc90056dfd */ +#if PG_VERSION_NUM >= 140000 + TupleTableSlot *old_slot; + ResultRelInfo *rri; +#endif + TupleTableSlot *full_slot; + bool partition_changed = false; + + ItemPointerSetInvalid(&ctid); + +#if PG_VERSION_NUM < 140000 + full_slot = slot; + + /* Build new junkfilter if needed */ + if (state->junkfilter == NULL) + state->junkfilter = state->current_rri->ri_junkFilter; +#else + if (slot->tts_tableOid == InvalidOid) + elog(ERROR, "invalid table OID in returned tuple"); + + /* + * For 14: in case UPDATE command we can scanning several partitions + * in one plan. Need to switch context each time partition is switched. 
+ */ + if (RelationGetRelid(state->current_rri->ri_RelationDesc) != slot->tts_tableOid) + { + /* + * Function router_get_slot() switched to new partition: need to + * reinitialize some PartitionRouterState variables + */ + state->current_rri = ExecLookupResultRelByOid(state->mt_state, + slot->tts_tableOid, false, false); + partition_changed = true; + } +#endif + + /* Build recheck constraint state lazily (and re-create constraint + * in case we start scan another relation) */ + router_lazy_init_constraint(state, partition_changed); + + /* Extract item pointer from current tuple */ + ctid = router_extract_ctid(state, slot); + Assert(ItemPointerIsValid(&ctid)); + + /* Magic: replace parent's ResultRelInfo with ours */ + estate->es_result_relation_info = state->current_rri; + +#if PG_VERSION_NUM >= 140000 /* after 86dc90056dfd */ + /* Store original slot */ + estate->es_original_tuple = slot; + /* + * "slot" contains new values of the changed columns plus row + * identity information such as CTID. + * Need to prepare a "newSlot" with full tuple for triggers in + * router_lock_or_delete_tuple(). But we should return old slot + * with CTID because this CTID is used in ExecModifyTable(). + */ + rri = state->current_rri; + + /* Initialize projection info if first time for this table. */ + if (unlikely(!rri->ri_projectNewInfoValid)) +#ifdef PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION + PgproExecInitUpdateProjection(state->mt_state, rri); +#else + ExecInitUpdateProjection(state->mt_state, rri); +#endif /* !PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION */ + + old_slot = rri->ri_oldTupleSlot; + /* Fetch the most recent version of old tuple. 
*/ + if (!table_tuple_fetch_row_version(rri->ri_RelationDesc, + &ctid, SnapshotAny, old_slot)) + elog(ERROR, "failed to fetch partition tuple being updated"); + + /* Build full tuple (using "old_slot" + changed from "slot"): */ + full_slot = ExecGetUpdateNewTuple(rri, slot, old_slot); +#endif /* PG_VERSION_NUM >= 140000 */ + + /* Lock or delete tuple from old partition */ + full_slot = router_lock_or_delete_tuple(state, full_slot, + &ctid, &deleted); + + /* We require a tuple (previous one has vanished) */ + if (TupIsNull(full_slot)) + goto take_next_tuple; + + /* Should we use UPDATE or DELETE + INSERT? */ + new_cmd = deleted ? CMD_INSERT : CMD_UPDATE; + + /* Alter ModifyTable's state and return */ + return router_set_slot(state, full_slot, new_cmd); + } + + return slot; +} + +void +partition_router_end(CustomScanState *node) +{ + PartitionRouterState *state = (PartitionRouterState *) node; + + Assert(list_length(node->custom_ps) == 1); + ExecEndNode((PlanState *) linitial(node->custom_ps)); + + EvalPlanQualEnd(&state->epqstate); +} + +void +partition_router_rescan(CustomScanState *node) +{ + elog(ERROR, "partition_router_rescan is not implemented"); +} + +void +partition_router_explain(CustomScanState *node, + List *ancestors, + ExplainState *es) +{ + /* Nothing to do here now */ +} + +/* Return tuple OR yield it and change ModifyTable's operation */ +static TupleTableSlot * +router_set_slot(PartitionRouterState *state, + TupleTableSlot *slot, + CmdType operation) +{ + ModifyTableState *mt_state = state->mt_state; + + /* Fast path for correct operation type */ + if (mt_state->operation == operation) + return slot; + + /* HACK: alter ModifyTable's state */ +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_nplans", "mt_whichplan" removed in 86dc90056dfd */ + MTHackField(mt_state, mt_nrels) = -mt_state->mt_nrels; +#else + MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; +#endif + MTHackField(mt_state, operation) = operation; + + /* HACK: disable AFTER 
STATEMENT triggers */ + MTDisableStmtTriggers(mt_state, state); + + if (!TupIsNull(slot)) + { + EState *estate = mt_state->ps.state; + +#if PG_VERSION_NUM < 140000 /* field "ri_junkFilter" removed in 86dc90056dfd */ + /* We should've cached junk filter already */ + Assert(state->junkfilter); + + /* HACK: conditionally disable junk filter in result relation */ + state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? + state->junkfilter : + NULL; +#endif + + /* Don't forget to set saved_slot! */ + state->yielded_slot = ExecInitExtraTupleSlotCompat(estate, + slot->tts_tupleDescriptor, + &TTSOpsHeapTuple); + ExecCopySlot(state->yielded_slot, slot); +#if PG_VERSION_NUM >= 140000 + Assert(estate->es_original_tuple != NULL); + state->yielded_original_slot = ExecInitExtraTupleSlotCompat(estate, + estate->es_original_tuple->tts_tupleDescriptor, + &TTSOpsHeapTuple); + ExecCopySlot(state->yielded_original_slot, estate->es_original_tuple); +#endif + } + + /* Yield */ + state->yielded = true; + return NULL; +} + +/* Fetch next tuple (either fresh or yielded) */ +static TupleTableSlot * +router_get_slot(PartitionRouterState *state, + EState *estate, + bool *should_process) +{ + TupleTableSlot *slot; + + /* Do we have a preserved slot? */ + if (state->yielded) + { + /* HACK: enable AFTER STATEMENT triggers */ + MTEnableStmtTriggers(state->mt_state, state); + + /* Reset saved slot */ + slot = state->yielded_slot; + state->yielded_slot = NULL; +#if PG_VERSION_NUM >= 140000 + estate->es_original_tuple = state->yielded_original_slot; + state->yielded_original_slot = NULL; +#endif + state->yielded = false; + + /* We shouldn't process preserved slot... 
*/ + *should_process = false; + } + else + { + /* Fetch next tuple */ + slot = ExecProcNode((PlanState *) linitial(state->css.custom_ps)); + + /* Restore operation type for AFTER STATEMENT triggers */ + if (TupIsNull(slot)) + slot = router_set_slot(state, NULL, CMD_UPDATE); + + /* But we have to process non-empty slot */ + *should_process = !TupIsNull(slot); + } + + return slot; +} + +static void +router_lazy_init_constraint(PartitionRouterState *state, bool reinit) +{ + if (state->constraint == NULL || reinit) + { + Relation rel = state->current_rri->ri_RelationDesc; + Oid relid = RelationGetRelid(rel); + List *clauses = NIL; + Expr *expr; + + while (OidIsValid(relid)) + { + /* It's probably OK if expression is NULL */ + expr = get_partition_constraint_expr(relid, false); + expr = expression_planner(expr); + + if (!expr) + break; + + /* Add this constraint to set */ + clauses = lappend(clauses, expr); + + /* Consider parent's check constraint as well */ + relid = get_parent_of_partition(relid); + } + + if (!clauses) + elog(ERROR, "no recheck constraint for relid %d", relid); + + state->constraint = ExecInitExpr(make_ands_explicit(clauses), NULL); + } +} + +/* Extract ItemPointer from tuple using JunkFilter */ +static ItemPointerData +router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot) +{ + Relation rel = state->current_rri->ri_RelationDesc; + char relkind = RelationGetForm(rel)->relkind; + + if (relkind == RELKIND_RELATION) + { + Datum ctid_datum; + bool ctid_isnull; + + ctid_datum = ExecGetJunkAttribute(slot, +#if PG_VERSION_NUM >= 140000 /* field "junkfilter" removed in 86dc90056dfd */ + state->current_rri->ri_RowIdAttNo, +#else + state->junkfilter->jf_junkAttNo, +#endif + &ctid_isnull); + + /* shouldn't ever get a null result... 
*/ + if (ctid_isnull) + elog(ERROR, "ctid is NULL"); + + /* Get item pointer to tuple */ + return *(ItemPointer) DatumGetPointer(ctid_datum); + } + else if (relkind == RELKIND_FOREIGN_TABLE) + elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); + else + elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); + return *(ItemPointer) NULL; /* keep compiler quiet, lol */ +} + +/* This is a heavily modified copy of ExecDelete from nodeModifyTable.c */ +static TupleTableSlot * +router_lock_or_delete_tuple(PartitionRouterState *state, + TupleTableSlot *slot, + ItemPointer tupleid, + bool *deleted /* return value #1 */) +{ + ResultRelInfo *rri; + Relation rel; + + EState *estate = state->css.ss.ps.state; + ExprContext *econtext = GetPerTupleExprContext(estate); + ExprState *constraint = state->constraint; + + /* Maintaining both >= 12 and earlier is quite horrible there, you know */ +#if PG_VERSION_NUM >= 120000 + TM_FailureData tmfd; + TM_Result result; +#else + HeapUpdateFailureData tmfd; + HTSU_Result result; +#endif + + EPQState *epqstate = &state->epqstate; + + LOCKMODE lockmode; + bool try_delete; + + *deleted = false; + + EvalPlanQualSetSlot(epqstate, slot); + + /* Get information on the (current) result relation */ + rri = estate->es_result_relation_info; + rel = rri->ri_RelationDesc; + lockmode = ExecUpdateLockMode(estate, rri); + +recheck: + /* Does tuple still belong to current partition? 
*/ + econtext->ecxt_scantuple = slot; + try_delete = !ExecCheck(constraint, econtext); + + /* Lock or delete tuple */ + if (try_delete) + { + /* BEFORE ROW UPDATE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_update_before_row) + { +#if PG_VERSION_NUM >= 120000 + if (!ExecBRUpdateTriggersCompat(estate, epqstate, rri, tupleid, NULL, slot)) + return NULL; +#else + slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); + if (TupIsNull(slot)) + return NULL; +#endif + } + + /* BEFORE ROW DELETE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_delete_before_row) + { + if (!ExecBRDeleteTriggersCompat(estate, epqstate, rri, tupleid, NULL, NULL)) + return NULL; + } + + /* Delete the tuple */ + result = heap_delete_compat(rel, tupleid, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */, &tmfd, + true /* changing partition */); + } + else + { + HeapTupleData tuple; + Buffer buffer; + + tuple.t_self = *tupleid; + /* xxx why we ever need this? 
*/ + result = heap_lock_tuple(rel, &tuple, + estate->es_output_cid, + lockmode, LockWaitBlock, + false, &buffer, &tmfd); + + ReleaseBuffer(buffer); + } + + /* Check lock/delete status */ + switch (result) + { +#if PG_VERSION_NUM >= 120000 + case TM_SelfModified: +#else + case HeapTupleSelfUpdated: +#endif + if (tmfd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + + /* Already deleted by self; nothing to do */ + return NULL; + +#if PG_VERSION_NUM >= 120000 + case TM_Ok: +#else + case HeapTupleMayBeUpdated: +#endif + break; + +#if PG_VERSION_NUM >= 120000 /* TM_Deleted/TM_Updated */ + case TM_Updated: + { + /* not sure this stuff is correct at all */ + TupleTableSlot *inputslot; + TupleTableSlot *epqslot; + + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + + /* + * Already know that we're going to need to do EPQ, so + * fetch tuple directly into the right slot. + */ + inputslot = EvalPlanQualSlot(epqstate, rel, rri->ri_RangeTableIndex); + + result = table_tuple_lock(rel, tupleid, + estate->es_snapshot, + inputslot, estate->es_output_cid, + LockTupleExclusive, LockWaitBlock, + TUPLE_LOCK_FLAG_FIND_LAST_VERSION, + &tmfd); + + switch (result) + { + case TM_Ok: + Assert(tmfd.traversed); + epqslot = EvalPlanQual(epqstate, + rel, + rri->ri_RangeTableIndex, + inputslot); + if (TupIsNull(epqslot)) + /* Tuple not passing quals anymore, exiting... 
*/ + return NULL; + + /* just copied from below, ha */ + *tupleid = tmfd.ctid; + slot = epqslot; + goto recheck; + + case TM_SelfModified: + + /* + * This can be reached when following an update + * chain from a tuple updated by another session, + * reaching a tuple that was already updated in + * this transaction. If previously updated by this + * command, ignore the delete, otherwise error + * out. + * + * See also TM_SelfModified response to + * table_tuple_delete() above. + */ + if (tmfd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be deleted was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + return NULL; + + case TM_Deleted: + /* tuple already deleted; nothing to do */ + return NULL; + + default: + + /* + * TM_Invisible should be impossible because we're + * waiting for updated row versions, and would + * already have errored out if the first version + * is invisible. + * + * TM_Updated should be impossible, because we're + * locking the latest version via + * TUPLE_LOCK_FLAG_FIND_LAST_VERSION. 
+ */ + elog(ERROR, "unexpected table_tuple_lock status: %u", + result); + return NULL; + } + + Assert(false); + break; + } + + + case TM_Deleted: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent delete"))); + /* tuple already deleted; nothing to do */ + return NULL; + +#else + case HeapTupleUpdated: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (ItemPointerIndicatesMovedPartitions(&tmfd.ctid)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be updated was already moved to another partition due to concurrent update"))); + + if (!ItemPointerEquals(tupleid, &tmfd.ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + epqstate, + rel, + rri->ri_RangeTableIndex, + LockTupleExclusive, + &tmfd.ctid, + tmfd.xmax); + + if (!TupIsNull(epqslot)) + { + Assert(tupleid != NULL); + *tupleid = tmfd.ctid; + slot = epqslot; + goto recheck; + } + } + + /* Tuple already deleted; nothing to do */ + return NULL; +#endif /* TM_Deleted/TM_Updated */ + +#if PG_VERSION_NUM >= 120000 + case TM_Invisible: +#else + case HeapTupleInvisible: +#endif + elog(ERROR, "attempted to lock invisible tuple"); + break; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + break; + } + + /* Additional work for delete s*/ + if (try_delete) + { + /* AFTER ROW DELETE triggers */ + ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); + } + + *deleted = try_delete; + return slot; +} diff --git a/src/pathman.h b/src/pathman.h deleted file mode 100644 index 2c665895..00000000 --- a/src/pathman.h +++ /dev/null @@ -1,180 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * pathman.h - * structures and prototypes for pathman functions - * - * Copyright (c) 
2015-2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef PATHMAN_H -#define PATHMAN_H - -#include "relation_info.h" -#include "rangeset.h" - -#include "postgres.h" -#include "nodes/makefuncs.h" -#include "nodes/primnodes.h" -#include "nodes/execnodes.h" -#include "optimizer/planner.h" -#include "parser/parsetree.h" - - -/* Check PostgreSQL version (9.5.4 contains an important fix for BGW) */ -#if PG_VERSION_NUM < 90503 - #error "Cannot build pg_pathman with PostgreSQL version lower than 9.5.3" -#elif PG_VERSION_NUM < 90504 - #warning "It is STRONGLY recommended to use pg_pathman with PostgreSQL 9.5.4 since it contains important fixes" -#endif - -/* Get CString representation of Datum (simple wrapper) */ -#ifdef USE_ASSERT_CHECKING - #include "utils.h" - #define DebugPrintDatum(datum, typid) ( datum_to_cstring((datum), (typid)) ) -#else - #define DebugPrintDatum(datum, typid) ( "[use --enable-cassert]" ) -#endif - - -/* - * Definitions for the "pathman_config" table. - */ -#define PATHMAN_CONFIG "pathman_config" -#define Natts_pathman_config 4 -#define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ -#define Anum_pathman_config_attname 2 /* partitioned column (text) */ -#define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ -#define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. (text) */ - -/* type modifier (typmod) for 'range_interval' */ -#define PATHMAN_CONFIG_interval_typmod -1 - -/* - * Definitions for the "pathman_config_params" table - */ -#define PATHMAN_CONFIG_PARAMS "pathman_config_params" -#define Natts_pathman_config_params 3 -#define Anum_pathman_config_params_partrel 1 /* primary key */ -#define Anum_pathman_config_params_enable_parent 2 /* include parent into plan */ -#define Anum_pathman_config_params_auto 3 /* auto partitions creation */ - -/* - * Cache current PATHMAN_CONFIG relid (set during load_config()). 
- */ -extern Oid pathman_config_relid; -extern Oid pathman_config_params_relid; - -/* - * Just to clarify our intentions (return the corresponding relid). - */ -Oid get_pathman_config_relid(void); -Oid get_pathman_config_params_relid(void); - -/* - * pg_pathman's global state structure. - */ -typedef struct PathmanState -{ - LWLock *dsm_init_lock; /* unused */ -} PathmanState; - - -/* - * Result of search_range_partition_eq(). - */ -typedef enum -{ - SEARCH_RANGEREL_OUT_OF_RANGE = 0, - SEARCH_RANGEREL_GAP, - SEARCH_RANGEREL_FOUND -} search_rangerel_result; - - -/* - * The list of partitioned relation relids that must be handled by pg_pathman - */ -extern List *inheritance_enabled_relids; - -/* - * This list is used to ensure that partitioned relation isn't used both - * with and without ONLY modifiers - */ -extern List *inheritance_disabled_relids; - -/* - * pg_pathman's global state. - */ -extern PathmanState *pmstate; - - -int append_child_relation(PlannerInfo *root, RelOptInfo *rel, Index rti, - RangeTblEntry *rte, int index, Oid childOID, List *wrappers); - -search_rangerel_result search_range_partition_eq(const Datum value, - FmgrInfo *cmp_func, - const PartRelationInfo *prel, - RangeEntry *out_re); - -uint32 hash_to_part_index(uint32 value, uint32 partitions); - -void handle_modification_query(Query *parse); -void disable_inheritance(Query *parse); -void disable_inheritance_cte(Query *parse); -void disable_inheritance_subselect(Query *parse); - -/* copied from allpaths.h */ -void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, - Index rti, RangeTblEntry *rte); -void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, - RangeTblEntry *rte, PathKey *pathkeyAsc, - PathKey *pathkeyDesc); - -typedef struct -{ - const Node *orig; /* examined expression */ - List *args; /* extracted from 'orig' */ - List *rangeset; /* IndexRanges representing selected parts */ - bool found_gap; /* were there any gaps? 
*/ - double paramsel; /* estimated selectivity */ -} WrapperNode; - -typedef struct -{ - const PartRelationInfo *prel; /* main partitioning structure */ - ExprContext *econtext; /* for ExecEvalExpr() */ - bool for_insert; /* are we in PartitionFilter now? */ -} WalkerContext; - -/* - * Usual initialization procedure for WalkerContext. - */ -#define InitWalkerContext(context, prel_info, ecxt, for_ins) \ - do { \ - (context)->prel = (prel_info); \ - (context)->econtext = (ecxt); \ - (context)->for_insert = (for_ins); \ - } while (0) - -/* Check that WalkerContext contains ExprContext (plan execution stage) */ -#define WcxtHasExprContext(wcxt) ( (wcxt)->econtext ) - -/* - * Functions for partition creation, use create_partitions(). - */ -Oid create_partitions(Oid relid, Datum value, Oid value_type); -Oid create_partitions_bg_worker(Oid relid, Datum value, Oid value_type); -Oid create_partitions_internal(Oid relid, Datum value, Oid value_type); - -void select_range_partitions(const Datum value, - FmgrInfo *cmp_func, - const RangeEntry *ranges, - const int nranges, - const int strategy, - WrapperNode *result); - -/* Examine expression in order to select partitions. 
*/ -WrapperNode *walk_expr_tree(Expr *expr, WalkerContext *context); - -#endif /* PATHMAN_H */ diff --git a/src/pathman_workers.c b/src/pathman_workers.c index c038dea9..bf23bd94 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -15,9 +15,11 @@ */ #include "init.h" +#include "partition_creation.h" #include "pathman_workers.h" #include "relation_info.h" #include "utils.h" +#include "xact_handling.h" #include "access/htup_details.h" #include "access/xact.h" @@ -29,10 +31,11 @@ #include "storage/dsm.h" #include "storage/ipc.h" #include "storage/latch.h" +#include "storage/proc.h" #include "utils/builtins.h" #include "utils/datum.h" -#include "utils/memutils.h" #include "utils/lsyscache.h" +#include "utils/syscache.h" #include "utils/typcache.h" #include "utils/resowner.h" #include "utils/snapmgr.h" @@ -45,15 +48,19 @@ PG_FUNCTION_INFO_V1( show_concurrent_part_tasks_internal ); PG_FUNCTION_INFO_V1( stop_concurrent_part_task ); +/* + * Dynamically resolve functions (for BGW API). + */ +extern PGDLLEXPORT void bgw_main_spawn_partitions(Datum main_arg); +extern PGDLLEXPORT void bgw_main_concurrent_part(Datum main_arg); + + static void handle_sigterm(SIGNAL_ARGS); static void bg_worker_load_config(const char *bgw_name); -static void start_bg_worker(const char bgworker_name[BGW_MAXLEN], - bgworker_main_type bgw_main_func, +static bool start_bgworker(const char *bgworker_name, + const char *bgworker_proc, Datum bgw_arg, bool wait_for_shutdown); -static void bgw_main_spawn_partitions(Datum main_arg); -static void bgw_main_concurrent_part(Datum main_arg); - /* * Function context for concurrent_part_tasks_internal() SRF. @@ -77,12 +84,16 @@ static const char *spawn_partitions_bgw = "SpawnPartitionsWorker"; static const char *concurrent_part_bgw = "ConcurrentPartWorker"; +/* Used for preventing spawn bgw recursion trouble */ +static bool am_spawn_bgw = false; + /* * Estimate amount of shmem needed for concurrent partitioning. 
*/ Size estimate_concurrent_part_task_slots_size(void) { + /* NOTE: we suggest that max_worker_processes is in PGC_POSTMASTER */ return sizeof(ConcurrentPartSlot) * PART_WORKER_SLOTS; } @@ -118,6 +129,7 @@ init_concurrent_part_task_slots(void) /* * Handle SIGTERM in BGW's process. + * Use it in favor of bgworker_die(). */ static void handle_sigterm(SIGNAL_ARGS) @@ -153,9 +165,9 @@ bg_worker_load_config(const char *bgw_name) /* * Common function to start background worker. */ -static void -start_bg_worker(const char bgworker_name[BGW_MAXLEN], - bgworker_main_type bgw_main_func, +static bool +start_bgworker(const char *bgworker_name, + const char *bgworker_proc, Datum bgw_arg, bool wait_for_shutdown) { #define HandleError(condition, new_state) \ @@ -176,13 +188,21 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN], pid_t pid; /* Initialize worker struct */ - memcpy(worker.bgw_name, bgworker_name, BGW_MAXLEN); - worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; - worker.bgw_start_time = BgWorkerStart_RecoveryFinished; - worker.bgw_restart_time = BGW_NEVER_RESTART; - worker.bgw_main = bgw_main_func; - worker.bgw_main_arg = bgw_arg; - worker.bgw_notify_pid = MyProcPid; + memset(&worker, 0, sizeof(worker)); + + snprintf(worker.bgw_name, BGW_MAXLEN, "%s", bgworker_name); + snprintf(worker.bgw_function_name, BGW_MAXLEN, "%s", bgworker_proc); + snprintf(worker.bgw_library_name, BGW_MAXLEN, "pg_pathman"); + + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 && PG_VERSION_NUM < 140000 /* FIXME: need to remove last condition in future */ + BGWORKER_CLASS_PERSISTENT | +#endif + BGWORKER_BACKEND_DATABASE_CONNECTION; + worker.bgw_start_time = BgWorkerStart_RecoveryFinished; + worker.bgw_restart_time = BGW_NEVER_RESTART; + worker.bgw_main_arg = bgw_arg; + worker.bgw_notify_pid = MyProcPid; /* Start dynamic worker */ bgw_started = RegisterDynamicBackgroundWorker(&worker, &bgw_handle); @@ -205,20 
+225,31 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN], switch (exec_state) { + /* Caller might want to handle this case */ case BGW_COULD_NOT_START: - elog(ERROR, "Unable to create background %s for pg_pathman", - bgworker_name); - break; + return false; case BGW_PM_DIED: ereport(ERROR, (errmsg("Postmaster died during the pg_pathman background worker process"), - errhint("More details may be available in the server log."))); + errhint("More details may be available in the server log."))); break; default: break; } + + return true; +} + +/* + * Show generic error message if we failed to start bgworker. + */ +static inline void +start_bgworker_errmsg(const char *bgworker_name) +{ + ereport(ERROR, (errmsg("could not start %s", bgworker_name), + errhint("consider increasing max_worker_processes"))); } @@ -251,12 +282,17 @@ create_partitions_bg_worker_segment(Oid relid, Datum value, Oid value_type) /* Initialize BGW args */ args = (SpawnPartitionArgs *) dsm_segment_address(segment); - args->userid = GetAuthenticatedUserId(); - + args->userid = GetUserId(); args->result = InvalidOid; args->dbid = MyDatabaseId; args->partitioned_table = relid; +#if PG_VERSION_NUM >= 90600 + /* Initialize args for BecomeLockGroupMember() */ + args->parallel_master_pgproc = MyProc; + args->parallel_master_pid = MyProcPid; +#endif + /* Write value-related stuff */ args->value_type = value_type; args->value_size = datum_size; @@ -275,23 +311,36 @@ create_partitions_bg_worker_segment(Oid relid, Datum value, Oid value_type) * NB: This function should not be called directly, use create_partitions() instead. 
*/ Oid -create_partitions_bg_worker(Oid relid, Datum value, Oid value_type) +create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) { dsm_segment *segment; dsm_handle segment_handle; SpawnPartitionArgs *bgw_args; Oid child_oid = InvalidOid; + if (am_spawn_bgw) + ereport(ERROR, + (errmsg("Attempt to spawn partition using bgw from bgw spawning partitions"), + errhint("Probably init_callback has INSERT to its table?"))); + /* Create a dsm segment for the worker to pass arguments */ segment = create_partitions_bg_worker_segment(relid, value, value_type); segment_handle = dsm_segment_handle(segment); bgw_args = (SpawnPartitionArgs *) dsm_segment_address(segment); +#if PG_VERSION_NUM >= 90600 + /* Become locking group leader */ + BecomeLockGroupLeader(); +#endif + /* Start worker and wait for it to finish */ - start_bg_worker(spawn_partitions_bgw, - bgw_main_spawn_partitions, - UInt32GetDatum(segment_handle), - true); + if (!start_bgworker(spawn_partitions_bgw, + CppAsString(bgw_main_spawn_partitions), + UInt32GetDatum(segment_handle), + true)) + { + start_bgworker_errmsg(spawn_partitions_bgw); + } /* Save the result (partition Oid) */ child_oid = bgw_args->result; @@ -300,9 +349,10 @@ create_partitions_bg_worker(Oid relid, Datum value, Oid value_type) dsm_detach(segment); if (child_oid == InvalidOid) - elog(ERROR, - "Attempt to append new partitions to relation \"%s\" failed", - get_rel_name_or_relid(relid)); + ereport(ERROR, + (errmsg("attempt to spawn new partitions of relation \"%s\" failed", + get_rel_name_or_relid(relid)), + errhint("See server log for more details."))); return child_oid; } @@ -310,13 +360,14 @@ create_partitions_bg_worker(Oid relid, Datum value, Oid value_type) /* * Entry point for SpawnPartitionsWorker's process. 
*/ -static void +void bgw_main_spawn_partitions(Datum main_arg) { dsm_handle handle = DatumGetUInt32(main_arg); dsm_segment *segment; SpawnPartitionArgs *args; Datum value; + Oid result; /* Establish signal handlers before unblocking signals. */ pqsignal(SIGTERM, handle_sigterm); @@ -324,6 +375,8 @@ bgw_main_spawn_partitions(Datum main_arg) /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); + am_spawn_bgw = true; + /* Create resource owner */ CurrentResourceOwner = ResourceOwnerCreate(NULL, spawn_partitions_bgw); @@ -337,8 +390,15 @@ bgw_main_spawn_partitions(Datum main_arg) spawn_partitions_bgw, MyProcPid); args = dsm_segment_address(segment); +#if PG_VERSION_NUM >= 90600 + /* Join locking group. If we can't join the group, quit */ + if (!BecomeLockGroupMember(args->parallel_master_pgproc, + args->parallel_master_pid)) + return; +#endif /* Establish connection and start transaction */ - BackgroundWorkerInitializeConnectionByOid(args->dbid, args->userid); + + BackgroundWorkerInitializeConnectionByOidCompat(args->dbid, args->userid); /* Start new transaction (syscache access etc.) */ StartTransactionCommand(); @@ -359,17 +419,17 @@ bgw_main_spawn_partitions(Datum main_arg) DebugPrintDatum(value, args->value_type), MyProcPid); #endif - /* Create partitions and save the Oid of the last one */ - args->result = create_partitions_internal(args->partitioned_table, - value, /* unpacked Datum */ - args->value_type); + /* + * Create partitions and save the Oid of the last one. + * If we fail here, args->result is 0 since it is zeroed on initialization. 
+ */ + result = create_partitions_for_value_internal(args->partitioned_table, + value, /* unpacked Datum */ + args->value_type); /* Finish transaction in an appropriate way */ - if (args->result == InvalidOid) - AbortCurrentTransaction(); - else - CommitTransactionCommand(); - + CommitTransactionCommand(); + args->result = result; dsm_detach(segment); } @@ -380,19 +440,36 @@ bgw_main_spawn_partitions(Datum main_arg) * ------------------------------------- */ +/* Free bgworker's CPS slot */ +static void +free_cps_slot(int code, Datum arg) +{ + ConcurrentPartSlot *part_slot = (ConcurrentPartSlot *) DatumGetPointer(arg); + + cps_set_status(part_slot, CPS_FREE); +} + /* * Entry point for ConcurrentPartWorker's process. */ -static void +void bgw_main_concurrent_part(Datum main_arg) { - int rows; - bool failed; - int failures_count = 0; - char *sql = NULL; ConcurrentPartSlot *part_slot; + char *sql = NULL; + int64 rows; + volatile bool failed; + volatile int failures_count = 0; + LOCKMODE lockmode = RowExclusiveLock; - /* Establish signal handlers before unblocking signals. 
*/ + /* Update concurrent part slot */ + part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)]; + part_slot->pid = MyProcPid; + + /* Establish atexit callback that will fre CPS slot */ + on_proc_exit(free_cps_slot, PointerGetDatum(part_slot)); + + /* Establish signal handlers before unblocking signals */ pqsignal(SIGTERM, handle_sigterm); /* We're now ready to receive signals */ @@ -401,15 +478,11 @@ bgw_main_concurrent_part(Datum main_arg) /* Create resource owner */ CurrentResourceOwner = ResourceOwnerCreate(NULL, concurrent_part_bgw); - /* Update concurrent part slot */ - part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)]; - part_slot->pid = MyProcPid; - /* Disable auto partition propagation */ SetAutoPartitionEnabled(false); /* Establish connection and start transaction */ - BackgroundWorkerInitializeConnectionByOid(part_slot->dbid, part_slot->userid); + BackgroundWorkerInitializeConnectionByOidCompat(part_slot->dbid, part_slot->userid); /* Initialize pg_pathman's local config */ StartTransactionCommand(); @@ -419,37 +492,47 @@ bgw_main_concurrent_part(Datum main_arg) /* Do the job */ do { - MemoryContext old_mcxt; + MemoryContext old_mcxt; Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; - bool nulls[2] = { false, false }; + + volatile bool rel_locked = false; /* Reset loop variables */ failed = false; rows = 0; + CHECK_FOR_INTERRUPTS(); + /* Start new transaction (syscache access etc.) 
*/ StartTransactionCommand(); /* We'll need this to recover from errors */ old_mcxt = CurrentMemoryContext; - SPI_connect(); + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect using SPI"); + PushActiveSnapshot(GetTransactionSnapshot()); /* Prepare the query if needed */ if (sql == NULL) { MemoryContext current_mcxt; + char *pathman_schema; + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* - * Allocate as SQL query in top memory context because current + * Allocate SQL query in TopPathmanContext because current * context will be destroyed after transaction finishes */ - current_mcxt = MemoryContextSwitchTo(TopMemoryContext); - sql = psprintf("SELECT %s._partition_data_concurrent($1::oid, p_limit:=$2)", - get_namespace_name(get_pathman_schema())); + current_mcxt = MemoryContextSwitchTo(TopPathmanContext); + sql = psprintf("SELECT %s._partition_data_concurrent($1::regclass, NULL::text, NULL::text, p_limit:=$2)", + pathman_schema); MemoryContextSwitchTo(current_mcxt); } @@ -459,76 +542,121 @@ bgw_main_concurrent_part(Datum main_arg) int ret; bool isnull; - ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); - if (ret == SPI_OK_SELECT) + /* Lock relation for DELETE and INSERT */ + if (!ConditionalLockRelationOid(part_slot->relid, lockmode)) { - TupleDesc tupdesc = SPI_tuptable->tupdesc; - HeapTuple tuple = SPI_tuptable->vals[0]; + elog(ERROR, "could not take lock on relation %u", part_slot->relid); + } - Assert(SPI_processed == 1); /* there should be 1 result at most */ + /* Great, now relation is locked */ + rel_locked = true; - rows = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + /* Make sure that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid))) + { + /* Exit after we raise ERROR */ + failures_count = PART_WORKER_MAX_ATTEMPTS; + + elog(ERROR, "relation %u does not exist", 
part_slot->relid); + } + /* Make sure that relation has partitions */ + if (!has_pathman_relation_info(part_slot->relid)) + { + /* Exit after we raise ERROR */ + failures_count = PART_WORKER_MAX_ATTEMPTS; + + elog(ERROR, "relation \"%s\" is not partitioned", + get_rel_name(part_slot->relid)); + } + + /* Call concurrent partitioning function */ + ret = SPI_execute_with_args(sql, 2, types, vals, NULL, false, 0); + if (ret == SPI_OK_SELECT) + { + TupleDesc tupdesc = SPI_tuptable->tupdesc; + HeapTuple tuple = SPI_tuptable->vals[0]; + + /* There should be 1 result at most */ + Assert(SPI_processed == 1); + + /* Extract number of processed rows */ + rows = DatumGetInt64(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + Assert(TupleDescAttr(tupdesc, 0)->atttypid == INT8OID); /* check type */ Assert(!isnull); /* ... and ofc it must not be NULL */ } + /* Else raise generic error */ + else elog(ERROR, "partitioning function returned %u", ret); + + /* Finally, unlock our partitioned table */ + UnlockRelationOid(part_slot->relid, lockmode); } PG_CATCH(); { - ErrorData *error; - char *sleep_time_str; + /* + * The most common exception we can catch here is a deadlock with + * concurrent user queries. Check that attempts count doesn't exceed + * some reasonable value. 
+ */ + ErrorData *error; + + /* Unlock relation if we caught ERROR too early */ + if (rel_locked) + UnlockRelationOid(part_slot->relid, lockmode); + + /* Increase number of failures and set 'failed' status */ + failures_count++; + failed = true; /* Switch to the original context & copy edata */ MemoryContextSwitchTo(old_mcxt); error = CopyErrorData(); FlushErrorState(); - /* Print messsage for this BGWorker to server log */ - sleep_time_str = datum_to_cstring(Float8GetDatum(part_slot->sleep_time), - FLOAT8OID); - failures_count++; + /* Print message for this BGWorker to server log */ ereport(LOG, (errmsg("%s: %s", concurrent_part_bgw, error->message), - errdetail("Attempt: %d/%d, sleep time: %s", + errdetail("attempt: %d/%d, sleep time: %.2f", failures_count, PART_WORKER_MAX_ATTEMPTS, - sleep_time_str))); - pfree(sleep_time_str); /* free the time string */ + (float) part_slot->sleep_time))); + /* Finally, free error data */ FreeErrorData(error); - - /* - * The most common exception we can catch here is a deadlock with - * concurrent user queries. Check that attempts count doesn't exceed - * some reasonable value - */ - if (failures_count >= PART_WORKER_MAX_ATTEMPTS) - { - /* Mark slot as FREE */ - cps_set_status(part_slot, CPS_FREE); - - elog(LOG, - "Concurrent partitioning worker has canceled the task because " - "maximum amount of attempts (%d) had been exceeded. 
" - "See the error message below", - PART_WORKER_MAX_ATTEMPTS); - - return; /* exit quickly */ - } - - /* Set 'failed' flag */ - failed = true; } PG_END_TRY(); SPI_finish(); PopActiveSnapshot(); - if (failed) + /* We've run out of attempts, exit */ + if (failures_count >= PART_WORKER_MAX_ATTEMPTS) { - /* Abort transaction and sleep for a second */ AbortCurrentTransaction(); + + /* Mark slot as FREE */ + cps_set_status(part_slot, CPS_FREE); + + elog(LOG, + "concurrent partitioning worker has canceled the task because " + "maximum amount of attempts (%d) had been exceeded, " + "see the error message below", + PART_WORKER_MAX_ATTEMPTS); + + return; /* time to exit */ + } + + /* Failed this time, wait */ + else if (failed) + { + /* Abort transaction */ + AbortCurrentTransaction(); + + /* Sleep for a specified amount of time (default 1s) */ DirectFunctionCall1(pg_sleep, Float8GetDatum(part_slot->sleep_time)); } + + /* Everything is fine */ else { /* Commit transaction and reset 'failures_count' */ @@ -538,25 +666,22 @@ bgw_main_concurrent_part(Datum main_arg) /* Add rows to total_rows */ SpinLockAcquire(&part_slot->mutex); part_slot->total_rows += rows; -/* Report debug message */ + SpinLockRelease(&part_slot->mutex); + #ifdef USE_ASSERT_CHECKING - elog(DEBUG1, "%s: relocated %d rows, total: %lu [%u]", - concurrent_part_bgw, rows, part_slot->total_rows, MyProcPid); + /* Report debug message */ + elog(DEBUG1, "%s: " + "relocated" INT64_FORMAT "rows, " + "total: " INT64_FORMAT, + concurrent_part_bgw, rows, part_slot->total_rows); #endif - SpinLockRelease(&part_slot->mutex); } /* If other backend requested to stop us, quit */ if (cps_check_status(part_slot) == CPS_STOPPING) break; } - while(rows > 0 || failed); /* do while there's still rows to be relocated */ - - /* Reclaim the resources */ - pfree(sql); - - /* Mark slot as FREE */ - cps_set_status(part_slot, CPS_FREE); + while(rows > 0 || failed); /* do while there's still rows to be relocated */ } @@ -573,21 +698,49 @@ 
bgw_main_concurrent_part(Datum main_arg) Datum partition_table_concurrently(PG_FUNCTION_ARGS) { -#define tostr(str) ( #str ) /* convert function's name to literal */ - - Oid relid = PG_GETARG_OID(0); - int empty_slot_idx = -1; /* do we have a slot for BGWorker? */ - int i; + Oid relid = PG_GETARG_OID(0); + int32 batch_size = PG_GETARG_INT32(1); + float8 sleep_time = PG_GETARG_FLOAT8(2); + int empty_slot_idx = -1, /* do we have a slot for BGWorker? */ + i; + TransactionId rel_xmin; + LOCKMODE lockmode = ShareUpdateExclusiveLock; + char *pathman_schema; + + /* Check batch_size */ + if (batch_size < 1 || batch_size > 10000) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'batch_size' should not be less than 1" + " or greater than 10000"))); + + /* Check sleep_time */ + if (sleep_time < 0.5) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'sleep_time' should not be less than 0.5"))); + + check_relation_oid(relid); + + /* Prevent concurrent function calls */ + LockRelationOid(relid, lockmode); /* Check if relation is a partitioned table */ - shout_if_prel_is_invalid(relid, - /* We also lock the parent relation */ - get_pathman_relation_info_after_lock(relid, true), - /* Partitioning type does not matter here */ - PT_INDIFFERENT); + if (!has_pathman_relation_info(relid)) + shout_if_prel_is_invalid(relid, NULL, PT_ANY); + + /* Check that partitioning operation result is visible */ + if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin, NULL)) + { + if (!xact_object_is_visible(rel_xmin)) + ereport(ERROR, (errmsg("cannot start %s", concurrent_part_bgw), + errdetail("table is being partitioned now"))); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not partitioned", + get_rel_name_or_relid(relid)))); + /* * Look for an empty slot and also check that a concurrent - * partitioning operation for this table hasn't been started yet + * partitioning operation for this table 
hasn't started yet. */ for (i = 0; i < PART_WORKER_SLOTS; i++) { @@ -616,9 +769,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS) if (empty_slot_idx >= 0 && empty_slot_idx != i) SpinLockRelease(&concurrent_part_slots[empty_slot_idx].mutex); - elog(ERROR, - "Table \"%s\" is already being partitioned", - get_rel_name(relid)); + ereport(ERROR, (errmsg("table \"%s\" is already being partitioned", + get_rel_name(relid)))); } /* Normally we don't want to keep it */ @@ -628,31 +780,47 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Looks like we could not find an empty slot */ if (empty_slot_idx < 0) - elog(ERROR, "No empty worker slots found"); + ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), + errmsg("no empty worker slots found"), + errhint("consider increasing max_worker_processes"))); else { /* Initialize concurrent part slot */ InitConcurrentPartSlot(&concurrent_part_slots[empty_slot_idx], - GetAuthenticatedUserId(), CPS_WORKING, - MyDatabaseId, relid, 1000, 1.0); + GetUserId(), CPS_WORKING, MyDatabaseId, + relid, batch_size, sleep_time); /* Now we can safely unlock slot for new BGWorker */ SpinLockRelease(&concurrent_part_slots[empty_slot_idx].mutex); } /* Start worker (we should not wait) */ - start_bg_worker(concurrent_part_bgw, - bgw_main_concurrent_part, - Int32GetDatum(empty_slot_idx), - false); + if (!start_bgworker(concurrent_part_bgw, + CppAsString(bgw_main_concurrent_part), + Int32GetDatum(empty_slot_idx), + false)) + { + /* Couldn't start, free CPS slot */ + cps_set_status(&concurrent_part_slots[empty_slot_idx], CPS_FREE); + + start_bgworker_errmsg(concurrent_part_bgw); + } + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* Tell user everything's fine */ elog(NOTICE, - "Worker started. 
You can stop it " - "with the following command: select %s('%s');", - tostr(stop_concurrent_part_task), /* convert function's name to literal */ + "worker started, you can stop it " + "with the following command: select %s.%s('%s');", + pathman_schema, + CppAsString(stop_concurrent_part_task), get_rel_name(relid)); + /* We don't need this lock anymore */ + UnlockRelationOid(relid, lockmode); + PG_RETURN_VOID(); } @@ -683,7 +851,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) userctx->cur_idx = 0; /* Create tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(Natts_pathman_cp_tasks, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_cp_tasks, false); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_userid, "userid", REGROLEOID, -1, 0); @@ -694,7 +862,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_relid, "relid", REGCLASSOID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_processed, - "processed", INT4OID, -1, 0); + "processed", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_status, "status", TEXTOID, -1, 0); @@ -710,40 +878,32 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) /* Iterate through worker slots */ for (i = userctx->cur_idx; i < PART_WORKER_SLOTS; i++) { - ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i]; + ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i], + slot_copy; HeapTuple htup = NULL; - HOLD_INTERRUPTS(); + /* Copy slot to process local memory */ SpinLockAcquire(&cur_slot->mutex); + memcpy(&slot_copy, cur_slot, sizeof(ConcurrentPartSlot)); + SpinLockRelease(&cur_slot->mutex); - if (cur_slot->worker_status != CPS_FREE) + if (slot_copy.worker_status != CPS_FREE) { Datum values[Natts_pathman_cp_tasks]; bool isnull[Natts_pathman_cp_tasks] = { 0 }; - values[Anum_pathman_cp_tasks_userid - 1] = cur_slot->userid; - values[Anum_pathman_cp_tasks_pid - 1] = cur_slot->pid; - values[Anum_pathman_cp_tasks_dbid - 1] = 
cur_slot->dbid; - values[Anum_pathman_cp_tasks_relid - 1] = cur_slot->relid; - values[Anum_pathman_cp_tasks_processed - 1] = cur_slot->total_rows; + values[Anum_pathman_cp_tasks_userid - 1] = slot_copy.userid; + values[Anum_pathman_cp_tasks_pid - 1] = slot_copy.pid; + values[Anum_pathman_cp_tasks_dbid - 1] = slot_copy.dbid; + values[Anum_pathman_cp_tasks_relid - 1] = slot_copy.relid; + + /* Record processed rows */ + values[Anum_pathman_cp_tasks_processed - 1] = + Int64GetDatum(slot_copy.total_rows); /* Now build a status string */ - switch(cur_slot->worker_status) - { - case CPS_WORKING: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("working")); - break; - - case CPS_STOPPING: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("stopping")); - break; - - default: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("[unknown]")); - } + values[Anum_pathman_cp_tasks_status - 1] = + CStringGetTextDatum(cps_print_status(slot_copy.worker_status)); /* Form output tuple */ htup = heap_form_tuple(funcctx->tuple_desc, values, isnull); @@ -752,9 +912,6 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) userctx->cur_idx = i + 1; } - SpinLockRelease(&cur_slot->mutex); - RESUME_INTERRUPTS(); - /* Return tuple if needed */ if (htup) SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(htup)); @@ -778,29 +935,28 @@ stop_concurrent_part_task(PG_FUNCTION_ARGS) { ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i]; - HOLD_INTERRUPTS(); SpinLockAcquire(&cur_slot->mutex); if (cur_slot->worker_status != CPS_FREE && cur_slot->relid == relid && cur_slot->dbid == MyDatabaseId) { - elog(NOTICE, "Worker will stop after it finishes current batch"); - /* Change worker's state & set 'worker_found' */ cur_slot->worker_status = CPS_STOPPING; worker_found = true; } SpinLockRelease(&cur_slot->mutex); - RESUME_INTERRUPTS(); } if (worker_found) + { + elog(NOTICE, "worker will stop after it finishes current 
batch"); PG_RETURN_BOOL(true); + } else { - elog(ERROR, "Cannot find worker for relation \"%s\"", + elog(ERROR, "cannot find worker for relation \"%s\"", get_rel_name_or_relid(relid)); PG_RETURN_BOOL(false); /* keep compiler happy */ diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 8f35b152..6e835a1f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -4,1051 +4,870 @@ * This module sets planner hooks, handles SELECT queries and produces * paths for partitioned tables * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2021, Postgres Professional * * ------------------------------------------------------------------------ */ -#include "pathman.h" +#include "compat/pg_compat.h" +#include "compat/rowmarks_fix.h" + #include "init.h" #include "hooks.h" -#include "utils.h" +#include "pathman.h" #include "partition_filter.h" -#include "runtimeappend.h" +#include "partition_router.h" +#include "partition_overseer.h" +#include "planner_tree_modification.h" +#include "runtime_append.h" #include "runtime_merge_append.h" -#include "xact_handling.h" #include "postgres.h" -#include "access/heapam.h" +#include "access/genam.h" #include "access/htup_details.h" -#include "access/transam.h" +#include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/xact.h" -#include "catalog/pg_cast.h" +#include "catalog/pg_collation.h" +#include "catalog/indexing.h" #include "catalog/pg_type.h" -#include "executor/spi.h" +#include "catalog/pg_extension.h" +#include "commands/extension.h" #include "foreign/fdwapi.h" -#include "fmgr.h" #include "miscadmin.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#endif #include "optimizer/clauses.h" -#include "optimizer/prep.h" +#include "optimizer/plancat.h" #include "optimizer/restrictinfo.h" #include "optimizer/cost.h" -#include "utils/builtins.h" #include "utils/datum.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" +#include 
"utils/fmgroids.h" #include "utils/rel.h" +#include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/selfuncs.h" -#include "utils/snapmgr.h" #include "utils/typcache.h" PG_MODULE_MAGIC; -List *inheritance_disabled_relids = NIL; -List *inheritance_enabled_relids = NIL; -PathmanState *pmstate; -Oid pathman_config_relid = InvalidOid; -Oid pathman_config_params_relid = InvalidOid; +Oid pathman_config_relid = InvalidOid, + pathman_config_params_relid = InvalidOid; /* pg module functions */ void _PG_init(void); -/* Utility functions */ -static Node *wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue); -static bool disable_inheritance_subselect_walker(Node *node, void *context); - -/* "Partition creation"-related functions */ -static Datum extract_binary_interval_from_text(Datum interval_text, - Oid part_atttype, - Oid *interval_type); -static bool spawn_partitions(Oid partitioned_rel, - Datum value, - Datum leading_bound, - Oid leading_bound_type, - FmgrInfo *cmp_proc, - Datum interval_binary, - Oid interval_type, - bool forward, - Oid *last_partition); /* Expression tree handlers */ -static WrapperNode *handle_const(const Const *c, WalkerContext *context); -static void handle_binary_opexpr(WalkerContext *context, WrapperNode *result, const Node *varnode, const Const *c); -static void handle_binary_opexpr_param(const PartRelationInfo *prel, WrapperNode *result, const Node *varnode); -static WrapperNode *handle_opexpr(const OpExpr *expr, WalkerContext *context); -static WrapperNode *handle_boolexpr(const BoolExpr *expr, WalkerContext *context); -static WrapperNode *handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context); -static RestrictInfo *rebuild_restrictinfo(Node *clause, RestrictInfo *old_rinfo); -static bool pull_var_param(const WalkerContext *ctx, const OpExpr *expr, Node **var_ptr, Node **param_ptr); - -/* copied from allpaths.h */ -static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry 
*rte); -static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); -static List *accumulate_append_subpath(List *subpaths, Path *path); -static void generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, - List *live_childrels, - List *all_child_pathkeys, - PathKey *pathkeyAsc, - PathKey *pathkeyDesc); -static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer); - +static Node *wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue); -/* - * Compare two Datums with the given comarison function - * - * flinfo is a pointer to an instance of FmgrInfo - * arg1, arg2 are Datum instances - */ -#define check_lt(finfo, arg1, arg2) \ - ((int) FunctionCall2(finfo, arg1, arg2) < 0) -#define check_le(finfo, arg1, arg2) \ - ((int) FunctionCall2(finfo, arg1, arg2) <= 0) -#define check_eq(finfo, arg1, arg2) \ - ((int) FunctionCall2(finfo, arg1, arg2) == 0) -#define check_ge(finfo, arg1, arg2) \ - ((int) FunctionCall2(finfo, arg1, arg2) >= 0) -#define check_gt(finfo, arg1, arg2) \ - ((int) FunctionCall2(finfo, arg1, arg2) > 0) +static void handle_const(const Const *c, + const Oid collid, + const int strategy, + const WalkerContext *context, + WrapperNode *result); + +static void handle_array(ArrayType *array, + const Oid collid, + const int strategy, + const bool use_or, + const WalkerContext *context, + WrapperNode *result); + +static void handle_boolexpr(const BoolExpr *expr, + const WalkerContext *context, + WrapperNode *result); + +static void handle_arrexpr(const ScalarArrayOpExpr *expr, + const WalkerContext *context, + WrapperNode *result); + +static void handle_opexpr(const OpExpr *expr, + const WalkerContext *context, + WrapperNode *result); + +static Datum array_find_min_max(Datum *values, + bool *isnull, + int length, + Oid value_type, + Oid collid, + bool take_min, + bool *result_null); + + +/* Copied from PostgreSQL (allpaths.c) */ +static void 
set_plain_rel_size(PlannerInfo *root, + RelOptInfo *rel, + RangeTblEntry *rte); + +static void set_plain_rel_pathlist(PlannerInfo *root, + RelOptInfo *rel, + RangeTblEntry *rte); -/* We can transform Param into Const provided that 'econtext' is available */ -#define IsConstValue(wcxt, node) \ - ( IsA((node), Const) || (WcxtHasExprContext(wcxt) ? IsA((node), Param) : false) ) +static List *accumulate_append_subpath(List *subpaths, Path *path); -#define ExtractConst(wcxt, node) \ - ( IsA((node), Param) ? extract_const((wcxt), (Param *) (node)) : ((Const *) (node)) ) +static void generate_mergeappend_paths(PlannerInfo *root, + RelOptInfo *rel, + List *live_childrels, + List *all_child_pathkeys, + PathKey *pathkeyAsc, + PathKey *pathkeyDesc); -/* - * Set initial values for all Postmaster's forks. - */ -void -_PG_init(void) +/* Can we transform this node into a Const? */ +static bool +IsConstValue(Node *node, const WalkerContext *context) { - PathmanInitState temp_init_state; - - if (!process_shared_preload_libraries_in_progress) + switch (nodeTag(node)) { - elog(ERROR, "pg_pathman module must be initialized by Postmaster. " - "Put the following line to configuration file: " - "shared_preload_libraries='pg_pathman'"); - } + case T_Const: + return true; - /* Request additional shared resources */ - RequestAddinShmemSpace(estimate_pathman_shmem_size()); + case T_Param: + return WcxtHasExprContext(context); - /* NOTE: we don't need LWLocks now. 
RequestAddinLWLocks(1); */ + case T_RowExpr: + { + RowExpr *row = (RowExpr *) node; + ListCell *lc; - /* Assign pg_pathman's initial state */ - temp_init_state.initialization_needed = true; - temp_init_state.pg_pathman_enable = true; - - /* Apply initial state */ - restore_pathman_init_state(&temp_init_state); - - /* Initialize 'next' hook pointers */ - set_rel_pathlist_hook_next = set_rel_pathlist_hook; - set_rel_pathlist_hook = pathman_rel_pathlist_hook; - set_join_pathlist_next = set_join_pathlist_hook; - set_join_pathlist_hook = pathman_join_pathlist_hook; - shmem_startup_hook_next = shmem_startup_hook; - shmem_startup_hook = pathman_shmem_startup_hook; - post_parse_analyze_hook_next = post_parse_analyze_hook; - post_parse_analyze_hook = pathman_post_parse_analysis_hook; - planner_hook_next = planner_hook; - planner_hook = pathman_planner_hook; + /* Can't do anything about RECORD of wrong type */ + if (row->row_typeid != context->prel->ev_type) + return false; - /* Initialize static data for all subsystems */ - init_main_pathman_toggle(); - init_runtimeappend_static_data(); - init_runtime_merge_append_static_data(); - init_partition_filter_static_data(); + /* Check that args are const values */ + foreach (lc, row->args) + if (!IsConstValue((Node *) lfirst(lc), context)) + return false; + } + return true; + + default: + return false; + } } -/* - * Disables inheritance for partitioned by pathman relations. - * It must be done to prevent PostgresSQL from exhaustive search. 
- */ -void -disable_inheritance(Query *parse) +/* Extract a Const from node that has been checked by IsConstValue() */ +static Const * +ExtractConst(Node *node, const WalkerContext *context) { - const PartRelationInfo *prel; - RangeTblEntry *rte; - MemoryContext oldcontext; - ListCell *lc; + ExprState *estate; + ExprContext *econtext = context->econtext; + + Datum value; + bool isnull; - /* If query contains CTE (WITH statement) then handle subqueries too */ - disable_inheritance_cte(parse); + Oid typid, + collid; + int typmod; - /* If query contains subselects */ - disable_inheritance_subselect(parse); + /* Fast path for Consts */ + if (IsA(node, Const)) + return (Const *) node; - foreach(lc, parse->rtable) + /* Just a paranoid check */ + Assert(IsConstValue(node, context)); + + switch (nodeTag(node)) { - rte = (RangeTblEntry *) lfirst(lc); + case T_Param: + { + Param *param = (Param *) node; - switch(rte->rtekind) - { - case RTE_RELATION: - if (rte->inh) - { - /* Look up this relation in pathman local cache */ - prel = get_pathman_relation_info(rte->relid); - if (prel) - { - /* We'll set this flag later */ - rte->inh = false; - - /* - * Sometimes user uses the ONLY statement and in this case - * rte->inh is also false. We should differ the case - * when user uses ONLY statement from case when we - * make rte->inh false intentionally. - */ - oldcontext = MemoryContextSwitchTo(TopMemoryContext); - inheritance_enabled_relids = \ - lappend_oid(inheritance_enabled_relids, rte->relid); - MemoryContextSwitchTo(oldcontext); - - /* - * Check if relation was already found with ONLY modifier. 
In - * this case throw an error because we cannot handle - * situations when partitioned table used both with and - * without ONLY modifier in SELECT queries - */ - if (list_member_oid(inheritance_disabled_relids, rte->relid)) - goto disable_error; - - goto disable_next; - } - } + typid = param->paramtype; + typmod = param->paramtypmod; + collid = param->paramcollid; + + /* It must be provided */ + Assert(WcxtHasExprContext(context)); + } + break; - oldcontext = MemoryContextSwitchTo(TopMemoryContext); - inheritance_disabled_relids = \ - lappend_oid(inheritance_disabled_relids, rte->relid); - MemoryContextSwitchTo(oldcontext); + case T_RowExpr: + { + RowExpr *row = (RowExpr *) node; - /* Check if relation was already found withoud ONLY modifier */ - if (list_member_oid(inheritance_enabled_relids, rte->relid)) - goto disable_error; - break; - case RTE_SUBQUERY: - /* Recursively disable inheritance for subqueries */ - disable_inheritance(rte->subquery); - break; - default: - break; - } + typid = row->row_typeid; + typmod = -1; + collid = InvalidOid; -disable_next: - ; +#if PG_VERSION_NUM >= 100000 + /* If there's no context - create it! 
*/ + if (!WcxtHasExprContext(context)) + econtext = CreateStandaloneExprContext(); +#endif + } + break; + + default: + elog(ERROR, "error in function " CppAsString(ExtractConst)); } - return; + /* Evaluate expression */ + estate = ExecInitExpr((Expr *) node, NULL); + value = ExecEvalExprCompat(estate, econtext, &isnull); + +#if PG_VERSION_NUM >= 100000 + /* Free temp econtext if needed */ + if (econtext && !WcxtHasExprContext(context)) + FreeExprContext(econtext, true); +#endif -disable_error: - elog(ERROR, "It is prohibited to query partitioned tables both " - "with and without ONLY modifier"); + /* Finally return Const */ + return makeConst(typid, typmod, collid, get_typlen(typid), + value, isnull, get_typbyval(typid)); } -void -disable_inheritance_cte(Query *parse) +/* + * Checks if expression is a KEY OP PARAM or PARAM OP KEY, + * where KEY is partitioning expression and PARAM is whatever. + * + * Returns: + * operator's Oid if KEY is a partitioning expr, + * otherwise InvalidOid. + */ +static Oid +IsKeyOpParam(const OpExpr *expr, + const WalkerContext *context, + Node **param_ptr) /* ret value #1 */ { - ListCell *lc; + Node *left = linitial(expr->args), + *right = lsecond(expr->args); + + /* Check number of arguments */ + if (list_length(expr->args) != 2) + return InvalidOid; + + /* KEY OP PARAM */ + if (match_expr_to_operand(context->prel_expr, left)) + { + *param_ptr = right; + + /* return the same operator */ + return expr->opno; + } - foreach(lc, parse->cteList) + /* PARAM OP KEY */ + if (match_expr_to_operand(context->prel_expr, right)) { - CommonTableExpr *cte = (CommonTableExpr*) lfirst(lc); + *param_ptr = left; - if (IsA(cte->ctequery, Query)) - disable_inheritance((Query *) cte->ctequery); + /* commute to (KEY OP PARAM) */ + return get_commutator(expr->opno); } + + return InvalidOid; } -void -disable_inheritance_subselect(Query *parse) +/* Selectivity estimator for common 'paramsel' */ +static inline double +estimate_paramsel_using_prel(const 
PartRelationInfo *prel, int strategy) { - Node *quals; + /* If it's "=", divide by partitions number */ + if (strategy == BTEqualStrategyNumber) + return 1.0 / (double) PrelChildrenCount(prel); - if (!parse->jointree || !parse->jointree->quals) - return; + /* Default selectivity estimate for inequalities */ + else if (prel->parttype == PT_RANGE && strategy > 0) + return DEFAULT_INEQ_SEL; - quals = parse->jointree->quals; - disable_inheritance_subselect_walker(quals, NULL); + /* Else there's not much to do */ + else return 1.0; } -static bool -disable_inheritance_subselect_walker(Node *node, void *context) +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 +/* + * Reset cache at start and at finish ATX transaction + */ +static void +pathman_xact_cb(XactEvent event, void *arg) { - if (node == NULL) - return false; - - if (IsA(node, SubLink)) + if (getNestLevelATX() > 0) { - disable_inheritance((Query *) (((SubLink *) node)->subselect)); - return false; + /* + * For each ATX transaction start/finish: need to reset pg_pathman + * cache because we shouldn't see uncommitted data in autonomous + * transaction and data of autonomous transaction in main transaction + */ + if ((event == XACT_EVENT_START /* start */) || + (event == XACT_EVENT_ABORT || + event == XACT_EVENT_PARALLEL_ABORT || + event == XACT_EVENT_COMMIT || + event == XACT_EVENT_PARALLEL_COMMIT || + event == XACT_EVENT_PREPARE /* finish */)) + { + pathman_relcache_hook(PointerGetDatum(NULL), InvalidOid); + } } - - return expression_tree_walker(node, disable_inheritance_subselect_walker, (void *) context); } +#endif /* - * Checks if query affects only one partition. 
If true then substitute + * ------------------- + * General functions + * ------------------- */ + +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ +static shmem_request_hook_type prev_shmem_request_hook = NULL; +static void pg_pathman_shmem_request(void); +#endif + +/* Set initial values for all Postmaster's forks */ void -handle_modification_query(Query *parse) +_PG_init(void) { - const PartRelationInfo *prel; - List *ranges; - RangeTblEntry *rte; - WrapperNode *wrap; - Expr *expr; - WalkerContext context; + if (!process_shared_preload_libraries_in_progress) + { + elog(ERROR, "pg_pathman module must be initialized by Postmaster. " + "Put the following line to configuration file: " + "shared_preload_libraries='pg_pathman'"); + } - Assert(parse->commandType == CMD_UPDATE || - parse->commandType == CMD_DELETE); - Assert(parse->resultRelation > 0); + /* Request additional shared resources */ +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ + prev_shmem_request_hook = shmem_request_hook; + shmem_request_hook = pg_pathman_shmem_request; +#else + RequestAddinShmemSpace(estimate_pathman_shmem_size()); +#endif - rte = rt_fetch(parse->resultRelation, parse->rtable); - prel = get_pathman_relation_info(rte->relid); + /* Assign pg_pathman's initial state */ + pathman_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; + pathman_init_state.auto_partition = DEFAULT_PATHMAN_AUTO; + pathman_init_state.override_copy = DEFAULT_PATHMAN_OVERRIDE_COPY; + pathman_init_state.initialization_needed = true; /* ofc it's needed! 
*/ + + /* Set basic hooks */ + pathman_set_rel_pathlist_hook_next = set_rel_pathlist_hook; + set_rel_pathlist_hook = pathman_rel_pathlist_hook; + pathman_set_join_pathlist_next = set_join_pathlist_hook; + set_join_pathlist_hook = pathman_join_pathlist_hook; + pathman_shmem_startup_hook_next = shmem_startup_hook; + shmem_startup_hook = pathman_shmem_startup_hook; + pathman_post_parse_analyze_hook_next = post_parse_analyze_hook; + post_parse_analyze_hook = pathman_post_parse_analyze_hook; + pathman_planner_hook_next = planner_hook; + planner_hook = pathman_planner_hook; + pathman_process_utility_hook_next = ProcessUtility_hook; + ProcessUtility_hook = pathman_process_utility_hook; + pathman_executor_start_hook_prev = ExecutorStart_hook; + ExecutorStart_hook = pathman_executor_start_hook; - if (!prel) - return; + /* Initialize static data for all subsystems */ + init_main_pathman_toggles(); + init_relation_info_static_data(); + init_runtime_append_static_data(); + init_runtime_merge_append_static_data(); + init_partition_filter_static_data(); + init_partition_router_static_data(); + init_partition_overseer_static_data(); - /* Parse syntax tree and extract partition ranges */ - ranges = list_make1_irange(make_irange(0, PrelLastChild(prel), false)); - expr = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); - if (!expr) - return; +#ifdef PGPRO_EE + /* Callbacks for reload relcache for ATX transactions */ + PgproRegisterXactCallback(pathman_xact_cb, NULL, XACT_EVENT_KIND_VANILLA | XACT_EVENT_KIND_ATX); +#endif +} - /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel, NULL, false); - wrap = walk_expr_tree(expr, &context); +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ +static void +pg_pathman_shmem_request(void) +{ + if (prev_shmem_request_hook) + prev_shmem_request_hook(); - ranges = irange_list_intersect(ranges, wrap->rangeset); + RequestAddinShmemSpace(estimate_pathman_shmem_size()); +} +#endif - /* If only 
one partition is affected then substitute parent table with partition */ - if (irange_list_length(ranges) == 1) +/* Get cached PATHMAN_CONFIG relation Oid */ +Oid +get_pathman_config_relid(bool invalid_is_ok) +{ + if (!IsPathmanInitialized()) { - IndexRange irange = linitial_irange(ranges); - if (irange.ir_lower == irange.ir_upper) - { - Oid *children = PrelGetChildrenArray(prel); - rte->relid = children[irange.ir_lower]; - rte->inh = false; - } + if (invalid_is_ok) + return InvalidOid; + elog(ERROR, "pg_pathman is not initialized yet"); } - return; + /* Raise ERROR if Oid is invalid */ + if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) + elog(ERROR, "unexpected error in function " + CppAsString(get_pathman_config_relid)); + + return pathman_config_relid; } -void -set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, - Index rti, RangeTblEntry *rte) +/* Get cached PATHMAN_CONFIG_PARAMS relation Oid */ +Oid +get_pathman_config_params_relid(bool invalid_is_ok) { - double parent_rows = 0; - double parent_size = 0; - ListCell *l; - - foreach(l, root->append_rel_list) + if (!IsPathmanInitialized()) { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); - Index childRTindex, - parentRTindex = rti; - RelOptInfo *childrel; + if (invalid_is_ok) + return InvalidOid; + elog(ERROR, "pg_pathman is not initialized yet"); + } - /* append_rel_list contains all append rels; ignore others */ - if (appinfo->parent_relid != parentRTindex) - continue; + /* Raise ERROR if Oid is invalid */ + if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) + elog(ERROR, "unexpected error in function " + CppAsString(get_pathman_config_params_relid)); + + return pathman_config_params_relid; +} - childRTindex = appinfo->child_relid; +/* + * Return pg_pathman schema's Oid or InvalidOid if that's not possible. 
+ */ +Oid +get_pathman_schema(void) +{ + Oid result; + Relation rel; + SysScanDesc scandesc; + HeapTuple tuple; + ScanKeyData entry[1]; + Oid ext_oid; + + /* It's impossible to fetch pg_pathman's schema now */ + if (!IsTransactionState()) + return InvalidOid; + + ext_oid = get_extension_oid("pg_pathman", true); + if (ext_oid == InvalidOid) + return InvalidOid; /* exit if pg_pathman does not exist */ + + ScanKeyInit(&entry[0], +#if PG_VERSION_NUM >= 120000 + Anum_pg_extension_oid, +#else + ObjectIdAttributeNumber, +#endif + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(ext_oid)); - childrel = find_base_rel(root, childRTindex); - Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL); + rel = heap_open_compat(ExtensionRelationId, AccessShareLock); + scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, + NULL, 1, entry); - /* - * Accumulate size information from each live child. - */ - Assert(childrel->rows > 0); + tuple = systable_getnext(scandesc); - parent_rows += childrel->rows; - parent_size += childrel->width * childrel->rows; - } + /* We assume that there can be at most one matching tuple */ + if (HeapTupleIsValid(tuple)) + result = ((Form_pg_extension) GETSTRUCT(tuple))->extnamespace; + else + result = InvalidOid; + + systable_endscan(scandesc); + + heap_close_compat(rel, AccessShareLock); - rel->rows = parent_rows; - rel->width = rint(parent_size / parent_rows); - rel->tuples = parent_rows; + return result; } + + +/* + * ---------------------------------------- + * RTE expansion (add RTE for partitions) + * ---------------------------------------- + */ + /* * Creates child relation and adds it to root. - * Returns child index in simple_rel_array + * Returns child index in simple_rel_array. + * + * NOTE: partially based on the expand_inherited_rtentry() function. 
*/ -int -append_child_relation(PlannerInfo *root, RelOptInfo *rel, Index rti, - RangeTblEntry *rte, int index, Oid childOid, List *wrappers) +Index +append_child_relation(PlannerInfo *root, + Relation parent_relation, + PlanRowMark *parent_rowmark, + Index parent_rti, + int ir_index, + Oid child_oid, + List *wrappers) { - RangeTblEntry *childrte; - RelOptInfo *childrel; - Index childRTindex; + RangeTblEntry *parent_rte, + *child_rte; + RelOptInfo *parent_rel, + *child_rel; + Relation child_relation; AppendRelInfo *appinfo; - Node *node; - ListCell *lc, + Index child_rti; + PlanRowMark *child_rowmark = NULL; + Node *childqual; + List *childquals; + ListCell *lc1, *lc2; - Relation newrelation; - PlanRowMark *parent_rowmark; - PlanRowMark *child_rowmark; - AttrNumber i; + LOCKMODE lockmode; +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + TupleDesc child_tupdesc; + List *parent_colnames; + List *child_colnames; +#endif - newrelation = heap_open(childOid, NoLock); + /* Choose a correct lock mode */ + if (parent_rti == root->parse->resultRelation) + lockmode = RowExclusiveLock; + else if (parent_rowmark && RowMarkRequiresRowShareLock(parent_rowmark->markType)) + lockmode = RowShareLock; + else + lockmode = AccessShareLock; - /* - * Create RangeTblEntry for child relation. - * This code partially based on expand_inherited_rtentry() function. 
- */ - childrte = copyObject(rte); - childrte->relid = childOid; - childrte->relkind = newrelation->rd_rel->relkind; - childrte->inh = false; - childrte->requiredPerms = 0; - root->parse->rtable = lappend(root->parse->rtable, childrte); - childRTindex = list_length(root->parse->rtable); - root->simple_rte_array[childRTindex] = childrte; - - /* Create RelOptInfo */ - childrel = build_simple_rel(root, childRTindex, RELOPT_OTHER_MEMBER_REL); - - /* Copy targetlist */ - childrel->reltargetlist = NIL; - foreach(lc, rel->reltargetlist) - { - Node *new_target; + /* Acquire a suitable lock on partition */ + LockRelationOid(child_oid, lockmode); - node = (Node *) lfirst(lc); - new_target = copyObject(node); - change_varnos(new_target, rel->relid, childrel->relid); - childrel->reltargetlist = lappend(childrel->reltargetlist, new_target); + /* Check that partition exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(child_oid))) + { + UnlockRelationOid(child_oid, lockmode); + return 0; } - /* Copy attr_needed & attr_widths */ - childrel->attr_needed = (Relids *) - palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids)); - childrel->attr_widths = (int32 *) - palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32)); - - for (i = 0; i < rel->max_attr - rel->min_attr + 1; i++) - childrel->attr_needed[i] = bms_copy(rel->attr_needed[i]); + parent_rel = root->simple_rel_array[parent_rti]; - memcpy(childrel->attr_widths, rel->attr_widths, - (rel->max_attr - rel->min_attr + 1) * sizeof(int32)); - - /* - * Copy restrictions. 
If it's not the parent table then copy only those - * restrictions that reference to this partition - */ - childrel->baserestrictinfo = NIL; - if (rte->relid != childOid) - { - forboth(lc, wrappers, lc2, rel->baserestrictinfo) - { - bool alwaysTrue; - WrapperNode *wrap = (WrapperNode *) lfirst(lc); - Node *new_clause = wrapper_make_expression(wrap, index, &alwaysTrue); - RestrictInfo *old_rinfo = (RestrictInfo *) lfirst(lc2); + /* make clang analyzer quiet */ + if (!parent_rel) + elog(ERROR, "parent relation is NULL"); - if (alwaysTrue) - { - continue; - } - Assert(new_clause); + parent_rte = root->simple_rte_array[parent_rti]; - if (and_clause((Node *) new_clause)) - { - ListCell *alc; + /* Open child relation (we've just locked it) */ + child_relation = heap_open_compat(child_oid, NoLock); - foreach(alc, ((BoolExpr *) new_clause)->args) - { - Node *arg = (Node *) lfirst(alc); - RestrictInfo *new_rinfo = rebuild_restrictinfo(arg, old_rinfo); + /* Create RangeTblEntry for child relation */ +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + child_rte = makeNode(RangeTblEntry); + memcpy(child_rte, parent_rte, sizeof(RangeTblEntry)); +#else + child_rte = copyObject(parent_rte); +#endif + child_rte->relid = child_oid; + child_rte->relkind = child_relation->rd_rel->relkind; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* No permission checking for the child RTE */ + child_rte->perminfoindex = 0; +#else + child_rte->requiredPerms = 0; /* perform all checks on parent */ +#endif + child_rte->inh = false; - change_varnos((Node *)new_rinfo, rel->relid, childrel->relid); - childrel->baserestrictinfo = lappend(childrel->baserestrictinfo, - new_rinfo); - } - } - else - { - RestrictInfo *new_rinfo = rebuild_restrictinfo(new_clause, old_rinfo); + /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ + root->parse->rtable = lappend(root->parse->rtable, child_rte); + child_rti = list_length(root->parse->rtable); + root->simple_rte_array[child_rti] = 
child_rte; - /* Replace old relids with new ones */ - change_varnos((Node *)new_rinfo, rel->relid, childrel->relid); + /* Build an AppendRelInfo for this child */ + appinfo = makeNode(AppendRelInfo); + appinfo->parent_relid = parent_rti; + appinfo->child_relid = child_rti; + appinfo->parent_reloid = parent_rte->relid; - childrel->baserestrictinfo = lappend(childrel->baserestrictinfo, - (void *) new_rinfo); - } - } - } - /* If it's the parent table then copy all restrictions */ - else - { - foreach(lc, rel->baserestrictinfo) - { - RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); - RestrictInfo *new_rinfo = (RestrictInfo *) copyObject(rinfo); + /* Store table row types for wholerow references */ + appinfo->parent_reltype = RelationGetDescr(parent_relation)->tdtypeid; + appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; - change_varnos((Node *)new_rinfo, rel->relid, childrel->relid); - childrel->baserestrictinfo = lappend(childrel->baserestrictinfo, - (void *) new_rinfo); - } - } + make_inh_translation_list(parent_relation, child_relation, child_rti, + &appinfo->translated_vars, appinfo); - /* Build an AppendRelInfo for this parent and child */ - appinfo = makeNode(AppendRelInfo); - appinfo->parent_relid = rti; - appinfo->child_relid = childRTindex; - appinfo->parent_reloid = rte->relid; - root->append_rel_list = lappend(root->append_rel_list, appinfo); - root->total_table_pages += (double) childrel->pages; +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + /* tablesample is probably null, but copy it */ + child_rte->tablesample = copyObject(parent_rte->tablesample); - /* Add equivalence members */ - foreach(lc, root->eq_classes) + /* + * Construct an alias clause for the child, which we can also use as eref. + * This is important so that EXPLAIN will print the right column aliases + * for child-table columns. 
(Since ruleutils.c doesn't have any easy way + * to reassociate parent and child columns, we must get the child column + * aliases right to start with. Note that setting childrte->alias forces + * ruleutils.c to use these column names, which it otherwise would not.) + */ + child_tupdesc = RelationGetDescr(child_relation); + parent_colnames = parent_rte->eref->colnames; + child_colnames = NIL; + for (int cattno = 0; cattno < child_tupdesc->natts; cattno++) { - EquivalenceClass *cur_ec = (EquivalenceClass *) lfirst(lc); + Form_pg_attribute att = TupleDescAttr(child_tupdesc, cattno); + const char *attname; - /* Copy equivalence member from parent and make some modifications */ - foreach(lc2, cur_ec->ec_members) + if (att->attisdropped) { - EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2); - EquivalenceMember *em; - - if (!bms_is_member(rti, cur_em->em_relids)) - continue; - - em = makeNode(EquivalenceMember); - em->em_expr = copyObject(cur_em->em_expr); - change_varnos((Node *) em->em_expr, rti, childRTindex); - em->em_relids = bms_add_member(NULL, childRTindex); - em->em_nullable_relids = cur_em->em_nullable_relids; - em->em_is_const = false; - em->em_is_child = true; - em->em_datatype = cur_em->em_datatype; - cur_ec->ec_members = lappend(cur_ec->ec_members, em); + /* Always insert an empty string for a dropped column */ + attname = ""; + } + else if (appinfo->parent_colnos[cattno] > 0 && + appinfo->parent_colnos[cattno] <= list_length(parent_colnames)) + { + /* Duplicate the query-assigned name for the parent column */ + attname = strVal(list_nth(parent_colnames, + appinfo->parent_colnos[cattno] - 1)); + } + else + { + /* New column, just use its real name */ + attname = NameStr(att->attname); } + child_colnames = lappend(child_colnames, makeString(pstrdup(attname))); } - childrel->has_eclass_joins = rel->has_eclass_joins; - /* Recalc parent relation tuples count */ - rel->tuples += childrel->tuples; + /* + * We just duplicate the parent's table alias 
name for each child. If the + * plan gets printed, ruleutils.c has to sort out unique table aliases to + * use, which it can handle. + */ + child_rte->alias = child_rte->eref = makeAlias(parent_rte->eref->aliasname, + child_colnames); +#endif + + /* Now append 'appinfo' to 'root->append_rel_list' */ + root->append_rel_list = lappend(root->append_rel_list, appinfo); + /* And to array in >= 11, it must be big enough */ +#if PG_VERSION_NUM >= 110000 + root->append_rel_array[child_rti] = appinfo; +#endif + + /* Create RelOptInfo for this child (and make some estimates as well) */ + child_rel = build_simple_rel_compat(root, child_rti, parent_rel); - heap_close(newrelation, NoLock); + /* Increase total_table_pages using the 'child_rel' */ + root->total_table_pages += (double) child_rel->pages; /* Create rowmarks required for child rels */ - parent_rowmark = get_plan_rowmark(root->rowMarks, rti); + /* + * XXX: vanilla recurses down with *top* rowmark, not immediate parent one. + * Not sure about example where this matters though. 
+ */ if (parent_rowmark) { child_rowmark = makeNode(PlanRowMark); - child_rowmark->rti = childRTindex; - child_rowmark->prti = rti; - child_rowmark->rowmarkId = parent_rowmark->rowmarkId; + child_rowmark->rti = child_rti; + child_rowmark->prti = parent_rti; + child_rowmark->rowmarkId = parent_rowmark->rowmarkId; /* Reselect rowmark type, because relkind might not match parent */ - child_rowmark->markType = select_rowmark_type(childrte, - parent_rowmark->strength); - child_rowmark->allMarkTypes = (1 << child_rowmark->markType); - child_rowmark->strength = parent_rowmark->strength; - child_rowmark->waitPolicy = parent_rowmark->waitPolicy; - child_rowmark->isParent = false; + child_rowmark->markType = select_rowmark_type(child_rte, + parent_rowmark->strength); + child_rowmark->allMarkTypes = (1 << child_rowmark->markType); + child_rowmark->strength = parent_rowmark->strength; + child_rowmark->waitPolicy = parent_rowmark->waitPolicy; + child_rowmark->isParent = false; + + root->rowMarks = lappend(root->rowMarks, child_rowmark); + + /* Adjust tlist for RowMarks (see planner.c) */ + /* + * XXX Saner approach seems to + * 1) Add tle to top parent and processed_tlist once in rel_pathlist_hook. + * 2) Mark isParent = true + * *parent* knows it is parent, after all; why should child bother? 
+ * 3) Recursion (code executed in childs) starts at 2) + */ + if (!parent_rowmark->isParent && !root->parse->setOperations) + { + append_tle_for_rowmark(root, parent_rowmark); + } /* Include child's rowmark type in parent's allMarkTypes */ parent_rowmark->allMarkTypes |= child_rowmark->allMarkTypes; + parent_rowmark->isParent = true; + } - root->rowMarks = lappend(root->rowMarks, child_rowmark); - parent_rowmark->isParent = true; +#if PG_VERSION_NUM < 160000 /* for commit a61b1f74823c */ + /* Translate column privileges for this child */ + if (parent_rte->relid != child_oid) + { + child_rte->selectedCols = translate_col_privs(parent_rte->selectedCols, + appinfo->translated_vars); + child_rte->insertedCols = translate_col_privs(parent_rte->insertedCols, + appinfo->translated_vars); + child_rte->updatedCols = translate_col_privs(parent_rte->updatedCols, + appinfo->translated_vars); + } +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + else + { + child_rte->selectedCols = bms_copy(parent_rte->selectedCols); + child_rte->insertedCols = bms_copy(parent_rte->insertedCols); + child_rte->updatedCols = bms_copy(parent_rte->updatedCols); } +#endif +#endif /* PG_VERSION_NUM < 160000 */ - return childRTindex; -} + /* Here and below we assume that parent RelOptInfo exists */ + Assert(parent_rel); -/* Create new restriction based on clause */ -static RestrictInfo * -rebuild_restrictinfo(Node *clause, RestrictInfo *old_rinfo) -{ - return make_restrictinfo((Expr *) clause, - old_rinfo->is_pushed_down, - old_rinfo->outerjoin_delayed, - old_rinfo->pseudoconstant, - old_rinfo->required_relids, - old_rinfo->outer_relids, - old_rinfo->nullable_relids); -} + /* Adjust join quals for this child */ + child_rel->joininfo = (List *) adjust_appendrel_attrs_compat(root, + (Node *) parent_rel->joininfo, + appinfo); -/* Convert wrapper into expression for given index */ -static Node * -wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) -{ - bool lossy, found; + /* 
Adjust target list for this child */ + adjust_rel_targetlist_compat(root, child_rel, parent_rel, appinfo); - *alwaysTrue = false; /* - * TODO: use faster algorithm using knowledge that we enumerate indexes - * sequntially. + * Copy restrictions. If it's not the parent table, copy only + * those restrictions that are related to this partition. */ - found = irange_list_find(wrap->rangeset, index, &lossy); - - /* Return NULL for always true and always false. */ - if (!found) - return NULL; - if (!lossy) + if (parent_rte->relid != child_oid) { - *alwaysTrue = true; - return NULL; - } + childquals = NIL; - if (IsA(wrap->orig, BoolExpr)) - { - const BoolExpr *expr = (const BoolExpr *) wrap->orig; - BoolExpr *result; - - if (expr->boolop == OR_EXPR || expr->boolop == AND_EXPR) + forboth (lc1, wrappers, lc2, parent_rel->baserestrictinfo) { - ListCell *lc; - List *args = NIL; - - foreach (lc, wrap->args) - { - Node *arg; - bool childAlwaysTrue; - - arg = wrapper_make_expression((WrapperNode *) lfirst(lc), - index, &childAlwaysTrue); -#ifdef USE_ASSERT_CHECKING - /* - * We shouldn't get there for always true clause under OR and - * always false clause under AND. - */ - if (expr->boolop == OR_EXPR) - Assert(!childAlwaysTrue); - if (expr->boolop == AND_EXPR) - Assert(arg || childAlwaysTrue); -#endif - if (arg) - args = lappend(args, arg); - } + WrapperNode *wrap = (WrapperNode *) lfirst(lc1); + Node *new_clause; + bool always_true; - Assert(list_length(args) >= 1); + /* Generate a set of clauses for this child using WrapperNode */ + new_clause = wrapper_make_expression(wrap, ir_index, &always_true); - /* Remove redundant OR/AND when child is single. 
*/ - if (list_length(args) == 1) - return (Node *) linitial(args); + /* Don't add this clause if it's always true */ + if (always_true) + continue; - result = makeNode(BoolExpr); - result->xpr.type = T_BoolExpr; - result->args = args; - result->boolop = expr->boolop; - result->location = expr->location; - return (Node *) result; + /* Clause should not be NULL */ + Assert(new_clause); + childquals = lappend(childquals, new_clause); } - else - return copyObject(wrap->orig); - } - else - return copyObject(wrap->orig); -} - -/* - * Recursive function to walk through conditions tree - */ -WrapperNode * -walk_expr_tree(Expr *expr, WalkerContext *context) -{ - BoolExpr *boolexpr; - OpExpr *opexpr; - ScalarArrayOpExpr *arrexpr; - WrapperNode *result; - - switch (expr->type) - { - /* Useful for INSERT optimization */ - case T_Const: - return handle_const((Const *) expr, context); - - /* AND, OR, NOT expressions */ - case T_BoolExpr: - boolexpr = (BoolExpr *) expr; - return handle_boolexpr(boolexpr, context); - - /* =, !=, <, > etc. */ - case T_OpExpr: - opexpr = (OpExpr *) expr; - return handle_opexpr(opexpr, context); - - /* IN expression */ - case T_ScalarArrayOpExpr: - arrexpr = (ScalarArrayOpExpr *) expr; - return handle_arrexpr(arrexpr, context); - - default: - result = (WrapperNode *) palloc(sizeof(WrapperNode)); - result->orig = (const Node *) expr; - result->args = NIL; - result->rangeset = list_make1_irange( - make_irange(0, PrelLastChild(context->prel), true)); - result->paramsel = 1.0; - return result; } -} - -/* - * Append\prepend partitions if there's no partition to store 'value'. - * - * Used by create_partitions_internal(). - * - * NB: 'value' type is not needed since we've already taken - * it into account while searching for the 'cmp_proc'. 
- */ -static bool -spawn_partitions(Oid partitioned_rel, /* parent's Oid */ - Datum value, /* value to be INSERTed */ - Datum leading_bound, /* current global min\max */ - Oid leading_bound_type, /* type of the boundary */ - FmgrInfo *cmp_proc, /* cmp(value, leading_bound) */ - Datum interval_binary, /* interval in binary form */ - Oid interval_type, /* INTERVALOID or prel->atttype */ - bool forward, /* append\prepend */ - Oid *last_partition) /* result (Oid of the last partition) */ -{ -/* Cache "+"(leading_bound, interval) or "-"(leading_bound, interval) operator */ -#define CacheOperator(finfo, opname, arg1, arg2, is_cached) \ - do { \ - if (!is_cached) \ - { \ - fmgr_info(get_binary_operator_oid((opname), (arg1), (arg2)), \ - (finfo)); \ - is_cached = true; \ - } \ - } while (0) - -/* Use "<" for prepend & ">=" for append */ -#define do_compare(compar, a, b, fwd) \ - ( \ - (fwd) ? \ - check_ge((compar), (a), (b)) : \ - check_lt((compar), (a), (b)) \ - ) - - FmgrInfo interval_move_bound; /* function to move upper\lower boundary */ - bool interval_move_bound_cached = false; /* is it cached already? 
*/ - bool spawned = false; - - Datum cur_part_leading = leading_bound; - - char *query; - - /* Create querty statement */ - query = psprintf("SELECT part::regclass " - "FROM %s.create_single_range_partition($1, $2, $3) AS part", - get_namespace_name(get_pathman_schema())); - - /* Execute comparison function cmp(value, cur_part_leading) */ - while (do_compare(cmp_proc, value, cur_part_leading, forward)) + /* If it's the parent table, copy all restrictions */ + else childquals = get_all_actual_clauses(parent_rel->baserestrictinfo); + + /* Now it's time to change varnos and rebuld quals */ + childquals = (List *) adjust_appendrel_attrs_compat(root, + (Node *) childquals, + appinfo); + childqual = eval_const_expressions(root, (Node *) + make_ands_explicit(childquals)); + if (childqual && IsA(childqual, Const) && + (((Const *) childqual)->constisnull || + !DatumGetBool(((Const *) childqual)->constvalue))) { - char *nulls = NULL; /* no params are NULL */ - Oid types[3] = { REGCLASSOID, leading_bound_type, leading_bound_type }; - Datum values[3]; - int ret; - - /* Assign the 'following' boundary to current 'leading' value */ - Datum cur_part_following = cur_part_leading; - - CacheOperator(&interval_move_bound, (forward ? "+" : "-"), - leading_bound_type, interval_type, interval_move_bound_cached); - - /* Move leading bound by interval (leading +\- INTERVAL) */ - cur_part_leading = FunctionCall2(&interval_move_bound, - cur_part_leading, - interval_binary); - - /* Fill in 'values' with parent's Oid and correct boundaries... */ - values[0] = partitioned_rel; /* partitioned table's Oid */ - values[1] = forward ? cur_part_following : cur_part_leading; /* value #1 */ - values[2] = forward ? 
cur_part_leading : cur_part_following; /* value #2 */ - - /* ...and create partition */ - ret = SPI_execute_with_args(query, 3, types, values, nulls, false, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "Could not spawn a partition"); - - /* Set 'last_partition' if necessary */ - if (last_partition) - { - HeapTuple htup = SPI_tuptable->vals[0]; - Datum partid; - bool isnull; - - Assert(SPI_processed == 1); - Assert(SPI_tuptable->tupdesc->natts == 1); - partid = SPI_getbinval(htup, SPI_tuptable->tupdesc, 1, &isnull); - - *last_partition = DatumGetObjectId(partid); - } - -#ifdef USE_ASSERT_CHECKING - elog(DEBUG2, "%s partition with following='%s' & leading='%s' [%u]", - (forward ? "Appending" : "Prepending"), - DebugPrintDatum(cur_part_following, leading_bound_type), - DebugPrintDatum(cur_part_leading, leading_bound_type), - MyProcPid); + /* + * Restriction reduces to constant FALSE or constant NULL after + * substitution, so this child need not be scanned. + */ +#if PG_VERSION_NUM >= 120000 + mark_dummy_rel(child_rel); +#else + set_dummy_rel_pathlist(child_rel); #endif - - /* We have spawned at least 1 partition */ - spawned = true; } + childquals = make_ands_implicit((Expr *) childqual); + childquals = make_restrictinfos_from_actual_clauses(root, childquals); - pfree(query); - - return spawned; -} - -/* - * Convert interval from TEXT to binary form using partitioned column's type. 
- */ -static Datum -extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ - Oid part_atttype, /* partitioned column's type */ - Oid *interval_type) /* returned value */ -{ - Datum interval_binary; - const char *interval_cstring; + /* Set new shiny childquals */ + child_rel->baserestrictinfo = childquals; - interval_cstring = TextDatumGetCString(interval_text); - - /* If 'part_atttype' is a *date type*, cast 'range_interval' to INTERVAL */ - if (is_date_type_internal(part_atttype)) - { - int32 interval_typmod = PATHMAN_CONFIG_interval_typmod; - - /* Convert interval from CSTRING to internal form */ - interval_binary = DirectFunctionCall3(interval_in, - CStringGetDatum(interval_cstring), - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(interval_typmod)); - if (interval_type) - *interval_type = INTERVALOID; - } - /* Otherwise cast it to the partitioned column's type */ - else + if (relation_excluded_by_constraints(root, child_rel, child_rte)) { - HeapTuple htup; - Oid typein_proc = InvalidOid; - - htup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(part_atttype)); - if (HeapTupleIsValid(htup)) - { - typein_proc = ((Form_pg_type) GETSTRUCT(htup))->typinput; - ReleaseSysCache(htup); - } - else - elog(ERROR, "Cannot find input function for type %u", part_atttype); - - /* Convert interval from CSTRING to 'prel->atttype' */ - interval_binary = OidFunctionCall1(typein_proc, - CStringGetDatum(interval_cstring)); - if (interval_type) - *interval_type = part_atttype; + /* + * This child need not be scanned, so we can omit it from the + * appendrel. + */ +#if PG_VERSION_NUM >= 120000 + mark_dummy_rel(child_rel); +#else + set_dummy_rel_pathlist(child_rel); +#endif } - return interval_binary; -} - -/* - * Append partitions (if needed) and return Oid of the partition to contain value. - * - * NB: This function should not be called directly, use create_partitions() instead. 
- */ -Oid -create_partitions_internal(Oid relid, Datum value, Oid value_type) -{ - MemoryContext old_mcxt = CurrentMemoryContext; - Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ + /* + * We have to make child entries in the EquivalenceClass data + * structures as well. + */ + if (parent_rel->has_eclass_joins || has_useful_pathkeys(root, parent_rel)) + add_child_rel_equivalences(root, appinfo, parent_rel, child_rel); + child_rel->has_eclass_joins = parent_rel->has_eclass_joins; - PG_TRY(); + /* Expand child partition if it might have subpartitions */ + if (parent_rte->relid != child_oid && + child_relation->rd_rel->relhassubclass) { - const PartRelationInfo *prel; - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL)) - { - Datum min_rvalue, - max_rvalue; - - Oid interval_type = InvalidOid; - Datum interval_binary, /* assigned 'width' of a single partition */ - interval_text; - - FmgrInfo interval_type_cmp; - - /* Fetch PartRelationInfo by 'relid' */ - prel = get_pathman_relation_info(relid); - shout_if_prel_is_invalid(relid, prel, PT_RANGE); - - /* Read max & min range values from PartRelationInfo */ - min_rvalue = prel->ranges[0].min; - max_rvalue = prel->ranges[PrelLastChild(prel)].max; - - /* Retrieve interval as TEXT from tuple */ - interval_text = values[Anum_pathman_config_range_interval - 1]; - - /* Convert interval to binary representation */ - interval_binary = extract_binary_interval_from_text(interval_text, - prel->atttype, - &interval_type); - - /* Fill the FmgrInfo struct with a cmp(value, part_attribute) function */ - fill_type_cmp_fmgr_info(&interval_type_cmp, value_type, prel->atttype); - - if (SPI_connect() != SPI_OK_CONNECT) - elog(ERROR, "Could not connect using SPI"); - - /* while (value >= MAX) ... 
*/ - spawn_partitions(PrelParentRelid(prel), value, max_rvalue, - prel->atttype, &interval_type_cmp, interval_binary, - interval_type, true, &partid); - - /* while (value < MIN) ... */ - spawn_partitions(PrelParentRelid(prel), value, min_rvalue, - prel->atttype, &interval_type_cmp, interval_binary, - interval_type, false, &partid); - - SPI_finish(); /* close SPI connection */ - } - else - elog(ERROR, "pg_pathman's config does not contain relation \"%s\"", - get_rel_name_or_relid(relid)); + /* See XXX above */ + if (child_rowmark) + child_rowmark->isParent = true; + + pathman_rel_pathlist_hook(root, + child_rel, + child_rti, + child_rte); } - PG_CATCH(); - { - ErrorData *edata; - - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - edata = CopyErrorData(); - FlushErrorState(); - - elog(LOG, "create_partitions_internal(): %s [%u]", - edata->message, MyProcPid); - FreeErrorData(edata); + /* Close child relations, but keep locks */ + heap_close_compat(child_relation, NoLock); - SPI_finish(); /* no problem if not connected */ - } - PG_END_TRY(); - - return partid; + return child_rti; } -/* - * Create RANGE partitions (if needed) using either BGW or current backend. - * - * Returns Oid of the partition to store 'value'. - */ -Oid -create_partitions(Oid relid, Datum value, Oid value_type) -{ - TransactionId rel_xmin; - Oid last_partition = InvalidOid; - - /* Check that table is partitioned and fetch xmin */ - if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin)) - { - bool part_in_prev_xact = - TransactionIdPrecedes(rel_xmin, GetCurrentTransactionId()) || - TransactionIdEquals(rel_xmin, FrozenTransactionId); - - /* - * If table has been partitioned in some previous xact AND - * we don't hold any conflicting locks, run BGWorker. 
- */ - if (part_in_prev_xact && !xact_bgw_conflicting_lock_exists(relid)) - { - elog(DEBUG2, "create_partitions(): chose BGWorker [%u]", MyProcPid); - last_partition = create_partitions_bg_worker(relid, value, value_type); - } - /* Else it'd be better for the current backend to create partitions */ - else - { - elog(DEBUG2, "create_partitions(): chose backend [%u]", MyProcPid); - last_partition = create_partitions_internal(relid, value, value_type); - } - } - else - elog(ERROR, "Relation \"%s\" is not partitioned by pg_pathman", - get_rel_name_or_relid(relid)); - - /* Check that 'last_partition' is valid */ - if (last_partition == InvalidOid) - elog(ERROR, "Could not create new partitions for relation \"%s\"", - get_rel_name_or_relid(relid)); - return last_partition; -} /* - * Given RangeEntry array and 'value', return selected - * RANGE partitions inside the WrapperNode. + * ------------------------- + * RANGE partition pruning + * ------------------------- */ + +/* Given 'value' and 'ranges', return selected partitions list */ void select_range_partitions(const Datum value, + const Oid collid, FmgrInfo *cmp_func, const RangeEntry *ranges, const int nranges, const int strategy, - WrapperNode *result) + WrapperNode *result) /* returned partitions */ { - const RangeEntry *current_re; - bool lossy = false, - is_less, - is_greater; + bool lossy = false, + miss_left, /* 'value' is less than left bound */ + miss_right; /* 'value' is greater that right bound */ + + int startidx = 0, + endidx = nranges - 1, + cmp_min, + cmp_max, + i = 0; + + Bound value_bound = MakeBound(value); /* convert value to Bound */ #ifdef USE_ASSERT_CHECKING - bool found = false; - int counter = 0; + int counter = 0; #endif - int i, - startidx = 0, - endidx = nranges - 1, - cmp_min, - cmp_max; - /* Initial value (no missing partitions found) */ result->found_gap = false; - /* Check boundaries */ + /* Check 'ranges' array */ if (nranges == 0) { result->rangeset = NIL; return; } + + /* Check corner 
cases */ else { Assert(ranges); Assert(cmp_func); - /* Corner cases */ - cmp_min = FunctionCall2(cmp_func, value, ranges[startidx].min), - cmp_max = FunctionCall2(cmp_func, value, ranges[endidx].max); + /* Compare 'value' to absolute MIN and MAX bounds */ + cmp_min = cmp_bounds(cmp_func, collid, &value_bound, &ranges[startidx].min); + cmp_max = cmp_bounds(cmp_func, collid, &value_bound, &ranges[endidx].max); - if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || - (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || - strategy == BTEqualStrategyNumber))) + if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || + (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || + strategy == BTEqualStrategyNumber))) { result->rangeset = NIL; return; @@ -1062,17 +881,21 @@ select_range_partitions(const Datum value, return; } - if ((cmp_min < 0 && strategy == BTGreaterStrategyNumber) || + if ((cmp_min < 0 && strategy == BTGreaterStrategyNumber) || (cmp_min <= 0 && strategy == BTGreaterEqualStrategyNumber)) { - result->rangeset = list_make1_irange(make_irange(startidx, endidx, false)); + result->rangeset = list_make1_irange(make_irange(startidx, + endidx, + IR_COMPLETE)); return; } if (cmp_max >= 0 && (strategy == BTLessEqualStrategyNumber || strategy == BTLessStrategyNumber)) { - result->rangeset = list_make1_irange(make_irange(startidx, endidx, false)); + result->rangeset = list_make1_irange(make_irange(startidx, + endidx, + IR_COMPLETE)); return; } } @@ -1080,52 +903,71 @@ select_range_partitions(const Datum value, /* Binary search */ while (true) { + Assert(ranges); Assert(cmp_func); + /* Calculate new pivot */ i = startidx + (endidx - startidx) / 2; Assert(i >= 0 && i < nranges); - current_re = &ranges[i]; - - cmp_min = FunctionCall2(cmp_func, value, current_re->min); - cmp_max = FunctionCall2(cmp_func, value, current_re->max); + /* Compare 'value' to current MIN and MAX bounds */ + cmp_min = cmp_bounds(cmp_func, collid, &value_bound, &ranges[i].min); + 
cmp_max = cmp_bounds(cmp_func, collid, &value_bound, &ranges[i].max); - is_less = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); - is_greater = (cmp_max > 0 || (cmp_max >= 0 && strategy != BTLessStrategyNumber)); + /* How is 'value' located with respect to left & right bounds? */ + miss_left = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); + miss_right = (cmp_max > 0 || (cmp_max == 0 && strategy != BTLessStrategyNumber)); - if (!is_less && !is_greater) + /* Searched value is inside of partition */ + if (!miss_left && !miss_right) { - if (strategy == BTGreaterEqualStrategyNumber && cmp_min == 0) + /* 'value' == 'min' and we want everything on the right */ + if (cmp_min == 0 && strategy == BTGreaterEqualStrategyNumber) lossy = false; - else if (strategy == BTLessStrategyNumber && cmp_max == 0) + /* 'value' == 'max' and we want everything on the left */ + else if (cmp_max == 0 && strategy == BTLessStrategyNumber) lossy = false; - else - lossy = true; -#ifdef USE_ASSERT_CHECKING - found = true; -#endif - break; + /* We're somewhere in the middle */ + else lossy = true; + + break; /* just exit loop */ } - /* If we still haven't found partition then it doesn't exist */ + /* Indices have met, looks like there's no partition */ if (startidx >= endidx) { - result->rangeset = NIL; + result->rangeset = NIL; result->found_gap = true; - return; + + /* Return if it's "key = value" */ + if (strategy == BTEqualStrategyNumber) + return; + + /* + * Use current partition 'i' as a pivot that will be + * excluded by relation_excluded_by_constraints() if + * (lossy == true) & its WHERE clauses are trivial. 
+ */ + if ((miss_left && (strategy == BTLessStrategyNumber || + strategy == BTLessEqualStrategyNumber)) || + (miss_right && (strategy == BTGreaterStrategyNumber || + strategy == BTGreaterEqualStrategyNumber))) + lossy = true; + else + lossy = false; + + break; /* just exit loop */ } - if (is_less) + if (miss_left) endidx = i - 1; - else if (is_greater) + else if (miss_right) startidx = i + 1; /* For debug's sake */ Assert(++counter < 100); } - Assert(found); - /* Filter partitions */ switch(strategy) { @@ -1133,39 +975,37 @@ select_range_partitions(const Datum value, case BTLessEqualStrategyNumber: if (lossy) { - result->rangeset = list_make1_irange(make_irange(i, i, true)); + result->rangeset = list_make1_irange(make_irange(i, i, IR_LOSSY)); if (i > 0) - result->rangeset = lcons_irange(make_irange(0, i - 1, false), + result->rangeset = lcons_irange(make_irange(0, i - 1, IR_COMPLETE), result->rangeset); } else { - result->rangeset = list_make1_irange(make_irange(0, i, false)); + result->rangeset = list_make1_irange(make_irange(0, i, IR_COMPLETE)); } break; case BTEqualStrategyNumber: - result->rangeset = list_make1_irange(make_irange(i, i, true)); + result->rangeset = list_make1_irange(make_irange(i, i, IR_LOSSY)); break; case BTGreaterEqualStrategyNumber: case BTGreaterStrategyNumber: if (lossy) { - result->rangeset = list_make1_irange(make_irange(i, i, true)); + result->rangeset = list_make1_irange(make_irange(i, i, IR_LOSSY)); if (i < nranges - 1) - result->rangeset = - lappend_irange(result->rangeset, - make_irange(i + 1, - nranges - 1, - false)); + result->rangeset = lappend_irange(result->rangeset, + make_irange(i + 1, + nranges - 1, + IR_COMPLETE)); } else { - result->rangeset = - list_make1_irange(make_irange(i, - nranges - 1, - false)); + result->rangeset = list_make1_irange(make_irange(i, + nranges - 1, + IR_COMPLETE)); } break; @@ -1175,459 +1015,712 @@ select_range_partitions(const Datum value, } } -/* - * This function determines which partitions 
should appear in query plan. - */ -static void -handle_binary_opexpr(WalkerContext *context, WrapperNode *result, - const Node *varnode, const Const *c) -{ - int strategy; - TypeCacheEntry *tce; - FmgrInfo cmp_func; - Oid vartype; - const OpExpr *expr = (const OpExpr *) result->orig; - const PartRelationInfo *prel = context->prel; - Assert(IsA(varnode, Var) || IsA(varnode, RelabelType)); - vartype = !IsA(varnode, RelabelType) ? - ((Var *) varnode)->vartype : - ((RelabelType *) varnode)->resulttype; +/* + * --------------------------------- + * walk_expr_tree() implementation + * --------------------------------- + */ - tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); - strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); - fill_type_cmp_fmgr_info(&cmp_func, c->consttype, prel->atttype); +/* Examine expression in order to select partitions */ +WrapperNode * +walk_expr_tree(Expr *expr, const WalkerContext *context) +{ + WrapperNode *result = (WrapperNode *) palloc0(sizeof(WrapperNode)); - switch (prel->parttype) + switch (nodeTag(expr)) { - case PT_HASH: - if (strategy == BTEqualStrategyNumber) - { - Datum value = OidFunctionCall1(prel->hash_proc, c->constvalue); - uint32 idx = hash_to_part_index(DatumGetInt32(value), - PrelChildrenCount(prel)); + /* Useful for INSERT optimization */ + case T_Const: + handle_const((Const *) expr, ((Const *) expr)->constcollid, + BTEqualStrategyNumber, context, result); + return result; - result->rangeset = list_make1_irange(make_irange(idx, idx, true)); + /* AND, OR, NOT expressions */ + case T_BoolExpr: + handle_boolexpr((BoolExpr *) expr, context, result); + return result; - return; /* exit on equal */ - } - break; /* continue to function's end */ + /* =, !=, <, > etc. 
*/ + case T_OpExpr: + handle_opexpr((OpExpr *) expr, context, result); + return result; - case PT_RANGE: - { - select_range_partitions(c->constvalue, - &cmp_func, - context->prel->ranges, - PrelChildrenCount(context->prel), - strategy, - result); - return; - } + /* ANY, ALL, IN expressions */ + case T_ScalarArrayOpExpr: + handle_arrexpr((ScalarArrayOpExpr *) expr, context, result); + return result; default: - elog(ERROR, "Unknown partitioning type %u", prel->parttype); - } - - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), true)); - result->paramsel = 1.0; -} - -/* - * Estimate selectivity of parametrized quals. - */ -static void -handle_binary_opexpr_param(const PartRelationInfo *prel, - WrapperNode *result, const Node *varnode) -{ - const OpExpr *expr = (const OpExpr *)result->orig; - TypeCacheEntry *tce; - int strategy; - Oid vartype; - - Assert(IsA(varnode, Var) || IsA(varnode, RelabelType)); - - vartype = !IsA(varnode, RelabelType) ? - ((Var *) varnode)->vartype : - ((RelabelType *) varnode)->resulttype; - - /* Determine operator type */ - tce = lookup_type_cache(vartype, TYPECACHE_BTREE_OPFAMILY); - strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); + result->orig = (const Node *) expr; + result->args = NIL; - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), true)); + result->rangeset = list_make1_irange_full(context->prel, IR_LOSSY); + result->paramsel = 1.0; - if (strategy == BTEqualStrategyNumber) - { - result->paramsel = 1.0 / (double) PrelChildrenCount(prel); - } - else if (prel->parttype == PT_RANGE && strategy > 0) - { - result->paramsel = DEFAULT_INEQ_SEL; - } - else - { - result->paramsel = 1.0; + return result; } } -/* - * Convert hash value to the partition index. 
- */ -uint32 -hash_to_part_index(uint32 value, uint32 partitions) +/* Convert wrapper into expression for given index */ +static Node * +wrapper_make_expression(WrapperNode *wrap, int index, bool *alwaysTrue) { - return value % partitions; -} + bool lossy, found; -search_rangerel_result -search_range_partition_eq(const Datum value, - FmgrInfo *cmp_func, - const PartRelationInfo *prel, - RangeEntry *out_re) /* returned RangeEntry */ -{ - RangeEntry *ranges; - int nranges; - WrapperNode result; + *alwaysTrue = false; - ranges = PrelGetRangesArray(prel); - nranges = PrelChildrenCount(prel); + /* TODO: possible optimization (we enumerate indexes sequntially). */ + found = irange_list_find(wrap->rangeset, index, &lossy); - select_range_partitions(value, - cmp_func, - ranges, - nranges, - BTEqualStrategyNumber, - &result); + /* Return NULL for always true and always false. */ + if (!found) + return NULL; - if (result.found_gap) - { - return SEARCH_RANGEREL_GAP; - } - else if (result.rangeset == NIL) + if (!lossy) { - return SEARCH_RANGEREL_OUT_OF_RANGE; + *alwaysTrue = true; + return NULL; } - else - { - IndexRange irange = linitial_irange(result.rangeset); - Assert(list_length(result.rangeset) == 1); - Assert(irange.ir_lower == irange.ir_upper); - Assert(irange.ir_valid); + if (IsA(wrap->orig, BoolExpr)) + { + const BoolExpr *expr = (const BoolExpr *) wrap->orig; + BoolExpr *result; - /* Write result to the 'out_rentry' if necessary */ - if (out_re) - memcpy((void *) out_re, - (const void *) &ranges[irange.ir_lower], - sizeof(RangeEntry)); + if (expr->boolop == OR_EXPR || expr->boolop == AND_EXPR) + { + ListCell *lc; + List *args = NIL; - return SEARCH_RANGEREL_FOUND; - } -} + foreach (lc, wrap->args) + { + Node *arg; + bool childAlwaysTrue; -static Const * -extract_const(WalkerContext *wcxt, Param *param) -{ - ExprState *estate = ExecInitExpr((Expr *) param, NULL); - bool isnull; - Datum value = ExecEvalExpr(estate, wcxt->econtext, &isnull, NULL); + arg = 
wrapper_make_expression((WrapperNode *) lfirst(lc), + index, &childAlwaysTrue); + +#ifdef USE_ASSERT_CHECKING + /* + * We shouldn't get there for always true clause + * under OR and always false clause under AND. + */ + if (expr->boolop == OR_EXPR) + Assert(!childAlwaysTrue); + + if (expr->boolop == AND_EXPR) + Assert(arg || childAlwaysTrue); +#endif + + if (arg) + args = lappend(args, arg); + } - return makeConst(param->paramtype, param->paramtypmod, - param->paramcollid, get_typlen(param->paramtype), - value, isnull, get_typbyval(param->paramtype)); + Assert(list_length(args) >= 1); + + /* Remove redundant OR/AND when child is single. */ + if (list_length(args) == 1) + return (Node *) linitial(args); + + result = makeNode(BoolExpr); + result->args = args; + result->boolop = expr->boolop; + result->location = expr->location; + return (Node *) result; + } + else + return (Node *) copyObject(wrap->orig); + } + else + return (Node *) copyObject(wrap->orig); } -static WrapperNode * -handle_const(const Const *c, WalkerContext *context) + +/* Const handler */ +static void +handle_const(const Const *c, + const Oid collid, + const int strategy, + const WalkerContext *context, + WrapperNode *result) /* ret value #1 */ { const PartRelationInfo *prel = context->prel; - WrapperNode *result = (WrapperNode *) palloc(sizeof(WrapperNode)); - result->orig = (const Node *) c; + /* Deal with missing strategy */ + if (strategy == 0) + goto handle_const_return; /* * Had to add this check for queries like: - * select * from test.hash_rel where txt = NULL; + * select * from test.hash_rel where txt = NULL; */ - if (!context->for_insert) + if (c->constisnull) { - result->rangeset = list_make1_irange(make_irange(0, - PrelLastChild(prel), - true)); - result->paramsel = 1.0; + result->rangeset = NIL; + result->paramsel = 0.0; - return result; + return; /* done, exit */ + } + + /* + * Had to add this check for queries like: + * select * from test.hash_rel where true = false; + * select * from 
test.hash_rel where false; + * select * from test.hash_rel where $1; + */ + if (c->consttype == BOOLOID) + { + if (c->constvalue == BoolGetDatum(false)) + { + result->rangeset = NIL; + result->paramsel = 0.0; + } + else + { + result->rangeset = list_make1_irange_full(prel, IR_COMPLETE); + result->paramsel = 1.0; + } + + return; /* done, exit */ } switch (prel->parttype) { case PT_HASH: { - Datum value = OidFunctionCall1(prel->hash_proc, c->constvalue); - uint32 idx = hash_to_part_index(DatumGetInt32(value), - PrelChildrenCount(prel)); - result->rangeset = list_make1_irange(make_irange(idx, idx, true)); + Datum value, /* value to be hashed */ + hash; /* 32-bit hash */ + uint32 idx; /* index of partition */ + bool cast_success; + + /* Cannot do much about non-equal strategies */ + if (strategy != BTEqualStrategyNumber) + goto handle_const_return; + + /* Peform type cast if types mismatch */ + if (prel->ev_type != c->consttype) + { + value = perform_type_cast(c->constvalue, + getBaseType(c->consttype), + getBaseType(prel->ev_type), + &cast_success); + + if (!cast_success) + elog(ERROR, "Cannot select partition: " + "unable to perform type cast"); + } + /* Else use the Const's value */ + else value = c->constvalue; + /* + * Calculate 32-bit hash of 'value' and corresponding index. + * Since 12, hashtext requires valid collation. Since we never + * supported this, passing db default one will do. + */ + hash = OidFunctionCall1Coll(prel->hash_proc, + DEFAULT_COLLATION_OID, + value); + idx = hash_to_part_index(DatumGetInt32(hash), + PrelChildrenCount(prel)); + + result->rangeset = list_make1_irange(make_irange(idx, idx, IR_LOSSY)); + result->paramsel = 1.0; + + return; /* done, exit */ } - break; case PT_RANGE: { - TypeCacheEntry *tce; + FmgrInfo cmp_finfo; - tce = lookup_type_cache(c->consttype, TYPECACHE_CMP_PROC_FINFO); + /* Cannot do much about non-equal strategies + diff. 
collations */ + if (strategy != BTEqualStrategyNumber && collid != prel->ev_collid) + { + goto handle_const_return; + } + + fill_type_cmp_fmgr_info(&cmp_finfo, + getBaseType(c->consttype), + getBaseType(prel->ev_type)); select_range_partitions(c->constvalue, - &tce->cmp_proc_finfo, - context->prel->ranges, + collid, + &cmp_finfo, + PrelGetRangesArray(context->prel), PrelChildrenCount(context->prel), - BTEqualStrategyNumber, - result); + strategy, + result); /* result->rangeset = ... */ + result->paramsel = 1.0; + + return; /* done, exit */ } - break; default: - elog(ERROR, "Unknown partitioning type %u", prel->parttype); - break; + WrongPartType(prel->parttype); } - return result; +handle_const_return: + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->paramsel = 1.0; } -/* - * Operator expression handler - */ -static WrapperNode * -handle_opexpr(const OpExpr *expr, WalkerContext *context) +/* Array handler */ +static void +handle_array(ArrayType *array, + const Oid collid, + const int strategy, + const bool use_or, + const WalkerContext *context, + WrapperNode *result) /* ret value #1 */ { - WrapperNode *result = (WrapperNode *)palloc(sizeof(WrapperNode)); - Node *var, *param; const PartRelationInfo *prel = context->prel; - result->orig = (const Node *)expr; - result->args = NIL; + /* Elements of the array */ + Datum *elem_values; + bool *elem_isnull; + int elem_count; + + /* Element's properties */ + Oid elem_type; + int16 elem_len; + bool elem_byval; + char elem_align; + + /* + * Check if we can work with this strategy + * We can work only with BTLessStrategyNumber, BTLessEqualStrategyNumber, + * BTEqualStrategyNumber, BTGreaterEqualStrategyNumber and BTGreaterStrategyNumber. + * If new search strategies appear in the future, then access optimizations from + * this function will not work, and the default behavior (handle_array_return:) will work. 
+ */ + if (strategy == InvalidStrategy || strategy > BTGreaterStrategyNumber) + goto handle_array_return; + + /* Get element's properties */ + elem_type = ARR_ELEMTYPE(array); + get_typlenbyvalalign(elem_type, &elem_len, &elem_byval, &elem_align); + + /* Extract values from the array */ + deconstruct_array(array, elem_type, elem_len, elem_byval, elem_align, + &elem_values, &elem_isnull, &elem_count); - if (list_length(expr->args) == 2) + /* Handle non-null Const arrays */ + if (elem_count > 0) { - if (pull_var_param(context, expr, &var, ¶m)) + List *ranges; + int i; + + /* This is only for paranoia's sake (checking correctness of following take_min calculation) */ + Assert(BTEqualStrategyNumber == 3 + && BTLessStrategyNumber < BTEqualStrategyNumber + && BTLessEqualStrategyNumber < BTEqualStrategyNumber + && BTGreaterEqualStrategyNumber > BTEqualStrategyNumber + && BTGreaterStrategyNumber > BTEqualStrategyNumber); + + /* Optimizations for <, <=, >=, > */ + if (strategy != BTEqualStrategyNumber) { - if (IsConstValue(context, param)) - { - handle_binary_opexpr(context, result, var, ExtractConst(context, param)); - return result; - } - else if (IsA(param, Param) || IsA(param, Var)) - { - handle_binary_opexpr_param(prel, result, var); - return result; - } - } - } + bool take_min; + Datum pivot; + bool pivot_null; - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), true)); - result->paramsel = 1.0; - return result; -} + /* + * OR: Max for (< | <=); Min for (> | >=) + * AND: Min for (< | <=); Max for (> | >=) + */ + take_min = strategy < BTEqualStrategyNumber ? !use_or : use_or; -/* - * Checks if expression is a KEY OP PARAM or PARAM OP KEY, - * where KEY is partition key (it could be Var or RelableType) and PARAM is - * whatever. Function returns variable (or RelableType) and param via var_ptr - * and param_ptr pointers. If partition key isn't in expression then function - * returns false. 
- */ -static bool -pull_var_param(const WalkerContext *ctx, - const OpExpr *expr, - Node **var_ptr, - Node **param_ptr) -{ - Node *left = linitial(expr->args), - *right = lsecond(expr->args); - Var *v = NULL; + /* Extract Min (or Max) element */ + pivot = array_find_min_max(elem_values, elem_isnull, + elem_count, elem_type, collid, + take_min, &pivot_null); - /* Check the case when variable is on the left side */ - if (IsA(left, Var) || IsA(left, RelabelType)) - { - v = !IsA(left, RelabelType) ? - (Var *) left : - (Var *) ((RelabelType *) left)->arg; + /* Write data and "shrink" the array */ + elem_values[0] = pivot_null ? (Datum) 0 : pivot; + elem_isnull[0] = pivot_null; + elem_count = 1; - if (v->varoattno == ctx->prel->attnum) - { - *var_ptr = left; - *param_ptr = right; - return true; + /* If pivot is not NULL ... */ + if (!pivot_null) + { + /* ... append single NULL if array contains NULLs */ + if (array_contains_nulls(array)) + { + /* Make sure that we have enough space for 2 elements */ + Assert(ArrayGetNItems(ARR_NDIM(array), ARR_DIMS(array)) >= 2); + + elem_values[1] = (Datum) 0; + elem_isnull[1] = true; + elem_count = 2; + } + /* ... optimize clause ('orig') if array does not contain NULLs */ + else if (result->orig) + { + /* Should've been provided by the caller */ + ScalarArrayOpExpr *orig = (ScalarArrayOpExpr *) result->orig; + + /* Rebuild clause using 'pivot' */ + result->orig = (Node *) + make_opclause(orig->opno, BOOLOID, false, + (Expr *) linitial(orig->args), + (Expr *) makeConst(elem_type, + -1, + collid, + elem_len, + elem_values[0], + elem_isnull[0], + elem_byval), + InvalidOid, + collid); + } + } } - } - /* ... variable is on the right side */ - if (IsA(right, Var) || IsA(right, RelabelType)) - { - v = !IsA(right, RelabelType) ? - (Var *) right : - (Var *) ((RelabelType *) right)->arg; + /* Set default rangeset */ + ranges = use_or ? 
NIL : list_make1_irange_full(prel, IR_COMPLETE); - if (v->varoattno == ctx->prel->attnum) + /* Select partitions using values */ + for (i = 0; i < elem_count; i++) { - *var_ptr = right; - *param_ptr = left; - return true; + Const c; + WrapperNode wrap = InvalidWrapperNode; + + NodeSetTag(&c, T_Const); + c.consttype = elem_type; + c.consttypmod = -1; + c.constcollid = InvalidOid; + c.constlen = datumGetSize(elem_values[i], + elem_byval, + elem_len); + c.constvalue = elem_values[i]; + c.constisnull = elem_isnull[i]; + c.constbyval = elem_byval; + c.location = -1; + + handle_const(&c, collid, strategy, context, &wrap); + + /* Should we use OR | AND? */ + ranges = use_or ? + irange_list_union(ranges, wrap.rangeset) : + irange_list_intersection(ranges, wrap.rangeset); } + + /* Free resources */ + pfree(elem_values); + pfree(elem_isnull); + + result->rangeset = ranges; + result->paramsel = 1.0; + + return; /* done, exit */ } - /* Variable isn't a partitionig key */ - return false; +handle_array_return: + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->paramsel = 1.0; } -/* - * Boolean expression handler - */ -static WrapperNode * -handle_boolexpr(const BoolExpr *expr, WalkerContext *context) +/* Boolean expression handler */ +static void +handle_boolexpr(const BoolExpr *expr, + const WalkerContext *context, + WrapperNode *result) /* ret value #1 */ { - WrapperNode *result = (WrapperNode *)palloc(sizeof(WrapperNode)); - ListCell *lc; const PartRelationInfo *prel = context->prel; + List *ranges, + *args = NIL; + double paramsel = 1.0; + ListCell *lc; - result->orig = (const Node *)expr; - result->args = NIL; - result->paramsel = 1.0; - - if (expr->boolop == AND_EXPR) - result->rangeset = list_make1_irange(make_irange(0, - PrelLastChild(prel), - false)); - else - result->rangeset = NIL; + /* Set default rangeset */ + ranges = (expr->boolop == AND_EXPR) ? 
+ list_make1_irange_full(prel, IR_COMPLETE) : + NIL; + /* Examine expressions */ foreach (lc, expr->args) { - WrapperNode *arg; + WrapperNode *wrap; + + wrap = walk_expr_tree((Expr *) lfirst(lc), context); + args = lappend(args, wrap); - arg = walk_expr_tree((Expr *)lfirst(lc), context); - result->args = lappend(result->args, arg); switch (expr->boolop) { case OR_EXPR: - // finish_least_greatest(arg, context); - result->rangeset = irange_list_union(result->rangeset, arg->rangeset); + ranges = irange_list_union(ranges, wrap->rangeset); break; + case AND_EXPR: - result->rangeset = irange_list_intersect(result->rangeset, arg->rangeset); - result->paramsel *= arg->paramsel; + ranges = irange_list_intersection(ranges, wrap->rangeset); + paramsel *= wrap->paramsel; break; + default: - result->rangeset = list_make1_irange(make_irange(0, - PrelLastChild(prel), - false)); + ranges = list_make1_irange_full(prel, IR_LOSSY); break; } } + /* Adjust paramsel for OR */ if (expr->boolop == OR_EXPR) { - int totallen = irange_list_length(result->rangeset); + int totallen = irange_list_length(ranges); - foreach (lc, result->args) + foreach (lc, args) { - WrapperNode *arg = (WrapperNode *) lfirst(lc); - int len = irange_list_length(arg->rangeset); + WrapperNode *arg = (WrapperNode *) lfirst(lc); + int len = irange_list_length(arg->rangeset); - result->paramsel *= (1.0 - arg->paramsel * (double)len / (double)totallen); + paramsel *= (1.0 - arg->paramsel * (double)len / (double)totallen); } - result->paramsel = 1.0 - result->paramsel; + + paramsel = 1.0 - paramsel; } - return result; + /* Save results */ + result->rangeset = ranges; + result->paramsel = paramsel; + result->orig = (const Node *) expr; + result->args = args; } -/* - * Scalar array expression - */ -static WrapperNode * -handle_arrexpr(const ScalarArrayOpExpr *expr, WalkerContext *context) +/* Scalar array expression handler */ +static void +handle_arrexpr(const ScalarArrayOpExpr *expr, + const WalkerContext *context, + 
WrapperNode *result) /* ret value #1 */ { - WrapperNode *result = (WrapperNode *)palloc(sizeof(WrapperNode)); - Node *varnode = (Node *) linitial(expr->args); - Var *var; - Node *arraynode = (Node *) lsecond(expr->args); - const PartRelationInfo *prel = context->prel; + Node *part_expr = (Node *) linitial(expr->args); + Node *array = (Node *) lsecond(expr->args); + const PartRelationInfo *prel = context->prel; + TypeCacheEntry *tce; + int strategy; - result->orig = (const Node *)expr; - result->args = NIL; - result->paramsel = 1.0; + /* Small sanity check */ + Assert(list_length(expr->args) == 2); + + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); + strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); + + /* Save expression */ + result->orig = (const Node *) expr; + + /* Check if expression tree is a partitioning expression */ + if (!match_expr_to_operand(context->prel_expr, part_expr)) + goto handle_arrexpr_all; - Assert(varnode != NULL); + /* Check if we can work with this strategy */ + if (strategy == 0) + goto handle_arrexpr_all; - /* If variable is not the partition key then skip it */ - if (IsA(varnode, Var) || IsA(varnode, RelabelType)) + /* Examine the array node */ + switch (nodeTag(array)) { - var = !IsA(varnode, RelabelType) ? 
- (Var *) varnode : - (Var *) ((RelabelType *) varnode)->arg; - if (var->varoattno != prel->attnum) - goto handle_arrexpr_return; + case T_Const: + { + Const *c = (Const *) array; + + /* Array is NULL */ + if (c->constisnull) + { + result->rangeset = NIL; + result->paramsel = 0.0; + + return; /* done, exit */ + } + + /* Examine array */ + handle_array(DatumGetArrayTypeP(c->constvalue), + expr->inputcollid, strategy, + expr->useOr, context, result); + + return; /* done, exit */ + } + + case T_ArrayExpr: + { + ArrayExpr *arr_expr = (ArrayExpr *) array; + Oid elem_type = arr_expr->element_typeid; + int array_params = 0; + double paramsel = 1.0; + List *ranges; + ListCell *lc; + + if (list_length(arr_expr->elements) == 0) + goto handle_arrexpr_all; + + /* Set default ranges for OR | AND */ + ranges = expr->useOr ? NIL : list_make1_irange_full(prel, IR_COMPLETE); + + /* Walk trough elements list */ + foreach (lc, arr_expr->elements) + { + Node *elem = lfirst(lc); + WrapperNode wrap = InvalidWrapperNode; + + /* Stop if ALL + quals evaluate to NIL */ + if (!expr->useOr && ranges == NIL) + break; + + /* Is this a const value? */ + if (IsConstValue(elem, context)) + { + Const *c = ExtractConst(elem, context); + + /* Is this an array?. */ + if (c->consttype != elem_type && !c->constisnull) + { + handle_array(DatumGetArrayTypeP(c->constvalue), + expr->inputcollid, strategy, + expr->useOr, context, &wrap); + } + /* ... or a single element? */ + else + { + handle_const(c, expr->inputcollid, + strategy, context, &wrap); + } + + /* Should we use OR | AND? */ + ranges = expr->useOr ? 
+ irange_list_union(ranges, wrap.rangeset) : + irange_list_intersection(ranges, wrap.rangeset); + } + else array_params++; /* we've just met non-const nodes */ + } + + /* Check for PARAM-related optimizations */ + if (array_params > 0) + { + double sel = estimate_paramsel_using_prel(prel, strategy); + int i; + + if (expr->useOr) + { + /* We can't say anything if PARAMs + ANY */ + ranges = list_make1_irange_full(prel, IR_LOSSY); + + /* See handle_boolexpr() */ + for (i = 0; i < array_params; i++) + paramsel *= (1 - sel); + + paramsel = 1 - paramsel; + } + else + { + /* Recheck condition on a narrowed set of partitions */ + ranges = irange_list_set_lossiness(ranges, IR_LOSSY); + + /* See handle_boolexpr() */ + for (i = 0; i < array_params; i++) + paramsel *= sel; + } + } + + /* Save result */ + result->rangeset = ranges; + result->paramsel = paramsel; + + return; /* done, exit */ + } + + default: + break; } - else - goto handle_arrexpr_return; - if (arraynode && IsA(arraynode, Const) && - !((Const *) arraynode)->constisnull) +handle_arrexpr_all: + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->paramsel = 1.0; +} + +/* Operator expression handler */ +static void +handle_opexpr(const OpExpr *expr, + const WalkerContext *context, + WrapperNode *result) /* ret value #1 */ +{ + Node *param; + const PartRelationInfo *prel = context->prel; + Oid opid; /* operator's Oid */ + + /* Save expression */ + result->orig = (const Node *) expr; + + /* Is it KEY OP PARAM or PARAM OP KEY? 
*/ + if (OidIsValid(opid = IsKeyOpParam(expr, context, ¶m))) { - ArrayType *arrayval; - int16 elmlen; - bool elmbyval; - char elmalign; - int num_elems; - Datum *elem_values; - bool *elem_nulls; - int i; - - /* Extract values from array */ - arrayval = DatumGetArrayTypeP(((Const *) arraynode)->constvalue); - get_typlenbyvalalign(ARR_ELEMTYPE(arrayval), - &elmlen, &elmbyval, &elmalign); - deconstruct_array(arrayval, - ARR_ELEMTYPE(arrayval), - elmlen, elmbyval, elmalign, - &elem_values, &elem_nulls, &num_elems); + TypeCacheEntry *tce; + int strategy; - result->rangeset = NIL; + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); + strategy = get_op_opfamily_strategy(opid, tce->btree_opf); - /* Construct OIDs list */ - for (i = 0; i < num_elems; i++) + if (IsConstValue(param, context)) { - Datum value; - uint32 idx; - - /* Invoke base hash function for value type */ - value = OidFunctionCall1(prel->hash_proc, elem_values[i]); - idx = hash_to_part_index(DatumGetInt32(value), PrelChildrenCount(prel)); - result->rangeset = irange_list_union(result->rangeset, - list_make1_irange(make_irange(idx, - idx, - true))); - } + handle_const(ExtractConst(param, context), + expr->inputcollid, + strategy, context, result); - /* Free resources */ - pfree(elem_values); - pfree(elem_nulls); + return; /* done, exit */ + } + /* TODO: estimate selectivity for param if it's Var */ + else if (IsA(param, Param) || IsA(param, Var)) + { + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->paramsel = estimate_paramsel_using_prel(prel, strategy); - return result; + return; /* done, exit */ + } } - if (arraynode && IsA(arraynode, Param)) - result->paramsel = DEFAULT_INEQ_SEL; + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->paramsel = 1.0; +} + -handle_arrexpr_return: - result->rangeset = list_make1_irange(make_irange(0, PrelLastChild(prel), true)); - return result; +/* Find Max or Min value of array */ +static Datum +array_find_min_max(Datum 
*values, + bool *isnull, + int length, + Oid value_type, + Oid collid, + bool take_min, + bool *result_null) /* ret value #2 */ +{ + TypeCacheEntry *tce = lookup_type_cache(value_type, TYPECACHE_CMP_PROC_FINFO); + Datum *pivot = NULL; + int i; + + for (i = 0; i < length; i++) + { + if (isnull[i]) + continue; + + /* Update 'pivot' */ + if (pivot == NULL || (take_min ? + check_lt(&tce->cmp_proc_finfo, + collid, values[i], *pivot) : + check_gt(&tce->cmp_proc_finfo, + collid, values[i], *pivot))) + { + pivot = &values[i]; + } + } + + /* Return results */ + *result_null = (pivot == NULL); + return (pivot == NULL) ? (Datum) 0 : *pivot; } + /* - * Theres are functions below copied from allpaths.c with (or without) some - * modifications. Couldn't use original because of 'static' modifier. + * ---------------------------------------------------------------------------------- + * NOTE: The following functions below are copied from PostgreSQL with (or without) + * some modifications. Couldn't use original because of 'static' modifier. + * ---------------------------------------------------------------------------------- */ /* @@ -1641,7 +1734,7 @@ set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) * Test any partial indexes of rel for applicability. We must do this * first since partial unique indexes can affect size estimates. 
*/ - check_partial_indexes(root, rel); + check_index_predicates_compat(root, rel); /* Mark rel with estimated output rows, width, etc */ set_baserel_size_estimates(root, rel); @@ -1654,8 +1747,8 @@ set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) { - Relids required_outer; - Path *path; + Relids required_outer; + Path *path; /* * We don't support pushing join clauses into the quals of a seqscan, but @@ -1671,7 +1764,12 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) path = create_seqscan_path(root, rel, required_outer); #endif add_path(rel, path); - // set_pathkeys(root, rel, path); + +#if PG_VERSION_NUM >= 90600 + /* If appropriate, consider parallel sequential scan */ + if (rel->consider_parallel && required_outer == NULL) + create_plain_partial_paths_compat(root, rel); +#endif /* Consider index scans */ create_index_paths(root, rel); @@ -1681,46 +1779,391 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) } /* - * set_foreign_size - * Set size estimates for a foreign table RTE + * set_foreign_size + * Set size estimates for a foreign table RTE + */ +static void +set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +{ + /* Mark rel with estimated output rows, width, etc */ + set_foreign_size_estimates(root, rel); + + /* Let FDW adjust the size estimates, if it can */ + rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid); + + /* ... 
but do not let it set the rows estimate to zero */ + rel->rows = clamp_row_est(rel->rows); +} + +/* + * set_foreign_pathlist + * Build access paths for a foreign table RTE + */ +static void +set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +{ + /* Call the FDW's GetForeignPaths function to generate path(s) */ + rel->fdwroutine->GetForeignPaths(root, rel, rte->relid); +} + + +static List * +accumulate_append_subpath(List *subpaths, Path *path) +{ + return lappend(subpaths, path); +} + + +/* + * generate_mergeappend_paths + * Generate MergeAppend paths for an append relation + * + * Generate a path for each ordering (pathkey list) appearing in + * all_child_pathkeys. + * + * We consider both cheapest-startup and cheapest-total cases, ie, for each + * interesting ordering, collect all the cheapest startup subpaths and all the + * cheapest total paths, and build a MergeAppend path for each case. + * + * We don't currently generate any parameterized MergeAppend paths. While + * it would not take much more code here to do so, it's very unclear that it + * is worth the planning cycles to investigate such paths: there's little + * use for an ordered path on the inside of a nestloop. In fact, it's likely + * that the current coding of add_path would reject such paths out of hand, + * because add_path gives no credit for sort ordering of parameterized paths, + * and a parameterized MergeAppend is going to be more expensive than the + * corresponding parameterized Append path. If we ever try harder to support + * parameterized mergejoin plans, it might be worth adding support for + * parameterized MergeAppends to feed such joins. (See notes in + * optimizer/README for why that might not ever happen, though.) 
+ */ +static void +generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, + List *live_childrels, + List *all_child_pathkeys, + PathKey *pathkeyAsc, PathKey *pathkeyDesc) +{ + ListCell *lcp; + + foreach(lcp, all_child_pathkeys) + { + List *pathkeys = (List *) lfirst(lcp); + List *startup_subpaths = NIL; + List *total_subpaths = NIL; + bool startup_neq_total = false; + bool presorted = true; + ListCell *lcr; + + /* Select the child paths for this ordering... */ + foreach(lcr, live_childrels) + { + RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr); + Path *cheapest_startup, + *cheapest_total; + + /* Locate the right paths, if they are available. */ + cheapest_startup = + get_cheapest_path_for_pathkeys_compat(childrel->pathlist, + pathkeys, + NULL, + STARTUP_COST, + false); + cheapest_total = + get_cheapest_path_for_pathkeys_compat(childrel->pathlist, + pathkeys, + NULL, + TOTAL_COST, + false); + + /* + * If we can't find any paths with the right order just use the + * cheapest-total path; we'll have to sort it later. + */ + if (cheapest_startup == NULL || cheapest_total == NULL) + { + cheapest_startup = cheapest_total = + childrel->cheapest_total_path; + /* Assert we do have an unparameterized path for this child */ + Assert(cheapest_total->param_info == NULL); + presorted = false; + } + + /* + * Notice whether we actually have different paths for the + * "cheapest" and "total" cases; frequently there will be no point + * in two create_merge_append_path() calls. + */ + if (cheapest_startup != cheapest_total) + startup_neq_total = true; + + startup_subpaths = + accumulate_append_subpath(startup_subpaths, cheapest_startup); + total_subpaths = + accumulate_append_subpath(total_subpaths, cheapest_total); + } + + /* + * When first pathkey matching ascending/descending sort by partition + * column then build path with Append node, because MergeAppend is not + * required in this case. 
+ */ + if ((PathKey *) linitial(pathkeys) == pathkeyAsc && presorted) + { + Path *path; + + path = (Path *) create_append_path_compat(rel, startup_subpaths, + NULL, 0); + path->pathkeys = pathkeys; + add_path(rel, path); + + if (startup_neq_total) + { + path = (Path *) create_append_path_compat(rel, total_subpaths, + NULL, 0); + path->pathkeys = pathkeys; + add_path(rel, path); + } + } + else if ((PathKey *) linitial(pathkeys) == pathkeyDesc && presorted) + { + /* + * When pathkey is descending sort by partition column then we + * need to scan partitions in reversed order. + */ + Path *path; + + path = (Path *) create_append_path_compat(rel, + list_reverse(startup_subpaths), NULL, 0); + path->pathkeys = pathkeys; + add_path(rel, path); + + if (startup_neq_total) + { + path = (Path *) create_append_path_compat(rel, + list_reverse(total_subpaths), NULL, 0); + path->pathkeys = pathkeys; + add_path(rel, path); + } + } + else + { + /* ... and build the MergeAppend paths */ + add_path(rel, (Path *) create_merge_append_path_compat( + root, rel, startup_subpaths, pathkeys, NULL)); + if (startup_neq_total) + add_path(rel, (Path *) create_merge_append_path_compat( + root, rel, total_subpaths, pathkeys, NULL)); + } + } +} + + +/* + * translate_col_privs + * Translate a bitmapset representing per-column privileges from the + * parent rel's attribute numbering to the child's. + * + * The only surprise here is that we don't translate a parent whole-row + * reference into a child whole-row reference. That would mean requiring + * permissions on all child columns, which is overly strict, since the + * query is really only going to reference the inherited columns. Instead + * we set the per-column bits for all inherited columns. 
*/ -static void -set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +Bitmapset * +translate_col_privs(const Bitmapset *parent_privs, + List *translated_vars) { - /* Mark rel with estimated output rows, width, etc */ - set_foreign_size_estimates(root, rel); + Bitmapset *child_privs = NULL; + bool whole_row; + int attno; + ListCell *lc; - /* Let FDW adjust the size estimates, if it can */ - rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid); + /* System attributes have the same numbers in all tables */ + for (attno = FirstLowInvalidHeapAttributeNumber + 1; attno < 0; attno++) + { + if (bms_is_member(attno - FirstLowInvalidHeapAttributeNumber, + parent_privs)) + child_privs = bms_add_member(child_privs, + attno - FirstLowInvalidHeapAttributeNumber); + } - /* ... but do not let it set the rows estimate to zero */ - rel->rows = clamp_row_est(rel->rows); + /* Check if parent has whole-row reference */ + whole_row = bms_is_member(InvalidAttrNumber - FirstLowInvalidHeapAttributeNumber, + parent_privs); + + /* And now translate the regular user attributes, using the vars list */ + attno = InvalidAttrNumber; + foreach(lc, translated_vars) + { + Var *var = (Var *) lfirst(lc); + + attno++; + if (var == NULL) /* ignore dropped columns */ + continue; + Assert(IsA(var, Var)); + if (whole_row || + bms_is_member(attno - FirstLowInvalidHeapAttributeNumber, + parent_privs)) + child_privs = bms_add_member(child_privs, + var->varattno - FirstLowInvalidHeapAttributeNumber); + } + + return child_privs; } + /* - * set_foreign_pathlist - * Build access paths for a foreign table RTE + * make_inh_translation_list + * Build the list of translations from parent Vars to child Vars for + * an inheritance child. + * + * For paranoia's sake, we match type/collation as well as attribute name. 
*/ -static void -set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +void +make_inh_translation_list(Relation oldrelation, Relation newrelation, + Index newvarno, List **translated_vars, + AppendRelInfo *appinfo) { - /* Call the FDW's GetForeignPaths function to generate path(s) */ - rel->fdwroutine->GetForeignPaths(root, rel, rte->relid); + List *vars = NIL; + TupleDesc old_tupdesc = RelationGetDescr(oldrelation); + TupleDesc new_tupdesc = RelationGetDescr(newrelation); + int oldnatts = old_tupdesc->natts; + int newnatts = new_tupdesc->natts; + int old_attno; +#if PG_VERSION_NUM >= 130000 /* see commit ce76c0ba */ + AttrNumber *pcolnos = NULL; + + if (appinfo) + { + /* Initialize reverse-translation array with all entries zero */ + appinfo->num_child_cols = newnatts; + appinfo->parent_colnos = pcolnos = + (AttrNumber *) palloc0(newnatts * sizeof(AttrNumber)); + } +#endif + + for (old_attno = 0; old_attno < oldnatts; old_attno++) + { + Form_pg_attribute att; + char *attname; + Oid atttypid; + int32 atttypmod; + Oid attcollation; + int new_attno; + + att = TupleDescAttr(old_tupdesc, old_attno); + if (att->attisdropped) + { + /* Just put NULL into this list entry */ + vars = lappend(vars, NULL); + continue; + } + attname = NameStr(att->attname); + atttypid = att->atttypid; + atttypmod = att->atttypmod; + attcollation = att->attcollation; + + /* + * When we are generating the "translation list" for the parent table + * of an inheritance set, no need to search for matches. + */ + if (oldrelation == newrelation) + { + vars = lappend(vars, makeVar(newvarno, + (AttrNumber) (old_attno + 1), + atttypid, + atttypmod, + attcollation, + 0)); +#if PG_VERSION_NUM >= 130000 + if (pcolnos) + pcolnos[old_attno] = old_attno + 1; +#endif + continue; + } + + /* + * Otherwise we have to search for the matching column by name. + * There's no guarantee it'll have the same column position, because + * of cases like ALTER TABLE ADD COLUMN and multiple inheritance. 
+ * However, in simple cases it will be the same column number, so try + * that before we go groveling through all the columns. + * + * Note: the test for (att = ...) != NULL cannot fail, it's just a + * notational device to include the assignment into the if-clause. + */ + if (old_attno < newnatts && + (att = TupleDescAttr(new_tupdesc, old_attno)) != NULL && + !att->attisdropped && att->attinhcount != 0 && + strcmp(attname, NameStr(att->attname)) == 0) + new_attno = old_attno; + else + { + for (new_attno = 0; new_attno < newnatts; new_attno++) + { + att = TupleDescAttr(new_tupdesc, new_attno); + + /* + * Make clang analyzer happy: + * + * Access to field 'attisdropped' results + * in a dereference of a null pointer + */ + if (!att) + elog(ERROR, "error in function " + CppAsString(make_inh_translation_list)); + + if (!att->attisdropped && att->attinhcount != 0 && + strcmp(attname, NameStr(att->attname)) == 0) + break; + } + if (new_attno >= newnatts) + elog(ERROR, "could not find inherited attribute \"%s\" of relation \"%s\"", + attname, RelationGetRelationName(newrelation)); + } + + /* Found it, check type and collation match */ + if (atttypid != att->atttypid || atttypmod != att->atttypmod) + elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's type", + attname, RelationGetRelationName(newrelation)); + if (attcollation != att->attcollation) + elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's collation", + attname, RelationGetRelationName(newrelation)); + + vars = lappend(vars, makeVar(newvarno, + (AttrNumber) (new_attno + 1), + atttypid, + atttypmod, + attcollation, + 0)); +#if PG_VERSION_NUM >= 130000 + if (pcolnos) + pcolnos[new_attno] = old_attno + 1; +#endif + } + + *translated_vars = vars; } /* * set_append_rel_pathlist * Build access paths for an "append relation" + * Similar to PG function with the same name. 
+ * + * NOTE: this function is 'public' (used in hooks.c) */ void -set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, - Index rti, RangeTblEntry *rte, +set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, PathKey *pathkeyAsc, PathKey *pathkeyDesc) { Index parentRTindex = rti; List *live_childrels = NIL; List *subpaths = NIL; bool subpaths_valid = true; +#if PG_VERSION_NUM >= 90600 + List *partial_subpaths = NIL; + bool partial_subpaths_valid = true; +#endif List *all_child_pathkeys = NIL; List *all_child_outers = NIL; ListCell *l; @@ -1733,65 +2176,115 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, */ foreach(l, root->append_rel_list) { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); - Index childRTindex; - RangeTblEntry *childRTE; - RelOptInfo *childrel; - ListCell *lcp; + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); + Index child_rti; + RangeTblEntry *child_rte; + RelOptInfo *child_rel; + ListCell *lcp; /* append_rel_list contains all append rels; ignore others */ if (appinfo->parent_relid != parentRTindex) continue; /* Re-locate the child RTE and RelOptInfo */ - childRTindex = appinfo->child_relid; - childRTE = root->simple_rte_array[childRTindex]; - childrel = root->simple_rel_array[childRTindex]; + child_rti = appinfo->child_relid; + child_rte = root->simple_rte_array[child_rti]; + child_rel = root->simple_rel_array[child_rti]; + + if (!child_rel) + elog(ERROR, "could not make access paths to a relation"); +#if PG_VERSION_NUM >= 90600 /* - * Compute the child's access paths. + * If parallelism is allowable for this query in general and for parent + * appendrel, see whether it's allowable for this childrel in + * particular. + * + * For consistency, do this before calling set_rel_size() for the child. 
*/ - if (childRTE->relkind == RELKIND_FOREIGN_TABLE) - { - set_foreign_size(root, childrel, childRTE); - set_foreign_pathlist(root, childrel, childRTE); - } - else + if (root->glob->parallelModeOK && rel->consider_parallel) + set_rel_consider_parallel_compat(root, child_rel, child_rte); +#endif + + /* Build a few paths for this relation */ + if (child_rel->pathlist == NIL) { - set_plain_rel_size(root, childrel, childRTE); - set_plain_rel_pathlist(root, childrel, childRTE); + /* Compute child's access paths & sizes */ + if (child_rte->relkind == RELKIND_FOREIGN_TABLE) + { + /* childrel->rows should be >= 1 */ + set_foreign_size(root, child_rel, child_rte); + + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(child_rel)) + continue; + + set_foreign_pathlist(root, child_rel, child_rte); + } + else + { + /* childrel->rows should be >= 1 */ + set_plain_rel_size(root, child_rel, child_rte); + + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(child_rel)) + continue; + + set_plain_rel_pathlist(root, child_rel, child_rte); + } } - set_cheapest(childrel); - /* - * If child is dummy, ignore it. - */ - if (IS_DUMMY_REL(childrel)) + /* Set cheapest path for child */ + set_cheapest(child_rel); + + /* If child BECAME dummy, ignore it */ + if (IS_DUMMY_REL(child_rel)) continue; /* * Child is live, so add it to the live_childrels list for use below. */ - live_childrels = lappend(live_childrels, childrel); + live_childrels = lappend(live_childrels, child_rel); + +#if PG_VERSION_NUM >= 90600 + /* + * If any live child is not parallel-safe, treat the whole appendrel + * as not parallel-safe. In future we might be able to generate plans + * in which some children are farmed out to workers while others are + * not; but we don't have that today, so it's a waste to consider + * partial paths anywhere in the appendrel unless it's all safe. 
+ */ + if (!child_rel->consider_parallel) + rel->consider_parallel = false; +#endif /* * If child has an unparameterized cheapest-total path, add that to * the unparameterized Append path we are constructing for the parent. * If not, there's no workable unparameterized path. */ - if (childrel->cheapest_total_path->param_info == NULL) + if (child_rel->cheapest_total_path->param_info == NULL) subpaths = accumulate_append_subpath(subpaths, - childrel->cheapest_total_path); + child_rel->cheapest_total_path); else subpaths_valid = false; +#if PG_VERSION_NUM >= 90600 + /* Same idea, but for a partial plan. */ + if (child_rel->partial_pathlist != NIL) + partial_subpaths = accumulate_append_subpath(partial_subpaths, + linitial(child_rel->partial_pathlist)); + else + partial_subpaths_valid = false; +#endif + /* * Collect lists of all the available path orderings and * parameterizations for all the children. We use these as a * heuristic to indicate which sort orderings and parameterizations we * should build Append and MergeAppend paths for. */ - foreach(lcp, childrel->pathlist) + foreach(lcp, child_rel->pathlist) { Path *childpath = (Path *) lfirst(lcp); List *childkeys = childpath->pathkeys; @@ -1856,7 +2349,41 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, * if we have zero or one live subpath due to constraint exclusion.) */ if (subpaths_valid) - add_path(rel, (Path *) create_append_path(rel, subpaths, NULL)); + add_path(rel, + (Path *) create_append_path_compat(rel, subpaths, NULL, 0)); + +#if PG_VERSION_NUM >= 90600 + /* + * Consider an append of partial unordered, unparameterized partial paths. + */ + if (partial_subpaths_valid) + { + AppendPath *appendpath; + ListCell *lc; + int parallel_workers = 0; + + /* + * Decide on the number of workers to request for this append path. + * For now, we just use the maximum value from among the members. 
It + * might be useful to use a higher number if the Append node were + * smart enough to spread out the workers, but it currently isn't. + */ + foreach(lc, partial_subpaths) + { + Path *path = lfirst(lc); + + parallel_workers = Max(parallel_workers, path->parallel_workers); + } + + if (parallel_workers > 0) + { + /* Generate a partial append path. */ + appendpath = create_append_path_compat(rel, partial_subpaths, NULL, + parallel_workers); + add_partial_path(rel, (Path *) appendpath); + } + } +#endif /* * Also build unparameterized MergeAppend paths based on the collected @@ -1907,16 +2434,10 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, if (subpaths_valid) add_path(rel, (Path *) - create_append_path(rel, subpaths, required_outer)); + create_append_path_compat(rel, subpaths, required_outer, 0)); } } -static List * -accumulate_append_subpath(List *subpaths, Path *path) -{ - return lappend(subpaths, path); -} - /* * get_cheapest_parameterized_child_path * Get cheapest path for this relation that has exactly the requested @@ -1924,7 +2445,7 @@ accumulate_append_subpath(List *subpaths, Path *path) * * Returns NULL if unable to create such a path. */ -static Path * +Path * get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer) { @@ -1936,10 +2457,11 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, * parameterization. If it has exactly the needed parameterization, we're * done. 
*/ - cheapest = get_cheapest_path_for_pathkeys(rel->pathlist, - NIL, - required_outer, - TOTAL_COST); + cheapest = get_cheapest_path_for_pathkeys_compat(rel->pathlist, + NIL, + required_outer, + TOTAL_COST, + false); Assert(cheapest != NULL); if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer)) return cheapest; @@ -1989,166 +2511,3 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, /* Return the best path, or NULL if we found no suitable candidate */ return cheapest; } - -/* - * generate_mergeappend_paths - * Generate MergeAppend paths for an append relation - * - * Generate a path for each ordering (pathkey list) appearing in - * all_child_pathkeys. - * - * We consider both cheapest-startup and cheapest-total cases, ie, for each - * interesting ordering, collect all the cheapest startup subpaths and all the - * cheapest total paths, and build a MergeAppend path for each case. - * - * We don't currently generate any parameterized MergeAppend paths. While - * it would not take much more code here to do so, it's very unclear that it - * is worth the planning cycles to investigate such paths: there's little - * use for an ordered path on the inside of a nestloop. In fact, it's likely - * that the current coding of add_path would reject such paths out of hand, - * because add_path gives no credit for sort ordering of parameterized paths, - * and a parameterized MergeAppend is going to be more expensive than the - * corresponding parameterized Append path. If we ever try harder to support - * parameterized mergejoin plans, it might be worth adding support for - * parameterized MergeAppends to feed such joins. (See notes in - * optimizer/README for why that might not ever happen, though.) 
- */ -static void -generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, - List *live_childrels, - List *all_child_pathkeys, - PathKey *pathkeyAsc, PathKey *pathkeyDesc) -{ - ListCell *lcp; - - foreach(lcp, all_child_pathkeys) - { - List *pathkeys = (List *) lfirst(lcp); - List *startup_subpaths = NIL; - List *total_subpaths = NIL; - bool startup_neq_total = false; - bool presorted = true; - ListCell *lcr; - - /* Select the child paths for this ordering... */ - foreach(lcr, live_childrels) - { - RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr); - Path *cheapest_startup, - *cheapest_total; - - /* Locate the right paths, if they are available. */ - cheapest_startup = - get_cheapest_path_for_pathkeys(childrel->pathlist, - pathkeys, - NULL, - STARTUP_COST); - cheapest_total = - get_cheapest_path_for_pathkeys(childrel->pathlist, - pathkeys, - NULL, - TOTAL_COST); - - /* - * If we can't find any paths with the right order just use the - * cheapest-total path; we'll have to sort it later. - */ - if (cheapest_startup == NULL || cheapest_total == NULL) - { - cheapest_startup = cheapest_total = - childrel->cheapest_total_path; - /* Assert we do have an unparameterized path for this child */ - Assert(cheapest_total->param_info == NULL); - presorted = false; - } - - /* - * Notice whether we actually have different paths for the - * "cheapest" and "total" cases; frequently there will be no point - * in two create_merge_append_path() calls. - */ - if (cheapest_startup != cheapest_total) - startup_neq_total = true; - - startup_subpaths = - accumulate_append_subpath(startup_subpaths, cheapest_startup); - total_subpaths = - accumulate_append_subpath(total_subpaths, cheapest_total); - } - - /* - * When first pathkey matching ascending/descending sort by partition - * column then build path with Append node, because MergeAppend is not - * required in this case. 
- */ - if ((PathKey *) linitial(pathkeys) == pathkeyAsc && presorted) - { - Path *path; - - path = (Path *) create_append_path(rel, startup_subpaths, NULL); - path->pathkeys = pathkeys; - add_path(rel, path); - - if (startup_neq_total) - { - path = (Path *) create_append_path(rel, total_subpaths, NULL); - path->pathkeys = pathkeys; - add_path(rel, path); - } - } - else if ((PathKey *) linitial(pathkeys) == pathkeyDesc && presorted) - { - /* - * When pathkey is descending sort by partition column then we - * need to scan partitions in reversed order. - */ - Path *path; - - path = (Path *) create_append_path(rel, - list_reverse(startup_subpaths), NULL); - path->pathkeys = pathkeys; - add_path(rel, path); - - if (startup_neq_total) - { - path = (Path *) create_append_path(rel, - list_reverse(total_subpaths), NULL); - path->pathkeys = pathkeys; - add_path(rel, path); - } - } - else - { - /* ... and build the MergeAppend paths */ - add_path(rel, (Path *) create_merge_append_path(root, - rel, - startup_subpaths, - pathkeys, - NULL)); - if (startup_neq_total) - add_path(rel, (Path *) create_merge_append_path(root, - rel, - total_subpaths, - pathkeys, - NULL)); - } - } -} - -/* - * Get cached PATHMAN_CONFIG relation Oid. - */ -Oid -get_pathman_config_relid(void) -{ - return pathman_config_relid; -} - -/* - * Get cached PATHMAN_CONFIG_PARAMS relation Oid. 
- */ -Oid -get_pathman_config_params_relid(void) -{ - return pathman_config_params_relid; -} diff --git a/src/pl_funcs.c b/src/pl_funcs.c index a647a730..75c1c12a 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -3,581 +3,794 @@ * pl_funcs.c * Utility C functions for stored procedures * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" + #include "init.h" #include "pathman.h" +#include "partition_creation.h" +#include "partition_filter.h" #include "relation_info.h" -#include "utils.h" #include "xact_handling.h" +#include "utils.h" #include "access/htup_details.h" -#include "access/nbtree.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/table.h" +#include "access/tableam.h" +#endif #include "access/xact.h" +#include "catalog/dependency.h" #include "catalog/indexing.h" -#include "commands/sequence.h" +#include "catalog/namespace.h" +#include "catalog/pg_type.h" +#include "commands/tablespace.h" +#include "commands/trigger.h" +#include "executor/executor.h" +#include "executor/spi.h" +#include "funcapi.h" #include "miscadmin.h" -#include "utils/array.h" +#include "nodes/nodeFuncs.h" #include "utils/builtins.h" -#include -#include "utils/memutils.h" +#include "utils/inval.h" +#include "utils/snapmgr.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif -/* declarations */ -PG_FUNCTION_INFO_V1( on_partitions_created ); -PG_FUNCTION_INFO_V1( on_partitions_updated ); -PG_FUNCTION_INFO_V1( on_partitions_removed ); -PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); -PG_FUNCTION_INFO_V1( get_attribute_type_name ); -PG_FUNCTION_INFO_V1( find_or_create_range_partition); -PG_FUNCTION_INFO_V1( get_range_by_idx ); -PG_FUNCTION_INFO_V1( 
get_range_by_part_oid ); -PG_FUNCTION_INFO_V1( get_min_range_value ); -PG_FUNCTION_INFO_V1( get_max_range_value ); -PG_FUNCTION_INFO_V1( get_type_hash_func ); -PG_FUNCTION_INFO_V1( get_hash_part_idx ); -PG_FUNCTION_INFO_V1( check_overlap ); -PG_FUNCTION_INFO_V1( build_range_condition ); -PG_FUNCTION_INFO_V1( build_check_constraint_name_attnum ); -PG_FUNCTION_INFO_V1( build_check_constraint_name_attname ); -PG_FUNCTION_INFO_V1( build_update_trigger_func_name ); -PG_FUNCTION_INFO_V1( build_update_trigger_name ); -PG_FUNCTION_INFO_V1( is_date_type ); -PG_FUNCTION_INFO_V1( is_attribute_nullable ); -PG_FUNCTION_INFO_V1( add_to_pathman_config ); -PG_FUNCTION_INFO_V1( invalidate_relcache ); -PG_FUNCTION_INFO_V1( lock_partitioned_relation ); -PG_FUNCTION_INFO_V1( prevent_relation_modification ); -PG_FUNCTION_INFO_V1( debug_capture ); +/* Function declarations */ -static void on_partitions_created_internal(Oid partitioned_table, bool add_callbacks); -static void on_partitions_updated_internal(Oid partitioned_table, bool add_callbacks); -static void on_partitions_removed_internal(Oid partitioned_table, bool add_callbacks); +PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); +PG_FUNCTION_INFO_V1( get_partition_key_type_pl ); +PG_FUNCTION_INFO_V1( get_partition_cooked_key_pl ); +PG_FUNCTION_INFO_V1( get_cached_partition_cooked_key_pl ); +PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); +PG_FUNCTION_INFO_V1( get_base_type_pl ); +PG_FUNCTION_INFO_V1( get_tablespace_pl ); +PG_FUNCTION_INFO_V1( show_cache_stats_internal ); +PG_FUNCTION_INFO_V1( show_partition_list_internal ); -/* - * Extracted common check. 
- */ -static bool -check_relation_exists(Oid relid) -{ - return get_rel_type_id(relid) != InvalidOid; -} +PG_FUNCTION_INFO_V1( build_check_constraint_name ); +PG_FUNCTION_INFO_V1( validate_relname ); +PG_FUNCTION_INFO_V1( validate_expression ); +PG_FUNCTION_INFO_V1( is_date_type ); +PG_FUNCTION_INFO_V1( is_operator_supported ); +PG_FUNCTION_INFO_V1( is_tuple_convertible ); -/* - * Callbacks. - */ +PG_FUNCTION_INFO_V1( add_to_pathman_config ); +PG_FUNCTION_INFO_V1( pathman_config_params_trigger_func ); -static void -on_partitions_created_internal(Oid partitioned_table, bool add_callbacks) -{ - elog(DEBUG2, "on_partitions_created() [add_callbacks = %s] " - "triggered for relation %u", - (add_callbacks ? "true" : "false"), partitioned_table); -} +PG_FUNCTION_INFO_V1( prevent_part_modification ); +PG_FUNCTION_INFO_V1( prevent_data_modification ); -static void -on_partitions_updated_internal(Oid partitioned_table, bool add_callbacks) -{ - bool found; +PG_FUNCTION_INFO_V1( validate_part_callback_pl ); +PG_FUNCTION_INFO_V1( invoke_on_partition_created_callback ); - elog(DEBUG2, "on_partitions_updated() [add_callbacks = %s] " - "triggered for relation %u", - (add_callbacks ? "true" : "false"), partitioned_table); +PG_FUNCTION_INFO_V1( check_security_policy ); - invalidate_pathman_relation_info(partitioned_table, &found); -} +PG_FUNCTION_INFO_V1( debug_capture ); +PG_FUNCTION_INFO_V1( pathman_version ); -static void -on_partitions_removed_internal(Oid partitioned_table, bool add_callbacks) +/* User context for function show_partition_list_internal() */ +typedef struct { - elog(DEBUG2, "on_partitions_removed() [add_callbacks = %s] " - "triggered for relation %u", - (add_callbacks ? "true" : "false"), partitioned_table); -} - -/* - * Thin layer between pure C and pl/PgSQL. 
- */ + Relation pathman_config; +#if PG_VERSION_NUM >= 120000 + TableScanDesc pathman_config_scan; +#else + HeapScanDesc pathman_config_scan; +#endif + Snapshot snapshot; -Datum -on_partitions_created(PG_FUNCTION_ARGS) -{ - on_partitions_created_internal(PG_GETARG_OID(0), true); - PG_RETURN_NULL(); -} + PartRelationInfo *current_prel; /* selected PartRelationInfo */ -Datum -on_partitions_updated(PG_FUNCTION_ARGS) -{ - on_partitions_updated_internal(PG_GETARG_OID(0), true); - PG_RETURN_NULL(); -} + Size child_number; /* child we're looking at */ + SPITupleTable *tuptable; /* buffer for tuples */ +} show_partition_list_cxt; -Datum -on_partitions_removed(PG_FUNCTION_ARGS) +/* User context for function show_pathman_cache_stats_internal() */ +typedef struct { - on_partitions_removed_internal(PG_GETARG_OID(0), true); - PG_RETURN_NULL(); -} + MemoryContext pathman_contexts[PATHMAN_MCXT_COUNT]; + HTAB *pathman_htables[PATHMAN_MCXT_COUNT]; + int current_item; +} show_cache_stats_cxt; +/* + * ------------------------ + * Various useful getters + * ------------------------ + */ /* - * Get parent of a specified partition. + * Return parent of a specified partition. 
*/ Datum get_parent_of_partition_pl(PG_FUNCTION_ARGS) { - Oid partition = PG_GETARG_OID(0); - PartParentSearch parent_search; - Oid parent; - - /* Fetch parent & write down search status */ - parent = get_parent_of_partition(partition, &parent_search); + Oid partition = PG_GETARG_OID(0), + parent = get_parent_of_partition(partition); - /* We MUST be sure :) */ - Assert(parent_search != PPS_NOT_SURE); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"%s\" is not a partition", + get_rel_name_or_relid(partition)))); - /* It must be parent known by pg_pathman */ - if (parent_search == PPS_ENTRY_PART_PARENT) - PG_RETURN_OID(parent); - else - { - elog(ERROR, "\%s\" is not pg_pathman's partition", - get_rel_name_or_relid(partition)); - - PG_RETURN_NULL(); - } + PG_RETURN_OID(parent); } /* - * Get type (as text) of a given attribute. + * Return partition key type. */ Datum -get_attribute_type_name(PG_FUNCTION_ARGS) +get_partition_key_type_pl(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - char *result; - HeapTuple tp; + Oid relid = PG_GETARG_OID(0); + Oid typid; + PartRelationInfo *prel; - /* NOTE: for now it's the most efficient way */ - tp = SearchSysCacheAttName(relid, text_to_cstring(attname)); - if (HeapTupleIsValid(tp)) - { - Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - result = format_type_be(att_tup->atttypid); - ReleaseSysCache(tp); + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_ANY); - PG_RETURN_TEXT_P(cstring_to_text(result)); - } - else - elog(ERROR, "Cannot find type name for attribute \"%s\" " - "of relation \"%s\"", - text_to_cstring(attname), get_rel_name_or_relid(relid)); + typid = prel->ev_type; - PG_RETURN_NULL(); /* keep compiler happy */ + close_pathman_relation_info(prel); + + PG_RETURN_OID(typid); } /* - * Returns partition oid for specified parent relid and value. 
- * In case when partition doesn't exist try to create one. + * Return cooked partition key. */ Datum -find_or_create_range_partition(PG_FUNCTION_ARGS) +get_partition_cooked_key_pl(PG_FUNCTION_ARGS) { - Oid parent_oid = PG_GETARG_OID(0); - Datum value = PG_GETARG_DATUM(1); - Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); - const PartRelationInfo *prel; - FmgrInfo cmp_func; - RangeEntry found_rentry; - search_rangerel_result search_state; + /* Values extracted from PATHMAN_CONFIG */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; - prel = get_pathman_relation_info(parent_oid); - shout_if_prel_is_invalid(parent_oid, prel, PT_RANGE); + Oid relid = PG_GETARG_OID(0); + char *expr_cstr; + Node *expr; + char *cooked_cstr; - fill_type_cmp_fmgr_info(&cmp_func, value_type, prel->atttype); + /* Check that table is registered in PATHMAN_CONFIG */ + if (!pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(relid)); - /* Use available PartRelationInfo to find partition */ - search_state = search_range_partition_eq(value, &cmp_func, prel, - &found_rentry); + expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); + expr = cook_partitioning_expression(relid, expr_cstr, NULL); - /* - * If found then just return oid, else create new partitions - */ - if (search_state == SEARCH_RANGEREL_FOUND) - PG_RETURN_OID(found_rentry.child_oid); - /* - * If not found and value is between first and last partitions - */ - else if (search_state == SEARCH_RANGEREL_GAP) - PG_RETURN_NULL(); - else - { - Oid child_oid = create_partitions(parent_oid, value, value_type); +#if PG_VERSION_NUM >= 170000 /* for commit d20d8fbd3e4d */ + cooked_cstr = nodeToStringWithLocations(expr); +#else + cooked_cstr = nodeToString(expr); +#endif - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_oid, NULL); + pfree(expr_cstr); + 
pfree(expr); - PG_RETURN_OID(child_oid); - } + PG_RETURN_DATUM(CStringGetTextDatum(cooked_cstr)); } /* - * Returns range entry (min, max) (in form of array). + * Return cached cooked partition key. * - * arg #1 is the parent's Oid. - * arg #2 is the partition's Oid. + * Used in tests for invalidation. */ Datum -get_range_by_part_oid(PG_FUNCTION_ARGS) +get_cached_partition_cooked_key_pl(PG_FUNCTION_ARGS) { - Oid parent_oid = PG_GETARG_OID(0); - Oid child_oid = PG_GETARG_OID(1); - uint32 i; - RangeEntry *ranges; - const PartRelationInfo *prel; - - prel = get_pathman_relation_info(parent_oid); - shout_if_prel_is_invalid(parent_oid, prel, PT_RANGE); + Oid relid = PG_GETARG_OID(0); + PartRelationInfo *prel; + Datum res; - ranges = PrelGetRangesArray(prel); + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_ANY); - /* Look for the specified partition */ - for (i = 0; i < PrelChildrenCount(prel); i++) - if (ranges[i].child_oid == child_oid) - { - ArrayType *arr; - Datum elems[2] = { ranges[i].min, ranges[i].max }; - - arr = construct_array(elems, 2, prel->atttype, - prel->attlen, prel->attbyval, - prel->attalign); +#if PG_VERSION_NUM >= 170000 /* for commit d20d8fbd3e4d */ + res = CStringGetTextDatum(nodeToStringWithLocations(prel->expr)); +#else + res = CStringGetTextDatum(nodeToString(prel->expr)); +#endif - PG_RETURN_ARRAYTYPE_P(arr); - } + close_pathman_relation_info(prel); - /* No partition found, report error */ - elog(ERROR, "Relation \"%s\" has no partition \"%s\"", - get_rel_name_or_relid(parent_oid), - get_rel_name_or_relid(child_oid)); - - PG_RETURN_NULL(); /* keep compiler happy */ + PG_RETURN_DATUM(res); } /* - * Returns N-th range entry (min, max) (in form of array). - * - * arg #1 is the parent's Oid. - * arg #2 is the index of the range - * (if it is negative then the last range will be returned). + * Extract basic type of a domain. 
*/ Datum -get_range_by_idx(PG_FUNCTION_ARGS) +get_base_type_pl(PG_FUNCTION_ARGS) { - Oid parent_oid = PG_GETARG_OID(0); - int idx = PG_GETARG_INT32(1); - Datum elems[2]; - RangeEntry *ranges; - const PartRelationInfo *prel; - - prel = get_pathman_relation_info(parent_oid); - shout_if_prel_is_invalid(parent_oid, prel, PT_RANGE); - - /* Now we have to deal with 'idx' */ - if (idx < -1) - { - elog(ERROR, "Negative indices other than -1 (last partition) are not allowed"); - } - else if (idx == -1) - { - idx = PrelLastChild(prel); - } - else if (((uint32) abs(idx)) >= PrelChildrenCount(prel)) - { - elog(ERROR, "Partition #%d does not exist (total amount is %u)", - idx, PrelChildrenCount(prel)); - } - - ranges = PrelGetRangesArray(prel); - - elems[0] = ranges[idx].min; - elems[1] = ranges[idx].max; - - PG_RETURN_ARRAYTYPE_P(construct_array(elems, 2, - prel->atttype, - prel->attlen, - prel->attbyval, - prel->attalign)); + PG_RETURN_OID(getBaseType(PG_GETARG_OID(0))); } /* - * Returns min value of the first range for relation. + * Return tablespace name of a specified relation which must not be + * natively partitioned. 
*/ Datum -get_min_range_value(PG_FUNCTION_ARGS) +get_tablespace_pl(PG_FUNCTION_ARGS) { - Oid parent_oid = PG_GETARG_OID(0); - RangeEntry *ranges; - const PartRelationInfo *prel; + Oid relid = PG_GETARG_OID(0); + Oid tablespace_id; + char *result; - prel = get_pathman_relation_info(parent_oid); - shout_if_prel_is_invalid(parent_oid, prel, PT_RANGE); + tablespace_id = get_rel_tablespace(relid); - ranges = PrelGetRangesArray(prel); + /* If tablespace id is InvalidOid then use the default tablespace */ + if (!OidIsValid(tablespace_id)) + { + tablespace_id = GetDefaultTablespaceCompat(get_rel_persistence(relid), false); + + /* If tablespace is still invalid then use database's default */ + if (!OidIsValid(tablespace_id)) + tablespace_id = MyDatabaseTableSpace; + } - PG_RETURN_DATUM(ranges[0].min); + result = get_tablespace_name(tablespace_id); + PG_RETURN_TEXT_P(cstring_to_text(result)); } /* - * Returns max value of the last range for relation. + * ---------------------- + * Common purpose VIEWs + * ---------------------- + */ + +/* + * List stats of all existing caches (memory contexts). */ Datum -get_max_range_value(PG_FUNCTION_ARGS) +show_cache_stats_internal(PG_FUNCTION_ARGS) { - Oid parent_oid = PG_GETARG_OID(0); - RangeEntry *ranges; - const PartRelationInfo *prel; + show_cache_stats_cxt *usercxt; + FuncCallContext *funccxt; - prel = get_pathman_relation_info(parent_oid); - shout_if_prel_is_invalid(parent_oid, prel, PT_RANGE); + /* + * Initialize tuple descriptor & function call context. + */ + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext old_mcxt; - ranges = PrelGetRangesArray(prel); + funccxt = SRF_FIRSTCALL_INIT(); - PG_RETURN_DATUM(ranges[PrelLastChild(prel)].max); -} + if (!TopPathmanContext) + { + elog(ERROR, "pg_pathman's memory contexts are not initialized yet"); + } -/* - * Checks if range overlaps with existing partitions. - * Returns TRUE if overlaps and FALSE otherwise. 
- */ -Datum -check_overlap(PG_FUNCTION_ARGS) -{ - Oid parent_oid = PG_GETARG_OID(0); + old_mcxt = MemoryContextSwitchTo(funccxt->multi_call_memory_ctx); - Datum p1 = PG_GETARG_DATUM(1), - p2 = PG_GETARG_DATUM(2); + usercxt = (show_cache_stats_cxt *) palloc(sizeof(show_cache_stats_cxt)); - Oid p1_type = get_fn_expr_argtype(fcinfo->flinfo, 1), - p2_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + usercxt->pathman_contexts[0] = TopPathmanContext; + usercxt->pathman_contexts[1] = PathmanParentsCacheContext; + usercxt->pathman_contexts[2] = PathmanStatusCacheContext; + usercxt->pathman_contexts[3] = PathmanBoundsCacheContext; - FmgrInfo cmp_func_1, - cmp_func_2; + usercxt->pathman_htables[0] = NULL; /* no HTAB for this entry */ + usercxt->pathman_htables[1] = parents_cache; + usercxt->pathman_htables[2] = status_cache; + usercxt->pathman_htables[3] = bounds_cache; - uint32 i; - RangeEntry *ranges; - const PartRelationInfo *prel; + usercxt->current_item = 0; - prel = get_pathman_relation_info(parent_oid); - shout_if_prel_is_invalid(parent_oid, prel, PT_RANGE); + /* Create tuple descriptor */ + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_cache_stats, false); - /* comparison functions */ - fill_type_cmp_fmgr_info(&cmp_func_1, p1_type, prel->atttype); - fill_type_cmp_fmgr_info(&cmp_func_2, p2_type, prel->atttype); + TupleDescInitEntry(tupdesc, Anum_pathman_cs_context, + "context", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_cs_size, + "size", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_cs_used, + "used", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_cs_entries, + "entries", INT8OID, -1, 0); - ranges = PrelGetRangesArray(prel); - for (i = 0; i < PrelChildrenCount(prel); i++) - { - int c1 = FunctionCall2(&cmp_func_1, p1, ranges[i].max); - int c2 = FunctionCall2(&cmp_func_2, p2, ranges[i].min); + funccxt->tuple_desc = BlessTupleDesc(tupdesc); + funccxt->user_fctx = (void *) usercxt; - if (c1 < 0 && c2 > 0) - 
PG_RETURN_BOOL(true); + MemoryContextSwitchTo(old_mcxt); } - PG_RETURN_BOOL(false); -} + funccxt = SRF_PERCALL_SETUP(); + usercxt = (show_cache_stats_cxt *) funccxt->user_fctx; + if (usercxt->current_item < lengthof(usercxt->pathman_contexts)) + { + HTAB *current_htab; + MemoryContext current_mcxt; + HeapTuple htup; + Datum values[Natts_pathman_cache_stats]; + bool isnull[Natts_pathman_cache_stats] = { 0 }; -/* - * HASH-related stuff. - */ +#if PG_VERSION_NUM >= 90600 + MemoryContextCounters mcxt_stats; +#endif -/* Returns hash function's OID for a specified type. */ -Datum -get_type_hash_func(PG_FUNCTION_ARGS) -{ - TypeCacheEntry *tce; - Oid type_oid = PG_GETARG_OID(0); + /* Select current memory context and hash table (cache) */ + current_mcxt = usercxt->pathman_contexts[usercxt->current_item]; + current_htab = usercxt->pathman_htables[usercxt->current_item]; - tce = lookup_type_cache(type_oid, TYPECACHE_HASH_PROC); + values[Anum_pathman_cs_context - 1] = + CStringGetTextDatum(simplify_mcxt_name(current_mcxt)); - PG_RETURN_OID(tce->hash_proc); -} +/* We can't check stats of mcxt prior to 9.6 */ +#if PG_VERSION_NUM >= 90600 -/* Wrapper for hash_to_part_index() */ -Datum -get_hash_part_idx(PG_FUNCTION_ARGS) -{ - uint32 value = PG_GETARG_UINT32(0), - part_count = PG_GETARG_UINT32(1); + /* Prepare context counters */ + memset(&mcxt_stats, 0, sizeof(mcxt_stats)); - PG_RETURN_UINT32(hash_to_part_index(value, part_count)); -} + /* NOTE: we do not consider child contexts if it's TopPathmanContext */ + McxtStatsInternal(current_mcxt, 0, + (current_mcxt != TopPathmanContext), + &mcxt_stats); -/* - * Traits. 
- */ + values[Anum_pathman_cs_size - 1] = + Int64GetDatum(mcxt_stats.totalspace); -Datum -is_date_type(PG_FUNCTION_ARGS) -{ - PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); -} + values[Anum_pathman_cs_used - 1] = + Int64GetDatum(mcxt_stats.totalspace - mcxt_stats.freespace); -Datum -is_attribute_nullable(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - bool result = true; - HeapTuple tp; +#else - tp = SearchSysCacheAttName(relid, text_to_cstring(attname)); - if (HeapTupleIsValid(tp)) - { - Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - result = !att_tup->attnotnull; - ReleaseSysCache(tp); + /* Set unsupported fields to NULL */ + isnull[Anum_pathman_cs_size - 1] = true; + isnull[Anum_pathman_cs_used - 1] = true; +#endif + + values[Anum_pathman_cs_entries - 1] = + Int64GetDatum(current_htab ? + hash_get_num_entries(current_htab) : + 0); + + /* Switch to next item */ + usercxt->current_item++; + + /* Form output tuple */ + htup = heap_form_tuple(funccxt->tuple_desc, values, isnull); + + SRF_RETURN_NEXT(funccxt, HeapTupleGetDatum(htup)); } - else - elog(ERROR, "Cannot find type name for attribute \"%s\" " - "of relation \"%s\"", - text_to_cstring(attname), get_rel_name_or_relid(relid)); - PG_RETURN_BOOL(result); /* keep compiler happy */ + SRF_RETURN_DONE(funccxt); } - /* - * Useful string builders. + * List all existing partitions and their parents. + * + * In >=13 (bc8393cf277) struct SPITupleTable was changed + * (free removed and numvals added) */ - -/* Build range condition for a CHECK CONSTRAINT. 
*/ Datum -build_range_condition(PG_FUNCTION_ARGS) +show_partition_list_internal(PG_FUNCTION_ARGS) { - text *attname = PG_GETARG_TEXT_P(0); + show_partition_list_cxt *usercxt; + FuncCallContext *funccxt; + MemoryContext old_mcxt; + SPITupleTable *tuptable; - Datum min_bound = PG_GETARG_DATUM(1), - max_bound = PG_GETARG_DATUM(2); + /* Initialize tuple descriptor & function call context */ + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext tuptab_mcxt; + + funccxt = SRF_FIRSTCALL_INIT(); + + old_mcxt = MemoryContextSwitchTo(funccxt->multi_call_memory_ctx); + + usercxt = (show_partition_list_cxt *) palloc(sizeof(show_partition_list_cxt)); + + /* Open PATHMAN_CONFIG with latest snapshot available */ + usercxt->pathman_config = heap_open_compat(get_pathman_config_relid(false), + AccessShareLock); + usercxt->snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + usercxt->pathman_config_scan = table_beginscan(usercxt->pathman_config, + usercxt->snapshot, 0, NULL); +#else + usercxt->pathman_config_scan = heap_beginscan(usercxt->pathman_config, + usercxt->snapshot, 0, NULL); +#endif + + usercxt->current_prel = NULL; + + /* Create tuple descriptor */ + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_partition_list, false); + + TupleDescInitEntry(tupdesc, Anum_pathman_pl_parent, + "parent", REGCLASSOID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_pl_partition, + "partition", REGCLASSOID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_pl_parttype, + "parttype", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_pl_partattr, + "expr", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_pl_range_min, + "range_min", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, Anum_pathman_pl_range_max, + "range_max", TEXTOID, -1, 0); + + funccxt->tuple_desc = BlessTupleDesc(tupdesc); + funccxt->user_fctx = (void *) usercxt; + + /* initialize tuple table context */ + tuptab_mcxt = 
AllocSetContextCreate(CurrentMemoryContext, + "tuptable for pathman_partition_list", + ALLOCSET_DEFAULT_SIZES); + MemoryContextSwitchTo(tuptab_mcxt); + + /* Initialize tuple table for partitions list, we use it as buffer */ + tuptable = (SPITupleTable *) palloc0(sizeof(SPITupleTable)); + usercxt->tuptable = tuptable; + tuptable->tuptabcxt = tuptab_mcxt; + + /* Set up initial allocations */ +#if PG_VERSION_NUM >= 130000 + tuptable->alloced = PART_RELS_SIZE * CHILD_FACTOR; + tuptable->numvals = 0; +#else + tuptable->alloced = tuptable->free = PART_RELS_SIZE * CHILD_FACTOR; +#endif + tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple)); - Oid min_bound_type = get_fn_expr_argtype(fcinfo->flinfo, 1), - max_bound_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + MemoryContextSwitchTo(old_mcxt); - char *subst_str; /* substitution string */ - char *result; + /* Iterate through pathman cache */ + for (;;) + { + HeapTuple htup; + Datum values[Natts_pathman_partition_list]; + bool isnull[Natts_pathman_partition_list] = { 0 }; + PartRelationInfo *prel; + + /* Fetch next PartRelationInfo if needed */ + if (usercxt->current_prel == NULL) + { + HeapTuple pathman_config_htup; + Datum parent_table; + bool parent_table_isnull; + Oid parent_table_oid; + + pathman_config_htup = heap_getnext(usercxt->pathman_config_scan, + ForwardScanDirection); + if (!HeapTupleIsValid(pathman_config_htup)) + break; + + parent_table = heap_getattr(pathman_config_htup, + Anum_pathman_config_partrel, + RelationGetDescr(usercxt->pathman_config), + &parent_table_isnull); + + Assert(parent_table_isnull == false); + parent_table_oid = DatumGetObjectId(parent_table); + + usercxt->current_prel = get_pathman_relation_info(parent_table_oid); + if (usercxt->current_prel == NULL) + continue; + + usercxt->child_number = 0; + } + + /* Alias to 'usercxt->current_prel' */ + prel = usercxt->current_prel; + + /* If we've run out of partitions, switch to the next 'prel' */ + if 
(usercxt->child_number >= PrelChildrenCount(prel)) + { + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + usercxt->current_prel = NULL; + usercxt->child_number = 0; + + continue; + } + + /* Fill in common values */ + values[Anum_pathman_pl_parent - 1] = PrelParentRelid(prel); + values[Anum_pathman_pl_parttype - 1] = prel->parttype; + values[Anum_pathman_pl_partattr - 1] = CStringGetTextDatum(prel->expr_cstr); + + switch (prel->parttype) + { + case PT_HASH: + { + Oid *children = PrelGetChildrenArray(prel), + child_oid = children[usercxt->child_number]; + + values[Anum_pathman_pl_partition - 1] = child_oid; + isnull[Anum_pathman_pl_range_min - 1] = true; + isnull[Anum_pathman_pl_range_max - 1] = true; + } + break; + + case PT_RANGE: + { + RangeEntry *re; + + re = &PrelGetRangesArray(prel)[usercxt->child_number]; + + values[Anum_pathman_pl_partition - 1] = re->child_oid; + + /* Lower bound text */ + if (!IsInfinite(&re->min)) + { + Datum rmin = CStringGetTextDatum( + BoundToCString(&re->min, + prel->ev_type)); + + values[Anum_pathman_pl_range_min - 1] = rmin; + } + else isnull[Anum_pathman_pl_range_min - 1] = true; + + /* Upper bound text */ + if (!IsInfinite(&re->max)) + { + Datum rmax = CStringGetTextDatum( + BoundToCString(&re->max, + prel->ev_type)); + + values[Anum_pathman_pl_range_max - 1] = rmax; + } + else isnull[Anum_pathman_pl_range_max - 1] = true; + } + break; + + default: + WrongPartType(prel->parttype); + } + + /* Fill tuptable */ + old_mcxt = MemoryContextSwitchTo(tuptable->tuptabcxt); + + /* Form output tuple */ + htup = heap_form_tuple(funccxt->tuple_desc, values, isnull); + +#if PG_VERSION_NUM >= 130000 + if (tuptable->numvals == tuptable->alloced) +#else + if (tuptable->free == 0) +#endif + { + /* Double the size of the pointer array */ +#if PG_VERSION_NUM >= 130000 + tuptable->alloced += tuptable->alloced; +#else + tuptable->free = tuptable->alloced; + tuptable->alloced += tuptable->free; +#endif + + tuptable->vals = 
(HeapTuple *) + repalloc_huge(tuptable->vals, + tuptable->alloced * sizeof(HeapTuple)); + } + +#if PG_VERSION_NUM >= 130000 + /* Add tuple to table and increase 'numvals' */ + tuptable->vals[tuptable->numvals] = htup; + (tuptable->numvals)++; +#else + /* Add tuple to table and decrement 'free' */ + tuptable->vals[tuptable->alloced - tuptable->free] = htup; + (tuptable->free)--; +#endif + + MemoryContextSwitchTo(old_mcxt); + + /* Switch to the next child */ + usercxt->child_number++; + } - /* This is not going to trigger (not now, at least), just for the safety */ - if (min_bound_type != max_bound_type) - elog(ERROR, "Cannot build range condition: " - "boundaries should be of the same type"); + /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(usercxt->pathman_config_scan); +#else + heap_endscan(usercxt->pathman_config_scan); +#endif + UnregisterSnapshot(usercxt->snapshot); + heap_close_compat(usercxt->pathman_config, AccessShareLock); - /* Check if we need single quotes */ - /* TODO: check for primitive types instead, that would be better */ - if (is_date_type_internal(min_bound_type) || - is_string_type_internal(min_bound_type)) - { - subst_str = "%1$s >= '%2$s' AND %1$s < '%3$s'"; + usercxt->child_number = 0; } - else - subst_str = "%1$s >= %2$s AND %1$s < %3$s"; - /* Create range condition CSTRING */ - result = psprintf(subst_str, - text_to_cstring(attname), - datum_to_cstring(min_bound, min_bound_type), - datum_to_cstring(max_bound, max_bound_type)); + funccxt = SRF_PERCALL_SETUP(); + usercxt = (show_partition_list_cxt *) funccxt->user_fctx; + tuptable = usercxt->tuptable; - PG_RETURN_TEXT_P(cstring_to_text(result)); + /* Iterate through used slots */ +#if PG_VERSION_NUM >= 130000 + if (usercxt->child_number < tuptable->numvals) +#else + if (usercxt->child_number < (tuptable->alloced - tuptable->free)) +#endif + { + HeapTuple htup = usercxt->tuptable->vals[usercxt->child_number++]; + + SRF_RETURN_NEXT(funccxt, HeapTupleGetDatum(htup)); + } + + 
SRF_RETURN_DONE(funccxt); } + +/* + * -------- + * Traits + * -------- + */ + +/* + * Check that relation exists. + * NOTE: we pass REGCLASS as text, hence the function's name. + */ Datum -build_check_constraint_name_attnum(PG_FUNCTION_ARGS) +validate_relname(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - AttrNumber attnum = PG_GETARG_INT16(1); - const char *result; + Oid relid; - if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); + /* We don't accept NULL */ + if (PG_ARGISNULL(0)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation should not be NULL"), + errdetail("function " CppAsString(validate_relname) + " received NULL argument"))); - /* We explicitly do not support system attributes */ - if (attnum == InvalidAttrNumber || attnum < 0) - elog(ERROR, "Cannot build check constraint name: " - "invalid attribute number %i", attnum); + /* Fetch relation's Oid */ + relid = PG_GETARG_OID(0); - result = build_check_constraint_name_internal(relid, attnum); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid), + errdetail("triggered in function " + CppAsString(validate_relname)))); - PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); + PG_RETURN_VOID(); } +/* + * Validate a partitioning expression. + * NOTE: We need this in range functions because + * we do many things before actual partitioning. 
+ */ Datum -build_check_constraint_name_attname(PG_FUNCTION_ARGS) +validate_expression(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - text *attname = PG_GETARG_TEXT_P(1); - AttrNumber attnum = get_attnum(relid, text_to_cstring(attname)); - const char *result; + Oid relid; + char *expression; - if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); + /* Fetch relation's Oid */ + if (!PG_ARGISNULL(0)) + { + relid = PG_GETARG_OID(0); + check_relation_oid(relid); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'relid' should not be NULL"))); - if (attnum == InvalidAttrNumber) - elog(ERROR, "Relation \"%s\" has no column '%s'", - get_rel_name_or_relid(relid), text_to_cstring(attname)); + /* Protect relation from concurrent drop */ + LockRelationOid(relid, AccessShareLock); - result = build_check_constraint_name_internal(relid, attnum); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid), + errdetail("triggered in function " + CppAsString(validate_expression)))); - PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); + if (!PG_ARGISNULL(1)) + { + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL"))); + + /* Perform some checks */ + cook_partitioning_expression(relid, expression, NULL); + + UnlockRelationOid(relid, AccessShareLock); + + PG_RETURN_VOID(); } Datum -build_update_trigger_func_name(PG_FUNCTION_ARGS) +is_date_type(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0), - nspid; - const char *result; + PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); +} - /* Check that relation exists */ - if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); +/* + * Bail out with ERROR if rel1 tuple can't be converted to rel2 tuple. 
+ */ +Datum +is_tuple_convertible(PG_FUNCTION_ARGS) +{ + Relation rel1, + rel2; +#if PG_VERSION_NUM >= 130000 + AttrMap *map; /* we don't actually need it */ +#else + void *map; /* we don't actually need it */ +#endif + + rel1 = heap_open_compat(PG_GETARG_OID(0), AccessShareLock); + rel2 = heap_open_compat(PG_GETARG_OID(1), AccessShareLock); + + /* Try to build a conversion map */ +#if PG_VERSION_NUM >= 160000 /* for commit ad86d159b6ab */ + map = build_attrmap_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2), false); +#elif PG_VERSION_NUM >= 130000 + map = build_attrmap_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2)); +#else + map = convert_tuples_by_name_map(RelationGetDescr(rel1), + RelationGetDescr(rel2), + ERR_PART_DESC_CONVERT); +#endif + + /* Now free map */ +#if PG_VERSION_NUM >= 130000 + free_attrmap(map); +#else + pfree(map); +#endif + + heap_close_compat(rel1, AccessShareLock); + heap_close_compat(rel2, AccessShareLock); + + /* still return true to avoid changing tests */ + PG_RETURN_BOOL(true); +} - nspid = get_rel_namespace(relid); - result = psprintf("%s.%s", - quote_identifier(get_namespace_name(nspid)), - quote_identifier(psprintf("%s_upd_trig_func", - get_rel_name(relid)))); - PG_RETURN_TEXT_P(cstring_to_text(result)); -} +/* + * ------------------------ + * Useful string builders + * ------------------------ + */ Datum -build_update_trigger_name(PG_FUNCTION_ARGS) +build_check_constraint_name(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); - const char *result; /* trigger's name can't be qualified */ - - /* Check that relation exists */ - if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); + const char *result; - result = quote_identifier(psprintf("%s_upd_trig", get_rel_name(relid))); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid))); - PG_RETURN_TEXT_P(cstring_to_text(result)); 
+ result = build_check_constraint_name_relid_internal(relid); + PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); } +/* + * ------------------------ + * Cache & config updates + * ------------------------ + */ + /* * Try to add previously partitioned table to PATHMAN_CONFIG. */ @@ -585,119 +798,262 @@ Datum add_to_pathman_config(PG_FUNCTION_ARGS) { Oid relid; - text *attname; + char *expression; PartType parttype; + Oid *children; + uint32 children_count; + Relation pathman_config; Datum values[Natts_pathman_config]; bool isnull[Natts_pathman_config]; HeapTuple htup; - CatalogIndexState indstate; - PathmanInitState init_state; - MemoryContext old_mcxt = CurrentMemoryContext; + Oid expr_type; - if (PG_ARGISNULL(0)) - elog(ERROR, "parent_relid should not be null"); + volatile PathmanInitState init_state; - if (PG_ARGISNULL(1)) - elog(ERROR, "attname should not be null"); + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); - /* Read parameters */ - relid = PG_GETARG_OID(0); - attname = PG_GETARG_TEXT_P(1); + if (!PG_ARGISNULL(0)) + { + relid = PG_GETARG_OID(0); + check_relation_oid(relid); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); + + /* Protect data + definition from concurrent modification */ + LockRelationOid(relid, AccessExclusiveLock); /* Check that relation exists */ - if (!check_relation_exists(relid)) - elog(ERROR, "Invalid relation %u", relid); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%u\" does not exist", relid))); + + if (!PG_ARGISNULL(1)) + { + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL"))); + + /* Check current user's privileges */ + if (!check_security_policy_internal(relid, GetUserId())) + { + ereport(ERROR, + 
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("only the owner or superuser can change " + "partitioning configuration of table \"%s\"", + get_rel_name_or_relid(relid)))); + } - if (get_attnum(relid, text_to_cstring(attname)) == InvalidAttrNumber) - elog(ERROR, "Relation \"%s\" has no column '%s'", - get_rel_name_or_relid(relid), text_to_cstring(attname)); + /* Select partitioning type */ + switch (PG_NARGS()) + { + /* HASH */ + case 2: + { + parttype = PT_HASH; + + values[Anum_pathman_config_range_interval - 1] = (Datum) 0; + isnull[Anum_pathman_config_range_interval - 1] = true; + } + break; + + /* RANGE */ + case 3: + { + parttype = PT_RANGE; + + values[Anum_pathman_config_range_interval - 1] = PG_GETARG_DATUM(2); + isnull[Anum_pathman_config_range_interval - 1] = PG_ARGISNULL(2); + } + break; + + default: + elog(ERROR, "error in function " CppAsString(add_to_pathman_config)); + PG_RETURN_BOOL(false); /* keep compiler happy */ + } - /* Select partitioning type using 'range_interval' */ - parttype = PG_ARGISNULL(2) ? PT_HASH : PT_RANGE; + /* Parse and check expression */ + cook_partitioning_expression(relid, expression, &expr_type); + + /* Canonicalize user's expression (trim whitespaces etc) */ + expression = canonicalize_partitioning_expression(relid, expression); + + /* Check hash function for HASH partitioning */ + if (parttype == PT_HASH) + { + TypeCacheEntry *tce = lookup_type_cache(expr_type, TYPECACHE_HASH_PROC); + + if (!OidIsValid(tce->hash_proc)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("no hash function for partitioning expression"))); + } /* * Initialize columns (partrel, attname, parttype, range_interval). 
*/ - values[Anum_pathman_config_partrel - 1] = ObjectIdGetDatum(relid); - isnull[Anum_pathman_config_partrel - 1] = false; - - values[Anum_pathman_config_attname - 1] = PointerGetDatum(attname); - isnull[Anum_pathman_config_attname - 1] = false; + values[Anum_pathman_config_partrel - 1] = ObjectIdGetDatum(relid); + isnull[Anum_pathman_config_partrel - 1] = false; - values[Anum_pathman_config_parttype - 1] = Int32GetDatum(parttype); - isnull[Anum_pathman_config_parttype - 1] = false; + values[Anum_pathman_config_parttype - 1] = Int32GetDatum(parttype); + isnull[Anum_pathman_config_parttype - 1] = false; - values[Anum_pathman_config_range_interval - 1] = PG_GETARG_DATUM(2); - isnull[Anum_pathman_config_range_interval - 1] = PG_ARGISNULL(2); + values[Anum_pathman_config_expr - 1] = CStringGetTextDatum(expression); + isnull[Anum_pathman_config_expr - 1] = false; /* Insert new row into PATHMAN_CONFIG */ - pathman_config = heap_open(get_pathman_config_relid(), RowExclusiveLock); + pathman_config = heap_open_compat(get_pathman_config_relid(false), RowExclusiveLock); + htup = heap_form_tuple(RelationGetDescr(pathman_config), values, isnull); - simple_heap_insert(pathman_config, htup); - indstate = CatalogOpenIndexes(pathman_config); - CatalogIndexInsert(indstate, htup); - CatalogCloseIndexes(indstate); - heap_close(pathman_config, RowExclusiveLock); - - /* Now try to create a PartRelationInfo */ - PG_TRY(); + CatalogTupleInsert(pathman_config, htup); + + heap_close_compat(pathman_config, RowExclusiveLock); + + /* Make changes visible */ + CommandCounterIncrement(); + + /* Update caches only if this relation has children */ + if (FCS_FOUND == find_inheritance_children_array(relid, NoLock, true, + &children_count, + &children)) { - /* Some flags might change during refresh attempt */ - save_pathman_init_state(&init_state); + pfree(children); + + PG_TRY(); + { + /* Some flags might change during refresh attempt */ + save_pathman_init_state(&init_state); + + /* Now try to 
create a PartRelationInfo */ + has_pathman_relation_info(relid); + } + PG_CATCH(); + { + /* We have to restore changed flags */ + restore_pathman_init_state(&init_state); - refresh_pathman_relation_info(relid, parttype, text_to_cstring(attname)); + /* Rethrow ERROR */ + PG_RE_THROW(); + } + PG_END_TRY(); } - PG_CATCH(); + + /* Check if naming sequence exists */ + if (parttype == PT_RANGE) { - ErrorData *edata; + RangeVar *naming_seq_rv; + Oid naming_seq; - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - edata = CopyErrorData(); - FlushErrorState(); + naming_seq_rv = makeRangeVar(get_namespace_name(get_rel_namespace(relid)), + build_sequence_name_relid_internal(relid), + -1); - /* We have to restore all changed flags */ - restore_pathman_init_state(&init_state); + naming_seq = RangeVarGetRelid(naming_seq_rv, AccessShareLock, true); + if (OidIsValid(naming_seq)) + { + ObjectAddress parent, + sequence; - /* Show error message */ - elog(ERROR, "%s", edata->message); + ObjectAddressSet(parent, RelationRelationId, relid); + ObjectAddressSet(sequence, RelationRelationId, naming_seq); - FreeErrorData(edata); + /* Now this naming sequence is a "part" of partitioned relation */ + recordDependencyOn(&sequence, &parent, DEPENDENCY_NORMAL); + } } - PG_END_TRY(); + + CacheInvalidateRelcacheByRelid(relid); PG_RETURN_BOOL(true); } - /* - * Invalidate relcache for a specified relation. + * Invalidate relcache to refresh PartRelationInfo. 
*/ Datum -invalidate_relcache(PG_FUNCTION_ARGS) +pathman_config_params_trigger_func(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + TriggerData *trigdata = (TriggerData *) fcinfo->context; + Oid pathman_config_params; + Oid pathman_config; + Oid partrel; + Datum partrel_datum; + bool partrel_isnull; + + /* Fetch Oid of PATHMAN_CONFIG_PARAMS */ + pathman_config_params = get_pathman_config_params_relid(true); + pathman_config = get_pathman_config_relid(true); + + /* Handle "pg_pathman.enabled = f" case */ + if (!OidIsValid(pathman_config_params)) + goto pathman_config_params_trigger_func_return; + + /* Handle user calls */ + if (!CALLED_AS_TRIGGER(fcinfo)) + elog(ERROR, "this function should not be called directly"); + + /* Handle wrong fire mode */ + if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) + elog(ERROR, "%s: must be fired for row", + trigdata->tg_trigger->tgname); + + /* Handle wrong relation */ + if (RelationGetRelid(trigdata->tg_relation) != pathman_config_params && + RelationGetRelid(trigdata->tg_relation) != pathman_config) + elog(ERROR, "%s: must be fired for relation \"%s\" or \"%s\"", + trigdata->tg_trigger->tgname, + get_rel_name(pathman_config_params), + get_rel_name(pathman_config)); - if (check_relation_exists(relid)) - CacheInvalidateRelcacheByRelid(relid); + /* + * Extract partitioned relation's Oid. + * Hacky: 1 is attrnum of relid for both pathman_config and pathman_config_params + */ + partrel_datum = heap_getattr(trigdata->tg_trigtuple, + Anum_pathman_config_params_partrel, + RelationGetDescr(trigdata->tg_relation), + &partrel_isnull); + Assert(partrel_isnull == false); /* partrel should not be NULL! 
*/ + + partrel = DatumGetObjectId(partrel_datum); + + /* Finally trigger pg_pathman's cache invalidation event */ + if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) + CacheInvalidateRelcacheByRelid(partrel); + +pathman_config_params_trigger_func_return: + /* Return the tuple we've been given */ + if (trigdata->tg_event & TRIGGER_EVENT_UPDATE) + PG_RETURN_POINTER(trigdata->tg_newtuple); + else + PG_RETURN_POINTER(trigdata->tg_trigtuple); - PG_RETURN_VOID(); } /* - * Acquire appropriate lock on a partitioned relation. + * -------------------------- + * Special locking routines + * -------------------------- + */ + +/* + * Prevent concurrent modifiction of partitioning schema. */ Datum -lock_partitioned_relation(PG_FUNCTION_ARGS) +prevent_part_modification(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid relid = PG_GETARG_OID(0); + + check_relation_oid(relid); /* Lock partitioned relation till transaction's end */ - xact_lock_partitioned_rel(relid, false); + LockRelationOid(relid, ShareUpdateExclusiveLock); PG_RETURN_VOID(); } @@ -706,9 +1062,11 @@ lock_partitioned_relation(PG_FUNCTION_ARGS) * Lock relation exclusively & check for current isolation level. */ Datum -prevent_relation_modification(PG_FUNCTION_ARGS) +prevent_data_modification(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid relid = PG_GETARG_OID(0); + + check_relation_oid(relid); /* * Check that isolation level is READ COMMITTED. @@ -720,24 +1078,168 @@ prevent_relation_modification(PG_FUNCTION_ARGS) (errmsg("Cannot perform blocking partitioning operation"), errdetail("Expected READ COMMITTED isolation level"))); - /* - * Check if table is being modified - * concurrently in a separate transaction. 
- */ - if (!xact_lock_rel_exclusive(relid, true)) - ereport(ERROR, - (errmsg("Cannot perform blocking partitioning operation"), - errdetail("Table \"%s\" is being modified concurrently", - get_rel_name_or_relid(relid)))); + LockRelationOid(relid, AccessExclusiveLock); PG_RETURN_VOID(); } /* - * NOTE: used for DEBUG, set breakpoint here. + * ------------------------------------------- + * User-defined partition creation callbacks + * ------------------------------------------- + */ + +/* + * Checks that callback function meets specific requirements. + * It must have the only JSONB argument and BOOL return type. */ Datum +validate_part_callback_pl(PG_FUNCTION_ARGS) +{ + PG_RETURN_BOOL(validate_part_callback(PG_GETARG_OID(0), + PG_GETARG_BOOL(1))); +} + +/* + * Builds JSONB object containing new partition parameters + * and invokes the callback. + */ +Datum +invoke_on_partition_created_callback(PG_FUNCTION_ARGS) +{ +#define ARG_PARENT 0 /* parent table */ +#define ARG_CHILD 1 /* partition */ +#define ARG_CALLBACK 2 /* callback to be invoked */ +#define ARG_RANGE_START 3 /* start_value */ +#define ARG_RANGE_END 4 /* end_value */ + + Oid parent_relid, + partition_relid; + + Oid callback_oid = InvalidOid; + init_callback_params callback_params; + + + /* NOTE: callback may be NULL */ + if (!PG_ARGISNULL(ARG_CALLBACK)) + { + callback_oid = PG_GETARG_OID(ARG_CALLBACK); + } + + /* If there's no callback function specified, we're done */ + if (callback_oid == InvalidOid) + PG_RETURN_VOID(); + + if (!PG_ARGISNULL(ARG_PARENT)) + { + parent_relid = PG_GETARG_OID(ARG_PARENT); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); + + if (!PG_ARGISNULL(ARG_CHILD)) + { + partition_relid = PG_GETARG_OID(ARG_CHILD); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); + + switch (PG_NARGS()) + { + case 3: + MakeInitCallbackHashParams(&callback_params, + 
callback_oid, + parent_relid, + partition_relid); + break; + + case 5: + { + Bound start, + end; + Oid value_type; + + /* Fetch start & end values for RANGE + their type */ + start = PG_ARGISNULL(ARG_RANGE_START) ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(ARG_RANGE_START)); + + end = PG_ARGISNULL(ARG_RANGE_END) ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(ARG_RANGE_END)); + + value_type = get_fn_expr_argtype(fcinfo->flinfo, ARG_RANGE_START); + + MakeInitCallbackRangeParams(&callback_params, + callback_oid, + parent_relid, + partition_relid, + start, + end, + value_type); + } + break; + + default: + elog(ERROR, "error in function " + CppAsString(invoke_on_partition_created_callback)); + } + + /* Now it's time to call it! */ + invoke_part_callback(&callback_params); + + PG_RETURN_VOID(); +} + +/* + * Function to be used for RLS rules on PATHMAN_CONFIG and + * PATHMAN_CONFIG_PARAMS tables. + * NOTE: check_security_policy_internal() is used under the hood. + */ +Datum +check_security_policy(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + + if (!check_security_policy_internal(relid, GetUserId())) + { + elog(WARNING, "only the owner or superuser can change " + "partitioning configuration of table \"%s\"", + get_rel_name_or_relid(relid)); + + PG_RETURN_BOOL(false); + } + + /* Else return TRUE */ + PG_RETURN_BOOL(true); +} + +/* + * Check if type supports the specified operator ( + | - | etc ). 
+ */ +Datum +is_operator_supported(PG_FUNCTION_ARGS) +{ + Oid opid, + typid = PG_GETARG_OID(0); + char *opname = TextDatumGetCString(PG_GETARG_DATUM(1)); + + opid = compatible_oper_opid(list_make1(makeString(opname)), + typid, typid, true); + + PG_RETURN_BOOL(OidIsValid(opid)); +} + + +/* + * ------- + * DEBUG + * ------- + */ + +/* NOTE: used for DEBUG, set breakpoint here */ +Datum debug_capture(PG_FUNCTION_ARGS) { static float8 sleep_time = 0; @@ -748,3 +1250,10 @@ debug_capture(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } + +/* Return pg_pathman's shared library version */ +Datum +pathman_version(PG_FUNCTION_ARGS) +{ + PG_RETURN_CSTRING(CURRENT_LIB_VERSION); +} diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c new file mode 100644 index 00000000..4b08c324 --- /dev/null +++ b/src/pl_hash_funcs.c @@ -0,0 +1,158 @@ +/* ------------------------------------------------------------------------ + * + * pl_hash_funcs.c + * Utility C functions for stored HASH procedures + * + * Copyright (c) 2016, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#include "pathman.h" +#include "partition_creation.h" +#include "relation_info.h" +#include "utils.h" + +#include "utils/builtins.h" +#include "utils/typcache.h" +#include "utils/lsyscache.h" + + +/* Function declarations */ + +PG_FUNCTION_INFO_V1( create_hash_partitions_internal ); + +PG_FUNCTION_INFO_V1( get_hash_part_idx ); +PG_FUNCTION_INFO_V1( build_hash_condition ); + + +/* + * Create HASH partitions implementation (written in C). 
+ */ +Datum +create_hash_partitions_internal(PG_FUNCTION_ARGS) +{ +/* Free allocated arrays */ +#define DeepFreeArray(arr, arr_len) \ + do { \ + int arr_elem; \ + if (!arr) break; \ + for (arr_elem = 0; arr_elem < arr_len; arr_elem++) \ + pfree(arr[arr_elem]); \ + pfree(arr); \ + } while (0) + + Oid parent_relid = PG_GETARG_OID(0); + uint32 partitions_count = PG_GETARG_INT32(2), + i; + + /* Partition names and tablespaces */ + char **partition_names = NULL, + **tablespaces = NULL; + int partition_names_size = 0, + tablespaces_size = 0; + RangeVar **rangevars = NULL; + + /* Check that there's no partitions yet */ + if (has_pathman_relation_info(parent_relid)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot add new HASH partitions"))); + + /* Extract partition names */ + if (!PG_ARGISNULL(3)) + partition_names = deconstruct_text_array(PG_GETARG_DATUM(3), &partition_names_size); + + /* Extract partition tablespaces */ + if (!PG_ARGISNULL(4)) + tablespaces = deconstruct_text_array(PG_GETARG_DATUM(4), &tablespaces_size); + + /* Validate size of 'partition_names' */ + if (partition_names && partition_names_size != partitions_count) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("size of 'partition_names' must be equal to 'partitions_count'"))); + + /* Validate size of 'tablespaces' */ + if (tablespaces && tablespaces_size != partitions_count) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("size of 'tablespaces' must be equal to 'partitions_count'"))); + + /* Convert partition names into RangeVars */ + rangevars = qualified_relnames_to_rangevars(partition_names, partitions_count); + + /* Finally create HASH partitions */ + for (i = 0; i < partitions_count; i++) + { + RangeVar *partition_rv = rangevars ? rangevars[i] : NULL; + char *tablespace = tablespaces ? 
tablespaces[i] : NULL; + + /* Create a partition (copy FKs, invoke callbacks etc) */ + create_single_hash_partition_internal(parent_relid, i, partitions_count, + partition_rv, tablespace); + } + + /* Free arrays */ + DeepFreeArray(partition_names, partition_names_size); + DeepFreeArray(tablespaces, tablespaces_size); + DeepFreeArray(rangevars, partition_names_size); + + PG_RETURN_VOID(); +} + +/* + * Wrapper for hash_to_part_index(). + */ +Datum +get_hash_part_idx(PG_FUNCTION_ARGS) +{ + uint32 value = PG_GETARG_UINT32(0), + part_count = PG_GETARG_UINT32(1); + + PG_RETURN_UINT32(hash_to_part_index(value, part_count)); +} + +/* + * Build hash condition for a CHECK CONSTRAINT + */ +Datum +build_hash_condition(PG_FUNCTION_ARGS) +{ + Oid expr_type = PG_GETARG_OID(0); + char *expr_cstr = TextDatumGetCString(PG_GETARG_DATUM(1)); + uint32 part_count = PG_GETARG_UINT32(2), + part_idx = PG_GETARG_UINT32(3); + char *pathman_schema; + + TypeCacheEntry *tce; + + char *result; + + if (part_idx >= part_count) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_index' must be lower than 'partitions_count'"))); + + tce = lookup_type_cache(expr_type, TYPECACHE_HASH_PROC); + + /* Check that HASH function exists */ + if (!OidIsValid(tce->hash_proc)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("no hash function for type %s", + format_type_be(expr_type)))); + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + + /* Create hash condition CSTRING */ + result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", + pathman_schema, + get_func_name(tce->hash_proc), + expr_cstr, + part_count, + part_idx); + + PG_RETURN_TEXT_P(cstring_to_text(result)); +} diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c new file mode 100644 index 00000000..19292a0a --- /dev/null +++ b/src/pl_range_funcs.c @@ -0,0 +1,1407 @@ +/* 
------------------------------------------------------------------------ + * + * pl_range_funcs.c + * Utility C functions for stored RANGE procedures + * + * Copyright (c) 2016-2020, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#include "init.h" +#include "pathman.h" +#include "partition_creation.h" +#include "relation_info.h" +#include "utils.h" +#include "xact_handling.h" + +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif +#include "access/transam.h" +#include "access/xact.h" +#include "catalog/heap.h" +#include "catalog/namespace.h" +#include "catalog/pg_type.h" +#include "commands/tablecmds.h" +#include "executor/spi.h" +#include "nodes/nodeFuncs.h" +#include "parser/parse_relation.h" +#include "parser/parse_expr.h" +#include "utils/array.h" +#if PG_VERSION_NUM >= 120000 +#include "utils/float.h" +#endif +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/numeric.h" +#include "utils/ruleutils.h" +#include "utils/syscache.h" +#include "utils/snapmgr.h" + +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif + +#if PG_VERSION_NUM >= 100000 +#include "utils/regproc.h" +#include "utils/varlena.h" +#include +#endif + + +/* Function declarations */ + +PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); +PG_FUNCTION_INFO_V1( create_range_partitions_internal ); +PG_FUNCTION_INFO_V1( check_range_available_pl ); +PG_FUNCTION_INFO_V1( generate_range_bounds_pl ); +PG_FUNCTION_INFO_V1( validate_interval_value ); +PG_FUNCTION_INFO_V1( split_range_partition ); +PG_FUNCTION_INFO_V1( merge_range_partitions ); +PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); + +PG_FUNCTION_INFO_V1( get_part_range_by_oid ); +PG_FUNCTION_INFO_V1( get_part_range_by_idx ); + +PG_FUNCTION_INFO_V1( build_range_condition ); +PG_FUNCTION_INFO_V1( build_sequence_name ); + + +static ArrayType *construct_bounds_array(Bound *elems, + int nelems, + Oid elmtype, + int 
elmlen, + bool elmbyval, + char elmalign); + +static char *deparse_constraint(Oid relid, Node *expr); + +static void modify_range_constraint(Oid partition_relid, + const char *expression, + Oid expression_type, + const Bound *lower, + const Bound *upper); + +static bool interval_is_trivial(Oid atttype, + Datum interval, + Oid interval_type); + + +/* + * ----------------------------- + * Partition creation & checks + * ----------------------------- + */ + +/* pl/PgSQL wrapper for the create_single_range_partition(). */ +Datum +create_single_range_partition_pl(PG_FUNCTION_ARGS) +{ + Oid parent_relid, + partition_relid; + + /* RANGE boundaries + value type */ + Bound start, + end; + Oid bounds_type; + + /* Optional: name & tablespace */ + RangeVar *partition_name_rv; + char *tablespace; + + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + + /* Handle 'parent_relid' */ + if (!PG_ARGISNULL(0)) + { + parent_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); + + /* Check that table is partitioned by RANGE */ + if (!pathman_config_contains_relation(parent_relid, values, isnull, NULL, NULL) || + DatumGetPartType(values[Anum_pathman_config_parttype - 1]) != PT_RANGE) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned by RANGE", + get_rel_name_or_relid(parent_relid)))); + } + + bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + + start = PG_ARGISNULL(1) ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(1)); + + end = PG_ARGISNULL(2) ? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(2)); + + /* Fetch 'partition_name' */ + if (!PG_ARGISNULL(3)) + { + List *qualified_name; + text *partition_name; + + partition_name = PG_GETARG_TEXT_P(3); + qualified_name = textToQualifiedNameList(partition_name); + partition_name_rv = makeRangeVarFromNameList(qualified_name); + } + else partition_name_rv = NULL; /* default */ + + /* Fetch 'tablespace' */ + if (!PG_ARGISNULL(4)) + { + tablespace = TextDatumGetCString(PG_GETARG_DATUM(4)); + } + else tablespace = NULL; /* default */ + + /* Create a new RANGE partition and return its Oid */ + partition_relid = create_single_range_partition_internal(parent_relid, + &start, + &end, + bounds_type, + partition_name_rv, + tablespace); + + PG_RETURN_OID(partition_relid); +} + +Datum +create_range_partitions_internal(PG_FUNCTION_ARGS) +{ + Oid parent_relid; + int16 typlen; + bool typbyval; + char typalign; + FmgrInfo cmp_func; + + /* Partition names and tablespaces */ + char **partnames = NULL; + RangeVar **rangevars = NULL; + char **tablespaces = NULL; + int npartnames = 0; + int ntablespaces = 0; + + /* Bounds */ + ArrayType *bounds; + Oid bounds_type; + Datum *datums; + bool *nulls; + int ndatums; + int i; + + /* Extract parent's Oid */ + if (!PG_ARGISNULL(0)) + { + parent_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); + + /* Extract array of bounds */ + if (!PG_ARGISNULL(1)) + { + bounds = PG_GETARG_ARRAYTYPE_P(1); + bounds_type = ARR_ELEMTYPE(bounds); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'bounds' should not be NULL"))); + + /* Extract partition names */ + if (!PG_ARGISNULL(2)) + { + partnames = deconstruct_text_array(PG_GETARG_DATUM(2), &npartnames); + rangevars = qualified_relnames_to_rangevars(partnames, npartnames); + } + + /* Extract partition tablespaces */ + if (!PG_ARGISNULL(3)) + tablespaces = 
deconstruct_text_array(PG_GETARG_DATUM(3), &ntablespaces); + + /* Extract bounds */ + get_typlenbyvalalign(bounds_type, &typlen, &typbyval, &typalign); + deconstruct_array(bounds, bounds_type, + typlen, typbyval, typalign, + &datums, &nulls, &ndatums); + + if (partnames && npartnames != ndatums - 1) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("wrong length of 'partition_names' array"), + errdetail("number of 'partition_names' must be less than " + "'bounds' array length by one"))); + + if (tablespaces && ntablespaces != ndatums - 1) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("wrong length of 'tablespaces' array"), + errdetail("number of 'tablespaces' must be less than " + "'bounds' array length by one"))); + + /* Check if bounds array is ascending */ + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(bounds_type), + getBaseType(bounds_type)); + + /* Validate bounds */ + for (i = 0; i < ndatums; i++) + { + /* Disregard 1st bound */ + if (i == 0) continue; + + /* Check that bound is valid */ + if (nulls[i]) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("only first bound can be NULL"))); + + /* Check that bounds are ascending */ + if (!nulls[i - 1] && !check_le(&cmp_func, InvalidOid, + datums[i - 1], datums[i])) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'bounds' array must be ascending"))); + } + + /* Create partitions using provided bounds */ + for (i = 0; i < ndatums - 1; i++) + { + Bound start = nulls[i] ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(datums[i]), + + end = nulls[i + 1] ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(datums[i + 1]); + + RangeVar *name = rangevars ? rangevars[i] : NULL; + + char *tablespace = tablespaces ? 
tablespaces[i] : NULL; + + (void) create_single_range_partition_internal(parent_relid, + &start, + &end, + bounds_type, + name, + tablespace); + } + + /* Return number of partitions */ + PG_RETURN_INT32(ndatums - 1); +} + +/* Checks if range overlaps with existing partitions. */ +Datum +check_range_available_pl(PG_FUNCTION_ARGS) +{ + Oid parent_relid; + Bound start, + end; + Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + + if (PG_ARGISNULL(0)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); + + parent_relid = PG_GETARG_OID(0); + + start = PG_ARGISNULL(1) ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(1)); + + end = PG_ARGISNULL(2) ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(2)); + + /* Raise ERROR if range overlaps with any partition */ + check_range_available(parent_relid, + &start, + &end, + value_type, + true); + + PG_RETURN_VOID(); +} + +/* Generate range bounds starting with 'value' using 'interval'. 
*/ +Datum +generate_range_bounds_pl(PG_FUNCTION_ARGS) +{ + /* Input params */ + Datum value = PG_GETARG_DATUM(0); + Oid value_type = get_fn_expr_argtype(fcinfo->flinfo, 0); + Datum interval = PG_GETARG_DATUM(1); + Oid interval_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + int count = PG_GETARG_INT32(2); + int i; + + /* Operator */ + Oid plus_op_func; + Datum plus_op_result; + Oid plus_op_result_type; + + /* Array */ + ArrayType *array; + int16 elemlen; + bool elembyval; + char elemalign; + Datum *datums; + + Assert(!PG_ARGISNULL(0)); + Assert(!PG_ARGISNULL(1)); + Assert(!PG_ARGISNULL(2)); + + if (count < 1) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'p_count' must be greater than zero"))); + + /* We must provide count+1 bounds */ + count += 1; + + /* Find suitable addition operator for given value and interval */ + extract_op_func_and_ret_type("+", value_type, interval_type, + &plus_op_func, + &plus_op_result_type); + + /* Fetch type's information for array */ + get_typlenbyvalalign(value_type, &elemlen, &elembyval, &elemalign); + + datums = palloc(sizeof(Datum) * count); + datums[0] = value; + + /* Calculate bounds */ + for (i = 1; i < count; i++) + { + /* Invoke addition operator and get a result */ + plus_op_result = OidFunctionCall2(plus_op_func, value, interval); + + /* Cast result to 'value_type' if needed */ + if (plus_op_result_type != value_type) + plus_op_result = perform_type_cast(plus_op_result, + plus_op_result_type, + value_type, NULL); + + /* Update 'value' and store current bound */ + value = datums[i] = plus_op_result; + } + + /* build an array based on calculated datums */ + array = construct_array(datums, count, value_type, + elemlen, elembyval, elemalign); + + pfree(datums); + + PG_RETURN_ARRAYTYPE_P(array); +} + +/* + * Takes text representation of interval value and checks + * if it corresponds to partitioning expression. + * NOTE: throws an ERROR if it fails to convert text to Datum. 
+ */ +Datum +validate_interval_value(PG_FUNCTION_ARGS) +{ +#define ARG_PARTREL 0 +#define ARG_EXPRESSION 1 +#define ARG_PARTTYPE 2 +#define ARG_RANGE_INTERVAL 3 + + Oid partrel; + PartType parttype; + char *expr_cstr; + Oid expr_type; + + if (PG_ARGISNULL(ARG_PARTREL)) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partrel' should not be NULL"))); + } + else partrel = PG_GETARG_OID(ARG_PARTREL); + + /* Check that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) + elog(ERROR, "relation \"%u\" does not exist", partrel); + + if (PG_ARGISNULL(ARG_EXPRESSION)) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL"))); + } + else expr_cstr = TextDatumGetCString(PG_GETARG_DATUM(ARG_EXPRESSION)); + + if (PG_ARGISNULL(ARG_PARTTYPE)) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parttype' should not be NULL"))); + } + else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); + + /* + * Try to parse partitioning expression, could fail with ERROR. + */ + cook_partitioning_expression(partrel, expr_cstr, &expr_type); + + /* + * NULL interval is fine for both HASH and RANGE. + * But for RANGE we need to make some additional checks. 
+ */ + if (!PG_ARGISNULL(ARG_RANGE_INTERVAL)) + { + Datum interval_text = PG_GETARG_DATUM(ARG_RANGE_INTERVAL), + interval_value; + Oid interval_type; + + if (parttype == PT_HASH) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should be NULL for HASH partitioned table"))); + + /* Try converting textual representation */ + interval_value = extract_binary_interval_from_text(interval_text, + expr_type, + &interval_type); + + /* Check that interval isn't trivial */ + if (interval_is_trivial(expr_type, interval_value, interval_type)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should not be trivial"))); + } + + PG_RETURN_BOOL(true); +} + +Datum +split_range_partition(PG_FUNCTION_ARGS) +{ + Oid parent = InvalidOid, + partition1, + partition2; + RangeVar *part_name = NULL; + char *tablespace_name = NULL; + + Datum pivot_value; + Oid pivot_type; + + PartRelationInfo *prel; + Bound min_bound, + max_bound, + split_bound; + + Snapshot fresh_snapshot; + FmgrInfo finfo; + SPIPlanPtr plan; + char *query; + int i; + + if (!PG_ARGISNULL(0)) + { + partition1 = PG_GETARG_OID(0); + check_relation_oid(partition1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition1' should not be NULL"))); + + if (!PG_ARGISNULL(1)) + { + pivot_value = PG_GETARG_DATUM(1); + pivot_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'split_value' should not be NULL"))); + + LockRelationOid(partition1, ExclusiveLock); + + /* Get parent of partition */ + parent = get_parent_of_partition(partition1); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition1)))); + + /* This partition should not have children */ + if (has_pathman_relation_info(partition1)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + 
errmsg("cannot split partition that has children"))); + + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); + + i = PrelHasPartition(prel, partition1) - 1; + Assert(i >= 0 && i < PrelChildrenCount(prel)); + + min_bound = PrelGetRangesArray(prel)[i].min; + max_bound = PrelGetRangesArray(prel)[i].max; + + split_bound = MakeBound(perform_type_cast(pivot_value, + getBaseType(pivot_type), + getBaseType(prel->ev_type), + NULL)); + + fmgr_info(prel->cmp_proc, &finfo); + + /* Validate pivot's value */ + if (cmp_bounds(&finfo, prel->ev_collid, &split_bound, &min_bound) <= 0 || + cmp_bounds(&finfo, prel->ev_collid, &split_bound, &max_bound) >= 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("specified value does not fit into the range (%s, %s)", + BoundToCString(&min_bound, prel->ev_type), + BoundToCString(&max_bound, prel->ev_type)))); + } + + if (!PG_ARGISNULL(2)) + { + part_name = makeRangeVar(get_namespace_name(get_rel_namespace(parent)), + TextDatumGetCString(PG_GETARG_DATUM(2)), + 0); + } + + if (!PG_ARGISNULL(3)) + { + tablespace_name = TextDatumGetCString(PG_GETARG_DATUM(3)); + } + + /* Create a new partition */ + partition2 = create_single_range_partition_internal(parent, + &split_bound, + &max_bound, + prel->ev_type, + part_name, + tablespace_name); + + /* Make constraint visible */ + CommandCounterIncrement(); + + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect using SPI"); + + /* + * Get latest snapshot to see data that might have been + * added to partitions before this transaction has started, + * but was committed a moment before we acquired the locks. 
+ */ + fresh_snapshot = RegisterSnapshot(GetLatestSnapshot()); + + query = psprintf("WITH part_data AS ( " + "DELETE FROM %1$s WHERE (%3$s) >= $1 RETURNING " + "*) " + "INSERT INTO %2$s SELECT * FROM part_data", + get_qualified_rel_name(partition1), + get_qualified_rel_name(partition2), + prel->expr_cstr); + + plan = SPI_prepare(query, 1, &prel->ev_type); + + if (!plan) + elog(ERROR, "%s: SPI_prepare returned %d", + __FUNCTION__, SPI_result); + + SPI_execute_snapshot(plan, + &split_bound.value, NULL, + fresh_snapshot, + InvalidSnapshot, + false, true, 0); + + /* Free snapshot */ + UnregisterSnapshot(fresh_snapshot); + + SPI_finish(); + + /* Drop old constraint and create a new one */ + modify_range_constraint(partition1, + prel->expr_cstr, + prel->ev_type, + &min_bound, + &split_bound); + + /* Make constraint visible */ + CommandCounterIncrement(); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_OID(partition2); +} + +/* + * Merge multiple partitions. + * All data will be copied to the first one. + * The rest of partitions will be dropped. 
+ */ +Datum +merge_range_partitions(PG_FUNCTION_ARGS) +{ + Oid parent = InvalidOid, + partition = InvalidOid; + ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); + + Oid *parts; + int nparts; + + Datum *datums; + bool *nulls; + int16 typlen; + bool typbyval; + char typalign; + + PartRelationInfo *prel; + Bound min_bound, + max_bound; + RangeEntry *bounds; + ObjectAddresses *objects = new_object_addresses(); + Snapshot fresh_snapshot; + FmgrInfo finfo; + int i; + + /* Validate array type */ + Assert(ARR_ELEMTYPE(arr) == REGCLASSOID); + + /* Extract Oids */ + get_typlenbyvalalign(REGCLASSOID, &typlen, &typbyval, &typalign); + deconstruct_array(arr, REGCLASSOID, + typlen, typbyval, typalign, + &datums, &nulls, &nparts); + + if (nparts < 2) + ereport(ERROR, (errmsg("cannot merge partitions"), + errdetail("there must be at least two partitions"))); + + /* Allocate arrays */ + parts = palloc(nparts * sizeof(Oid)); + bounds = palloc(nparts * sizeof(RangeEntry)); + + for (i = 0; i < nparts; i++) + { + Oid cur_parent; + + /* Extract partition Oids from array */ + parts[i] = DatumGetObjectId(datums[i]); + + /* Check if all partitions are from the same parent */ + cur_parent = get_parent_of_partition(parts[i]); + + /* If we couldn't find a parent, it's not a partition */ + if (!OidIsValid(cur_parent)) + ereport(ERROR, (errmsg("cannot merge partitions"), + errdetail("relation \"%s\" is not a partition", + get_rel_name_or_relid(parts[i])))); + + /* 'parent' is not initialized */ + if (parent == InvalidOid) + parent = cur_parent; /* save parent */ + + /* Oops, parent mismatch! 
*/ + if (cur_parent != parent) + ereport(ERROR, (errmsg("cannot merge partitions"), + errdetail("all relations must share the same parent"))); + } + + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); + + /* Prevent modification of partitions */ + for (i = 0; i < nparts; i++) + LockRelationOid(parts[i], AccessExclusiveLock); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); + + /* Copy rentries from 'prel' */ + for (i = 0; i < nparts; i++) + { + uint32 idx = PrelHasPartition(prel, parts[i]); + Assert(idx > 0); + + bounds[i] = PrelGetRangesArray(prel)[idx - 1]; + } + + /* Sort rentries by increasing bound */ + qsort_range_entries(bounds, nparts, prel); + + fmgr_info(prel->cmp_proc, &finfo); + + /* Check that partitions are adjacent */ + for (i = 1; i < nparts; i++) + { + Bound cur_min = bounds[i].min, + prev_max = bounds[i - 1].max; + + if (cmp_bounds(&finfo, prel->ev_collid, &cur_min, &prev_max) != 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partitions \"%s\" and \"%s\" are not adjacent", + get_rel_name(bounds[i - 1].child_oid), + get_rel_name(bounds[i].child_oid)))); + } + } + + /* First determine the bounds of a new constraint */ + min_bound = bounds[0].min; + max_bound = bounds[nparts - 1].max; + partition = parts[0]; + + /* Drop old constraint and create a new one */ + modify_range_constraint(partition, + prel->expr_cstr, + prel->ev_type, + &min_bound, + &max_bound); + + /* Make constraint visible */ + CommandCounterIncrement(); + + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect using SPI"); + + /* + * Get latest snapshot to see data that might have been + * added to partitions before this transaction has started, + * but was committed a moment before we acquired the locks. 
+ */ + fresh_snapshot = RegisterSnapshot(GetLatestSnapshot()); + + /* Migrate the data from all partition to the first one */ + for (i = 1; i < nparts; i++) + { + ObjectAddress object; + + char *query = psprintf("WITH part_data AS ( " + "DELETE FROM %1$s RETURNING " + "*) " + "INSERT INTO %2$s SELECT * FROM part_data", + get_qualified_rel_name(parts[i]), + get_qualified_rel_name(parts[0])); + + SPIPlanPtr plan = SPI_prepare(query, 0, NULL); + + if (!plan) + elog(ERROR, "%s: SPI_prepare returned %d", + __FUNCTION__, SPI_result); + + SPI_execute_snapshot(plan, NULL, NULL, + fresh_snapshot, + InvalidSnapshot, + false, true, 0); + + pfree(query); + + /* To be deleted */ + ObjectAddressSet(object, RelationRelationId, parts[i]); + add_exact_object_address(&object, objects); + } + + /* Free snapshot */ + UnregisterSnapshot(fresh_snapshot); + + SPI_finish(); + + /* Drop obsolete partitions */ + performMultipleDeletions(objects, DROP_CASCADE, 0); + free_object_addresses(objects); + + pfree(bounds); + pfree(parts); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_OID(partition); +} + +/* + * Drops partition and expands the next partition + * so that it could cover the dropped one. + * + * This function was written in order to support + * Oracle-like ALTER TABLE ... DROP PARTITION. + * + * In Oracle partitions only have upper bound and when partition + * is dropped the next one automatically covers freed range. 
+ */ +Datum +drop_range_partition_expand_next(PG_FUNCTION_ARGS) +{ + Oid partition = PG_GETARG_OID(0), + parent; + PartRelationInfo *prel; + ObjectAddress object; + RangeEntry *ranges; + int i; + + check_relation_oid(partition); + + /* Lock the partition we're going to drop */ + LockRelationOid(partition, AccessExclusiveLock); + + /* Get parent's relid */ + parent = get_parent_of_partition(partition); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition)))); + + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); + + /* Fetch ranges array */ + ranges = PrelGetRangesArray(prel); + + /* Looking for partition in child relations */ + i = PrelHasPartition(prel, partition) - 1; + Assert(i >= 0 && i < PrelChildrenCount(prel)); + + /* Expand next partition if it exists */ + if (i < PrelLastChild(prel)) + { + RangeEntry *cur = &ranges[i], + *next = &ranges[i + 1]; + Oid next_partition = next->child_oid; + LOCKMODE lockmode = AccessExclusiveLock; + + /* Lock next partition */ + LockRelationOid(next_partition, lockmode); + + /* Does next partition exist? */ + if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(next_partition))) + { + /* Stretch next partition to cover range */ + modify_range_constraint(next_partition, + prel->expr_cstr, + prel->ev_type, + &cur->min, + &next->max); + } + /* Bad luck, unlock missing partition */ + else UnlockRelationOid(next_partition, lockmode); + } + + /* Drop partition */ + ObjectAddressSet(object, RelationRelationId, partition); + performDeletion(&object, DROP_CASCADE, 0); + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + + PG_RETURN_VOID(); +} + + +/* + * ------------------------ + * Various useful getters + * ------------------------ + */ + +/* + * Returns range entry (min, max) (in form of array). + * + * arg #1 is the parent's Oid. + * arg #2 is the partition's Oid. + */ +Datum +get_part_range_by_oid(PG_FUNCTION_ARGS) +{ + Oid partition_relid, + parent_relid; + Oid arg_type; + RangeEntry *ranges; + PartRelationInfo *prel; + uint32 idx; + + if (!PG_ARGISNULL(0)) + { + partition_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); + + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition_relid)))); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + + /* Check type of 'dummy' (for correct output) */ + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->ev_type))))); + + ranges = PrelGetRangesArray(prel); + + /* Look for the specified partition */ + if ((idx = PrelHasPartition(prel, partition_relid)) > 0) + { + ArrayType *arr; + Bound elems[2]; + + elems[0] = ranges[idx - 1].min; + elems[1] = ranges[idx - 1].max; + + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + + PG_RETURN_ARRAYTYPE_P(arr); + } + + /* No partition found, report error */ + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" has no partition \"%s\"", + get_rel_name_or_relid(parent_relid), + get_rel_name_or_relid(partition_relid)))); + + PG_RETURN_NULL(); /* keep compiler happy */ +} + +/* + * Returns N-th range entry (min, max) (in form of array). + * + * arg #1 is the parent's Oid. + * arg #2 is the index of the range + * (if it is negative then the last range will be returned). + */ +Datum +get_part_range_by_idx(PG_FUNCTION_ARGS) +{ + Oid parent_relid; + int partition_idx = 0; + Oid arg_type; + Bound elems[2]; + RangeEntry *ranges; + PartRelationInfo *prel; + ArrayType *arr; + + if (!PG_ARGISNULL(0)) + { + parent_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); + + if (!PG_ARGISNULL(1)) + { + partition_idx = PG_GETARG_INT32(1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_idx' should not be NULL"))); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + + /* Check type of 'dummy' (for correct output) */ + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->ev_type))))); + + + /* Now we have to deal with 'idx' */ + if (partition_idx < -1) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("negative indices other than -1" + " (last partition) are not allowed"))); + } + else if (partition_idx == -1) + { + partition_idx = PrelLastChild(prel); + } + else if (((uint32) abs(partition_idx)) >= PrelChildrenCount(prel)) + { + 
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partition #%d does not exist (total amount is %u)", + partition_idx, PrelChildrenCount(prel)))); + } + + ranges = PrelGetRangesArray(prel); + + /* Build args for construct_infinitable_array() */ + elems[0] = ranges[partition_idx].min; + elems[1] = ranges[partition_idx].max; + + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_ARRAYTYPE_P(arr); +} + + +/* + * ------------------------ + * Useful string builders + * ------------------------ + */ + +/* Build range condition for a CHECK CONSTRAINT. */ +Datum +build_range_condition(PG_FUNCTION_ARGS) +{ + Oid partition_relid; + char *expression; + Node *expr; + + Bound min, + max; + Oid bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + Constraint *con; + char *result; + + if (!PG_ARGISNULL(0)) + { + partition_relid = PG_GETARG_OID(0); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); + if (partition_relid < FirstNormalObjectId) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' must be normal object oid"))); + + if (!PG_ARGISNULL(1)) + { + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL")));; + + /* lock the partition */ + LockRelationOid(partition_relid, ShareUpdateExclusiveLock); + min = PG_ARGISNULL(2) ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(2)); + + max = PG_ARGISNULL(3) ? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(3)); + + expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); + con = build_range_check_constraint(partition_relid, + expr, + &min, &max, + bounds_type); + + result = deparse_constraint(partition_relid, con->raw_expr); + + PG_RETURN_TEXT_P(cstring_to_text(result)); +} + +/* Build name for sequence for auto partition naming */ +Datum +build_sequence_name(PG_FUNCTION_ARGS) +{ + Oid parent_relid = PG_GETARG_OID(0); + Oid parent_nsp; + char *seq_name; + char *result; + + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) + ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); + + parent_nsp = get_rel_namespace(parent_relid); + seq_name = build_sequence_name_relid_internal(parent_relid); + + result = psprintf("%s.%s", + quote_identifier(get_namespace_name(parent_nsp)), + quote_identifier(seq_name)); + + PG_RETURN_TEXT_P(cstring_to_text(result)); +} + + +/* + * ------------------ + * Helper functions + * ------------------ + */ + +/* + * Check if interval is insignificant to avoid infinite loops while adding + * new partitions + * + * The main idea behind this function is to add specified interval to some + * default value (zero for numeric types and current date/timestamp for datetime + * types) and look if it is changed. If it is then return true. + */ +static bool +interval_is_trivial(Oid atttype, Datum interval, Oid interval_type) +{ + Oid plus_op_func; + Datum plus_op_result; + Oid plus_op_result_type; + + Datum default_value; + + FmgrInfo cmp_func; + int32 cmp_result; + + /* + * Generate default value. + * + * For float4 and float8 values we also check that they aren't NaN or INF. 
+ */ + switch(atttype) + { + case INT2OID: + default_value = Int16GetDatum(0); + break; + + case INT4OID: + default_value = Int32GetDatum(0); + break; + + /* Take care of 32-bit platforms */ + case INT8OID: + default_value = Int64GetDatum(0); + break; + + case FLOAT4OID: + { + float4 f = DatumGetFloat4(interval); + + if (isnan(f) || is_infinite(f)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid floating point interval"))); + default_value = Float4GetDatum(0); + } + break; + + case FLOAT8OID: + { + float8 f = DatumGetFloat8(interval); + + if (isnan(f) || is_infinite(f)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid floating point interval"))); + default_value = Float8GetDatum(0); + } + break; + + case NUMERICOID: + { + Numeric ni = DatumGetNumeric(interval), + numeric; + + /* Test for NaN */ + if (numeric_is_nan(ni)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid numeric interval"))); + + /* Building default value */ + numeric = DatumGetNumeric( + DirectFunctionCall3(numeric_in, + CStringGetDatum("0"), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1))); + default_value = NumericGetDatum(numeric); + } + break; + + case TIMESTAMPOID: + case TIMESTAMPTZOID: + default_value = TimestampGetDatum(GetCurrentTimestamp()); + break; + + case DATEOID: + { + Datum ts = TimestampGetDatum(GetCurrentTimestamp()); + + default_value = perform_type_cast(ts, TIMESTAMPTZOID, DATEOID, NULL); + } + break; + + default: + return false; + } + + /* Find suitable addition operator for default value and interval */ + extract_op_func_and_ret_type("+", atttype, interval_type, + &plus_op_func, + &plus_op_result_type); + + /* Invoke addition operator and get a result */ + plus_op_result = OidFunctionCall2(plus_op_func, default_value, interval); + + /* + * If operator result type isn't the same as original value then + * convert it. 
We need this to make sure that specified interval would + * change the _original_ value somehow. For example, if we add one second + * to a date then we'll get a timestamp which is one second later than + * original date (obviously). But when we convert it back to a date we will + * get the same original value meaning that one second interval wouldn't + * change original value anyhow. We should consider such interval as trivial + */ + if (plus_op_result_type != atttype) + { + plus_op_result = perform_type_cast(plus_op_result, + plus_op_result_type, + atttype, NULL); + plus_op_result_type = atttype; + } + + /* + * Compare it to the default_value. + * + * If they are the same then obviously interval is trivial. + */ + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(atttype), + getBaseType(plus_op_result_type)); + + cmp_result = DatumGetInt32(FunctionCall2(&cmp_func, + default_value, + plus_op_result)); + if (cmp_result == 0) + return true; + + else if (cmp_result > 0) /* Negative interval? 
*/ + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should not be negative"))); + + /* Everything is OK */ + return false; +} + +/* + * Drop old partition constraint and create + * a new one with specified boundaries + */ +static void +modify_range_constraint(Oid partition_relid, + const char *expression, + Oid expression_type, + const Bound *lower, + const Bound *upper) +{ + Node *expr; + Constraint *constraint; + + /* Drop old constraint */ + drop_pathman_check_constraint(partition_relid); + + /* Parse expression */ + expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); + + /* Build a new one */ + constraint = build_range_check_constraint(partition_relid, + expr, + lower, + upper, + expression_type); + + /* Add new constraint */ + add_pathman_check_constraint(partition_relid, constraint); +} + +/* + * Transform constraint into cstring + * + * In >=13 (5815696bc66) result type of addRangeTableEntryForRelationCompat() was changed + */ +static char * +deparse_constraint(Oid relid, Node *expr) +{ + Relation rel; +#if PG_VERSION_NUM >= 130000 + ParseNamespaceItem *nsitem; +#else + RangeTblEntry *rte; +#endif + Node *cooked_expr; + ParseState *pstate; + List *context; + char *result; + + context = deparse_context_for(get_rel_name(relid), relid); + + rel = heap_open_compat(relid, NoLock); + + /* Initialize parse state */ + pstate = make_parsestate(NULL); +#if PG_VERSION_NUM >= 130000 + nsitem = addRangeTableEntryForRelationCompat(pstate, rel, AccessShareLock, NULL, false, true); + addNSItemToQuery(pstate, nsitem, true, true, true); +#else + rte = addRangeTableEntryForRelationCompat(pstate, rel, AccessShareLock, NULL, false, true); + addRTEtoQuery(pstate, rte, true, true, true); +#endif + + /* Transform constraint into executable expression (i.e. 
cook it) */ + cooked_expr = transformExpr(pstate, expr, EXPR_KIND_CHECK_CONSTRAINT); + + /* Transform expression into string */ + result = deparse_expression(cooked_expr, context, false, false); + + heap_close_compat(rel, NoLock); + + return result; +} + +/* + * Build an 1d array of Bound elements. + * + * The main difference from construct_array() is that + * it will substitute infinite values with NULLs. + */ +static ArrayType * +construct_bounds_array(Bound *elems, + int nelems, + Oid elemtype, + int elemlen, + bool elembyval, + char elemalign) +{ + ArrayType *arr; + Datum *datums; + bool *nulls; + int dims[1] = { nelems }; + int lbs[1] = { 1 }; + int i; + + datums = palloc(sizeof(Datum) * nelems); + nulls = palloc(sizeof(bool) * nelems); + + for (i = 0; i < nelems; i++) + { + datums[i] = IsInfinite(&elems[i]) ? + (Datum) 0 : + BoundGetValue(&elems[i]); + nulls[i] = IsInfinite(&elems[i]); + } + + arr = construct_md_array(datums, nulls, 1, + dims, lbs, + elemtype, elemlen, + elembyval, elemalign); + + return arr; +} diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c new file mode 100644 index 00000000..5b6a7982 --- /dev/null +++ b/src/planner_tree_modification.c @@ -0,0 +1,1211 @@ +/* ------------------------------------------------------------------------ + * + * planner_tree_modification.c + * Functions for query- and plan- tree modification + * + * Copyright (c) 2016-2020, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * ------------------------------------------------------------------------ + */ + +#include "compat/rowmarks_fix.h" + +#include "declarative.h" +#include "partition_filter.h" +#include "partition_router.h" +#include "partition_overseer.h" +#include "planner_tree_modification.h" +#include "relation_info.h" +#include "rewrite/rewriteManip.h" + +#if PG_VERSION_NUM >= 120000 +#include 
"access/table.h" +#endif +#include "access/htup_details.h" +#include "foreign/fdwapi.h" +#include "miscadmin.h" +#include "optimizer/clauses.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif +#include "storage/lmgr.h" +#include "utils/syscache.h" + + +/* + * Drop conflicting macros for the sake of TRANSFORM_CONTEXT_FIELD(...). + * For instance, Windows.h contains a nasty "#define DELETE". + */ +#ifdef SELECT +#undef SELECT +#endif + +#ifdef INSERT +#undef INSERT +#endif + +#ifdef UPDATE +#undef UPDATE +#endif + +#ifdef DELETE +#undef DELETE +#endif + + +/* for assign_rel_parenthood_status() */ +#define PARENTHOOD_TAG CppAsString(PARENTHOOD) + +/* Build transform_query_cxt field name */ +#define TRANSFORM_CONTEXT_FIELD(command_type) \ + has_parent_##command_type##_query + +/* Check that transform_query_cxt field is TRUE */ +#define TRANSFORM_CONTEXT_HAS_PARENT(context, command_type) \ + ( (context)->TRANSFORM_CONTEXT_FIELD(command_type) ) + +/* Used in switch(CmdType) statements */ +#define TRANSFORM_CONTEXT_SWITCH_SET(context, command_type) \ + case CMD_##command_type: \ + (context)->TRANSFORM_CONTEXT_FIELD(command_type) = true; \ + break; \ + +#define TRANSFORM_CONTEXT_QUERY_IS_CTE_CTE(context, query) \ + ( (context)->parent_cte && \ + (context)->parent_cte->ctequery == (Node *) (query) ) + +#define TRANSFORM_CONTEXT_QUERY_IS_CTE_SL(context, query) \ + ( (context)->parent_sublink && \ + (context)->parent_sublink->subselect == (Node *) (query) && \ + (context)->parent_sublink->subLinkType == CTE_SUBLINK ) + +/* Check if 'query' is CTE according to 'context' */ +#define TRANSFORM_CONTEXT_QUERY_IS_CTE(context, query) \ + ( TRANSFORM_CONTEXT_QUERY_IS_CTE_CTE((context), (query)) || \ + TRANSFORM_CONTEXT_QUERY_IS_CTE_SL ((context), (query)) ) + +typedef struct +{ + /* Do we have a parent CmdType query? 
*/ + bool TRANSFORM_CONTEXT_FIELD(SELECT), + TRANSFORM_CONTEXT_FIELD(INSERT), + TRANSFORM_CONTEXT_FIELD(UPDATE), + TRANSFORM_CONTEXT_FIELD(DELETE); + + /* Parameters for handle_modification_query() */ + ParamListInfo query_params; + + /* SubLink that might contain an examined query */ + SubLink *parent_sublink; + + /* CommonTableExpr that might contain an examined query */ + CommonTableExpr *parent_cte; +} transform_query_cxt; + +typedef struct +{ + Index child_varno; + Oid parent_relid, + parent_reltype, + child_reltype; + List *translated_vars; +} adjust_appendrel_varnos_cxt; + +static bool pathman_transform_query_walker(Node *node, void *context); +static bool pathman_post_analyze_query_walker(Node *node, void *context); + +static void disable_standard_inheritance(Query *parse, transform_query_cxt *context); +static void handle_modification_query(Query *parse, transform_query_cxt *context); + +static Plan *partition_filter_visitor(Plan *plan, void *context); +static Plan *partition_router_visitor(Plan *plan, void *context); + +static void state_visit_subplans(List *plans, void (*visitor) (PlanState *plan, void *context), void *context); +static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (PlanState *plan, void *context), void *context); + +static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals); +static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); +static Node *adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); +static bool inh_translation_list_is_trivial(List *translated_vars); +static bool modifytable_contains_fdw(List *rtable, ModifyTable *node); + + +/* + * HACK: We have to mark each Query with a unique + * id in order to recognize them properly. 
+ */ +#define QUERY_ID_INITIAL 0 +static uint64 latest_query_id = QUERY_ID_INITIAL; + + +void +assign_query_id(Query *query) +{ + uint64 prev_id = latest_query_id++; + + if (prev_id > latest_query_id) + elog(WARNING, "assign_query_id(): queryId overflow"); + + query->queryId = latest_query_id; +} + +void +reset_query_id_generator(void) +{ + latest_query_id = QUERY_ID_INITIAL; +} + + +/* + * Basic plan tree walker. + * + * 'visitor' is applied right before return. + */ +Plan * +plan_tree_visitor(Plan *plan, + Plan *(*visitor) (Plan *plan, void *context), + void *context) +{ + ListCell *l; + + if (plan == NULL) + return NULL; + + check_stack_depth(); + + /* Plan-type-specific fixes */ + switch (nodeTag(plan)) + { + case T_SubqueryScan: + plan_tree_visitor(((SubqueryScan *) plan)->subplan, visitor, context); + break; + + case T_CustomScan: + foreach (l, ((CustomScan *) plan)->custom_plans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; + +#if PG_VERSION_NUM < 140000 /* reworked in commit 86dc90056dfd */ + case T_ModifyTable: + foreach (l, ((ModifyTable *) plan)->plans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; +#endif + + case T_Append: + foreach (l, ((Append *) plan)->appendplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; + + case T_MergeAppend: + foreach (l, ((MergeAppend *) plan)->mergeplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; + + case T_BitmapAnd: + foreach (l, ((BitmapAnd *) plan)->bitmapplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; + + case T_BitmapOr: + foreach (l, ((BitmapOr *) plan)->bitmapplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; + + default: + break; + } + + plan_tree_visitor(plan->lefttree, visitor, context); + plan_tree_visitor(plan->righttree, visitor, context); + + /* Apply visitor to the current node */ + return visitor(plan, context); +} + +void +state_tree_visitor(PlanState *state, + 
void (*visitor) (PlanState *plan, void *context), + void *context) +{ + Plan *plan; + ListCell *lc; + + if (state == NULL) + return; + + plan = state->plan; + + check_stack_depth(); + + /* Plan-type-specific fixes */ + switch (nodeTag(plan)) + { + case T_SubqueryScan: + state_tree_visitor(((SubqueryScanState *) state)->subplan, visitor, context); + break; + + case T_CustomScan: + foreach (lc, ((CustomScanState *) state)->custom_ps) + state_tree_visitor((PlanState *) lfirst(lc), visitor, context); + break; + +#if PG_VERSION_NUM < 140000 /* reworked in commit 86dc90056dfd */ + case T_ModifyTable: + state_visit_members(((ModifyTableState *) state)->mt_plans, + ((ModifyTableState *) state)->mt_nplans, + visitor, context); + break; +#endif + + case T_Append: + state_visit_members(((AppendState *) state)->appendplans, + ((AppendState *) state)->as_nplans, + visitor, context); + break; + + case T_MergeAppend: + state_visit_members(((MergeAppendState *) state)->mergeplans, + ((MergeAppendState *) state)->ms_nplans, + visitor, context); + break; + + case T_BitmapAnd: + state_visit_members(((BitmapAndState *) state)->bitmapplans, + ((BitmapAndState *) state)->nplans, + visitor, context); + break; + + case T_BitmapOr: + state_visit_members(((BitmapOrState *) state)->bitmapplans, + ((BitmapOrState *) state)->nplans, + visitor, context); + break; + + default: + break; + } + + state_visit_subplans(state->initPlan, visitor, context); + state_visit_subplans(state->subPlan, visitor, context); + + state_tree_visitor(state->lefttree, visitor, context); + state_tree_visitor(state->righttree, visitor, context); + + /* Apply visitor to the current node */ + visitor(state, context); +} + +/* + * Walk a list of SubPlans (or initPlans, which also use SubPlan nodes). 
+ */ +static void +state_visit_subplans(List *plans, + void (*visitor) (PlanState *plan, void *context), + void *context) +{ + ListCell *lc; + + foreach (lc, plans) + { + SubPlanState *sps = lfirst_node(SubPlanState, lc); + state_tree_visitor(sps->planstate, visitor, context); + } +} + +/* + * Walk the constituent plans of a ModifyTable, Append, MergeAppend, + * BitmapAnd, or BitmapOr node. + */ +static void +state_visit_members(PlanState **planstates, int nplans, + void (*visitor) (PlanState *plan, void *context), void *context) +{ + int i; + + for (i = 0; i < nplans; i++) + state_tree_visitor(planstates[i], visitor, context); +} + + +/* + * ------------------------------- + * Walker for Query modification + * ------------------------------- + */ + +/* Perform some transformations on Query tree */ +void +pathman_transform_query(Query *parse, ParamListInfo params) +{ + transform_query_cxt context; + + /* Initialize context */ + memset((void *) &context, 0, sizeof(context)); + context.query_params = params; + + pathman_transform_query_walker((Node *) parse, (void *) &context); +} + +void +pathman_post_analyze_query(Query *parse) +{ + pathman_post_analyze_query_walker((Node *) parse, NULL); +} + +/* Walker for pathman_transform_query() */ +static bool +pathman_transform_query_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + + else if (IsA(node, SubLink) || IsA(node, CommonTableExpr)) + { + transform_query_cxt *current_context = context, + next_context; + + /* Initialize next context for bottom subqueries */ + next_context = *current_context; + + if (IsA(node, SubLink)) + { + next_context.parent_sublink = (SubLink *) node; + next_context.parent_cte = NULL; + } + else + { + next_context.parent_sublink = NULL; + next_context.parent_cte = (CommonTableExpr *) node; + } + + /* Handle expression subtree */ + return expression_tree_walker(node, + pathman_transform_query_walker, + (void *) &next_context); + } + + else if (IsA(node, Query)) + { + 
Query *query = (Query *) node; + transform_query_cxt *current_context = context, + next_context; + + /* Initialize next context for bottom subqueries */ + next_context = *current_context; + switch (query->commandType) + { + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, SELECT); + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, INSERT); + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, UPDATE); + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, DELETE); + + default: + break; + } + next_context.parent_sublink = NULL; + next_context.parent_cte = NULL; + + /* Assign Query a 'queryId' */ + assign_query_id(query); + + /* Apply Query tree modifiers */ + disable_standard_inheritance(query, current_context); + handle_modification_query(query, current_context); + + /* Handle Query node */ + return query_tree_walker(query, + pathman_transform_query_walker, + (void *) &next_context, + 0); + } + + /* Handle expression subtree */ + return expression_tree_walker(node, + pathman_transform_query_walker, + context); +} + +static bool +pathman_post_analyze_query_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + + else if (IsA(node, Query)) + { + Query *query = (Query *) node; + + /* Make changes for declarative syntax */ +#ifdef ENABLE_DECLARATIVE + modify_declarative_partitioning_query(query); +#endif + + /* Handle Query node */ + return query_tree_walker(query, + pathman_post_analyze_query_walker, + context, + 0); + } + + /* Handle expression subtree */ + return expression_tree_walker(node, + pathman_post_analyze_query_walker, + context); +} + +/* + * ---------------------- + * Query tree modifiers + * ---------------------- + */ + +/* Disable standard inheritance if table is partitioned by pg_pathman */ +static void +disable_standard_inheritance(Query *parse, transform_query_cxt *context) +{ + ListCell *lc; + Index current_rti; /* current range table entry index */ + +#ifdef LEGACY_ROWMARKS_95 + /* Don't process non-SELECT queries */ + if (parse->commandType != 
CMD_SELECT) + return; + + /* Don't process queries under UPDATE or DELETE (except for CTEs) */ + if ((TRANSFORM_CONTEXT_HAS_PARENT(context, UPDATE) || + TRANSFORM_CONTEXT_HAS_PARENT(context, DELETE)) && + !TRANSFORM_CONTEXT_QUERY_IS_CTE(context, parse)) + return; +#endif + + /* Walk through RangeTblEntries list */ + current_rti = 0; + foreach (lc, parse->rtable) + { + RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); + + current_rti++; /* increment RTE index */ + Assert(current_rti != 0); + + /* Process only non-result base relations */ + if (rte->rtekind != RTE_RELATION || + rte->relkind != RELKIND_RELATION || + parse->resultRelation == current_rti) /* is it a result relation? */ + { +#if PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ + if (parse->commandType == CMD_MERGE && + (rte->rtekind == RTE_RELATION || + rte->relkind == RELKIND_RELATION) && + rte->inh && has_pathman_relation_info(rte->relid)) + elog(ERROR, "pg_pathman doesn't support MERGE command yet"); +#endif + + continue; + } + + /* Table may be partitioned */ + if (rte->inh) + { +#ifdef LEGACY_ROWMARKS_95 + /* Don't process queries with RowMarks on 9.5 */ + if (get_parse_rowmark(parse, current_rti)) + continue; +#endif + + /* Proceed if table is partitioned by pg_pathman */ + if (has_pathman_relation_info(rte->relid)) + { + /* HACK: unset the 'inh' flag to disable standard planning */ + rte->inh = false; + + /* Try marking it using PARENTHOOD_ALLOWED */ + assign_rel_parenthood_status(rte, PARENTHOOD_ALLOWED); + } + } + /* Else try marking it using PARENTHOOD_DISALLOWED */ + else assign_rel_parenthood_status(rte, PARENTHOOD_DISALLOWED); + } +} + +/* Checks if query affects only one partition */ +static void +handle_modification_query(Query *parse, transform_query_cxt *context) +{ + RangeTblEntry *rte; + Oid child; + Node *quals; + Index result_rti = parse->resultRelation; + ParamListInfo params = context->query_params; + + /* Exit if it's not a DELETE or UPDATE query */ + if (result_rti == 0 || 
(parse->commandType != CMD_UPDATE && + parse->commandType != CMD_DELETE)) + return; + + /* can't set earlier because CMD_UTILITY doesn't have jointree */ + quals = parse->jointree->quals; + rte = rt_fetch(result_rti, parse->rtable); + + /* Exit if it's ONLY table */ + if (!rte->inh) + return; + + /* Check if we can replace PARAMs with CONSTs */ + if (params && clause_contains_params(quals)) + quals = eval_extern_params_mutator(quals, params); + + /* Evaluate constaint expressions */ + quals = eval_const_expressions(NULL, quals); + + /* Parse syntax tree and extract deepest partition if possible */ + child = find_deepest_partition(rte->relid, result_rti, (Expr *) quals); + + /* Substitute parent table with partition */ + if (OidIsValid(child)) + { + Relation child_rel, + parent_rel; + + LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ + + HeapTuple syscache_htup; + char child_relkind; + Oid parent = rte->relid; + + List *translated_vars; + adjust_appendrel_varnos_cxt aav_cxt; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *parent_perminfo, + *child_perminfo; +#endif + + /* Lock 'child' table */ + LockRelationOid(child, lockmode); + + /* Make sure that 'child' exists */ + syscache_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(child)); + if (HeapTupleIsValid(syscache_htup)) + { + Form_pg_class reltup = (Form_pg_class) GETSTRUCT(syscache_htup); + + /* Fetch child's relkind and free cache entry */ + child_relkind = reltup->relkind; + ReleaseSysCache(syscache_htup); + } + else + { + UnlockRelationOid(child, lockmode); + return; /* nothing to do here */ + } + +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + parent_perminfo = getRTEPermissionInfo(parse->rteperminfos, rte); +#endif + /* Update RTE's relid and relkind (for FDW) */ + rte->relid = child; + rte->relkind = child_relkind; + +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* Copy parent RTEPermissionInfo. 
*/ + rte->perminfoindex = 0; /* expected by addRTEPermissionInfo() */ + child_perminfo = addRTEPermissionInfo(&parse->rteperminfos, rte); + memcpy(child_perminfo, parent_perminfo, sizeof(RTEPermissionInfo)); + + /* Correct RTEPermissionInfo for child. */ + child_perminfo->relid = child; + child_perminfo->inh = false; +#endif + + /* HACK: unset the 'inh' flag (no children) */ + rte->inh = false; + + /* Both tables are already locked */ + child_rel = heap_open_compat(child, NoLock); + parent_rel = heap_open_compat(parent, NoLock); + + make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars, NULL); + + /* Perform some additional adjustments */ + if (!inh_translation_list_is_trivial(translated_vars)) + { + /* Translate varnos for this child */ + aav_cxt.child_varno = result_rti; + aav_cxt.parent_relid = parent; + aav_cxt.parent_reltype = RelationGetDescr(parent_rel)->tdtypeid; + aav_cxt.child_reltype = RelationGetDescr(child_rel)->tdtypeid; + aav_cxt.translated_vars = translated_vars; + adjust_appendrel_varnos((Node *) parse, &aav_cxt); + +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + child_perminfo->selectedCols = translate_col_privs(parent_perminfo->selectedCols, translated_vars); + child_perminfo->insertedCols = translate_col_privs(parent_perminfo->insertedCols, translated_vars); + child_perminfo->updatedCols = translate_col_privs(parent_perminfo->updatedCols, translated_vars); +#else + /* Translate column privileges for this child */ + rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); + rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); + rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); +#endif + } + + /* Close relations (should remain locked, though) */ + heap_close_compat(child_rel, NoLock); + heap_close_compat(parent_rel, NoLock); + } +} + +/* Remap parent's attributes to child ones */ +static Node * +adjust_appendrel_varnos(Node *node, 
adjust_appendrel_varnos_cxt *context) +{ + if (node == NULL) + return NULL; + + if (IsA(node, Query)) + { + Query *query = (Query *) node; + ListCell *lc; + + /* FIXME: we might need to reorder TargetEntries */ + foreach (lc, query->targetList) + { + TargetEntry *te = (TargetEntry *) lfirst(lc); + Var *child_var; + + if (te->resjunk) + continue; + + if (te->resno > list_length(context->translated_vars)) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + te->resno, get_rel_name(context->parent_relid)); + + child_var = list_nth(context->translated_vars, te->resno - 1); + if (!child_var) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + te->resno, get_rel_name(context->parent_relid)); + + /* Transform attribute number */ + te->resno = child_var->varattno; + } + + /* NOTE: we shouldn't copy top-level Query */ + return (Node *) query_tree_mutator((Query *) node, + adjust_appendrel_varnos, + context, + (QTW_IGNORE_RC_SUBQUERIES | + QTW_DONT_COPY_QUERY)); + } + + if (IsA(node, Var)) + { + Var *var = (Var *) node; + + /* See adjust_appendrel_attrs_mutator() */ + if (var->varno == context->child_varno) + { + if (var->varattno > 0) + { + Var *child_var; + + var = copyObject(var); + + if (var->varattno > list_length(context->translated_vars)) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); + + child_var = list_nth(context->translated_vars, var->varattno - 1); + if (!child_var) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); + + /* Transform attribute number */ + var->varattno = child_var->varattno; + } + else if (var->varattno == 0) + { + ConvertRowtypeExpr *r = makeNode(ConvertRowtypeExpr); + + Assert(var->vartype = context->parent_reltype); + + r->arg = (Expr *) var; + r->resulttype = context->parent_reltype; + r->convertformat = COERCE_IMPLICIT_CAST; + r->location = -1; + + /* Make sure the Var node has 
the right type ID, too */ + var->vartype = context->child_reltype; + + return (Node *) r; + } + } + + return (Node *) var; + } + + if (IsA(node, SubLink)) + { + SubLink *sl = (SubLink *) node; + + /* Examine its expression */ + sl->testexpr = expression_tree_mutator_compat(sl->testexpr, + adjust_appendrel_varnos, + context); + return (Node *) sl; + } + + return expression_tree_mutator_compat(node, + adjust_appendrel_varnos, + context); +} + + +/* + * ---------------------------------------------------- + * PartitionFilter and PartitionRouter -related stuff + * ---------------------------------------------------- + */ + +/* Add PartitionFilter nodes to the plan tree */ +Plan * +add_partition_filters(List *rtable, Plan *plan) +{ + if (pg_pathman_enable_partition_filter) + return plan_tree_visitor(plan, partition_filter_visitor, rtable); + + return NULL; +} + +/* Add PartitionRouter nodes to the plan tree */ +Plan * +add_partition_routers(List *rtable, Plan *plan) +{ + if (pg_pathman_enable_partition_router) + return plan_tree_visitor(plan, partition_router_visitor, rtable); + + return NULL; +} + +/* + * Add PartitionFilters to ModifyTable node's children. + * + * 'context' should point to the PlannedStmt->rtable. 
+ */ +static Plan * +partition_filter_visitor(Plan *plan, void *context) +{ + List *rtable = (List *) context; + ModifyTable *modify_table = (ModifyTable *) plan; +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + /* + * We have only one subplan for 14: need to modify it without + * using any cycle + */ + Plan *subplan = outerPlan(modify_table); + ListCell *lc2, + *lc3; +#else + ListCell *lc1, + *lc2, + *lc3; +#endif + + /* Skip if not ModifyTable with 'INSERT' command */ + if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) + return NULL; + + Assert(rtable && IsA(rtable, List)); + + lc3 = list_head(modify_table->returningLists); +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + lc2 = list_head(modify_table->resultRelations); +#else + forboth (lc1, modify_table->plans, + lc2, modify_table->resultRelations) +#endif + { + Index rindex = lfirst_int(lc2); + Oid relid = getrelid(rindex, rtable); + + /* Check that table is partitioned */ + if (has_pathman_relation_info(relid)) + { + List *returning_list = NIL; + + /* Extract returning list if possible */ + if (lc3) + { + returning_list = lfirst(lc3); +#if PG_VERSION_NUM < 140000 + lc3 = lnext_compat(modify_table->returningLists, lc3); +#endif + } + +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + outerPlan(modify_table) = make_partition_filter(subplan, relid, + modify_table->nominalRelation, + modify_table->onConflictAction, + modify_table->operation, + returning_list); +#else + lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), relid, + modify_table->nominalRelation, + modify_table->onConflictAction, + modify_table->operation, + returning_list); +#endif + } + } + + return NULL; +} + +/* + * Add PartitionRouter to ModifyTable node's children. + * + * 'context' should point to the PlannedStmt->rtable. 
+ */ +static Plan * +partition_router_visitor(Plan *plan, void *context) +{ + List *rtable = (List *) context; + ModifyTable *modify_table = (ModifyTable *) plan; +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + /* + * We have only one subplan for 14: need to modify it without + * using any cycle + */ + Plan *subplan = outerPlan(modify_table); + ListCell *lc2, + *lc3; +#else + ListCell *lc1, + *lc2, + *lc3; +#endif + bool changed = false; + + /* Skip if not ModifyTable with 'UPDATE' command */ + if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) + return NULL; + + Assert(rtable && IsA(rtable, List)); + + if (modifytable_contains_fdw(rtable, modify_table)) + { + ereport(WARNING, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg(UPDATE_NODE_NAME " does not support foreign data wrappers"))); + return NULL; + } + + lc3 = list_head(modify_table->returningLists); +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + lc2 = list_head(modify_table->resultRelations); +#else + forboth (lc1, modify_table->plans, + lc2, modify_table->resultRelations) +#endif + { + Index rindex = lfirst_int(lc2); + Oid relid = getrelid(rindex, rtable), + tmp_relid; + + /* Find topmost parent */ + while (OidIsValid(tmp_relid = get_parent_of_partition(relid))) + relid = tmp_relid; + + /* Check that table is partitioned */ + if (has_pathman_relation_info(relid)) + { + List *returning_list = NIL; + Plan *prouter, + *pfilter; + + /* Extract returning list if possible */ + if (lc3) + { + returning_list = lfirst(lc3); +#if PG_VERSION_NUM < 140000 + lc3 = lnext_compat(modify_table->returningLists, lc3); +#endif + } + +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + prouter = make_partition_router(subplan, + modify_table->epqParam, + modify_table->nominalRelation); +#else + prouter = make_partition_router((Plan *) lfirst(lc1), + modify_table->epqParam, + modify_table->nominalRelation); +#endif + + pfilter = make_partition_filter((Plan *) 
prouter, relid, + modify_table->nominalRelation, + ONCONFLICT_NONE, + CMD_UPDATE, + returning_list); + +#if PG_VERSION_NUM >= 140000 /* for changes in 86dc90056dfd */ + outerPlan(modify_table) = pfilter; +#else + lfirst(lc1) = pfilter; +#endif + changed = true; + } + } + + if (changed) + return make_partition_overseer(plan); + + return NULL; +} + + +/* + * ----------------------------------------------- + * Parenthood safety checks (SELECT * FROM ONLY) + * ----------------------------------------------- + */ + +#define RPS_STATUS_ASSIGNED ( (Index) 0x2 ) +#define RPS_ENABLE_PARENT ( (Index) 0x1 ) + +/* Set parenthood status (per query level) */ +void +assign_rel_parenthood_status(RangeTblEntry *rte, + rel_parenthood_status new_status) +{ + Assert(rte->rtekind != RTE_CTE); + + /* HACK: set relevant bits in RTE */ + rte->ctelevelsup |= RPS_STATUS_ASSIGNED; + if (new_status == PARENTHOOD_ALLOWED) + rte->ctelevelsup |= RPS_ENABLE_PARENT; +} + +/* Get parenthood status (per query level) */ +rel_parenthood_status +get_rel_parenthood_status(RangeTblEntry *rte) +{ + Assert(rte->rtekind != RTE_CTE); + + /* HACK: check relevant bits in RTE */ + if (rte->ctelevelsup & RPS_STATUS_ASSIGNED) + return (rte->ctelevelsup & RPS_ENABLE_PARENT) ? + PARENTHOOD_ALLOWED : + PARENTHOOD_DISALLOWED; + + /* Not found, return stub value */ + return PARENTHOOD_NOT_SET; +} + + +/* + * -------------------------- + * Various helper functions + * -------------------------- + */ + +/* Does ModifyTable node contain any FDWs? */ +static bool +modifytable_contains_fdw(List *rtable, ModifyTable *node) +{ + ListCell *lc; + + foreach(lc, node->resultRelations) + { + Index rti = lfirst_int(lc); + RangeTblEntry *rte = rt_fetch(rti, rtable); + + if (rte->relkind == RELKIND_FOREIGN_TABLE) + return true; + } + + return false; +} + +/* + * Find a single deepest subpartition using quals. + * It's always better to narrow down the set of tables to be scanned. + * Return InvalidOid if it's not possible (e.g. 
table is not partitioned). + */ +static Oid +find_deepest_partition(Oid relid, Index rti, Expr *quals) +{ + PartRelationInfo *prel; + Oid result = InvalidOid; + + /* Exit if there's no quals (no use) */ + if (!quals) + return result; + + /* Try pruning if table is partitioned */ + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + Node *prel_expr; + WalkerContext context; + List *ranges; + WrapperNode *wrap; + + /* Prepare partitioning expression */ + prel_expr = PrelExpressionForRelid(prel, rti); + + /* First we select all available partitions... */ + ranges = list_make1_irange_full(prel, IR_COMPLETE); + + /* Parse syntax tree and extract partition ranges */ + InitWalkerContext(&context, prel_expr, prel, NULL); + wrap = walk_expr_tree(quals, &context); + ranges = irange_list_intersection(ranges, wrap->rangeset); + + switch (irange_list_length(ranges)) + { + /* Scan only parent (don't do constraint elimination) */ + case 0: + result = relid; + break; + + /* Handle the remaining partition */ + case 1: + if (!prel->enable_parent) + { + IndexRange irange = linitial_irange(ranges); + Oid *children = PrelGetChildrenArray(prel), + child = children[irange_lower(irange)]; + + /* Scan this partition */ + result = child; + + /* Try to go deeper and see if there are subpartitions */ + child = find_deepest_partition(child, rti, quals); + if (OidIsValid(child)) + result = child; + } + break; + + default: + break; + } + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + } + + return result; +} + +/* Replace extern param nodes with consts */ +static Node * +eval_extern_params_mutator(Node *node, ParamListInfo params) +{ + if (node == NULL) + return NULL; + + if (IsA(node, Param)) + { + Param *param = (Param *) node; + + Assert(params); + + /* Look to see if we've been given a value for this Param */ + if (param->paramkind == PARAM_EXTERN && + param->paramid > 0 && + param->paramid <= params->numParams) + { + ParamExternData prmdata; /* storage for 'prm' (PG 11) */ + ParamExternData *prm = CustomEvalParamExternCompat(param, + params, + &prmdata); + + if (OidIsValid(prm->ptype)) + { + /* OK to substitute parameter value? */ + if (prm->pflags & PARAM_FLAG_CONST) + { + /* + * Return a Const representing the param value. + * Must copy pass-by-ref datatypes, since the + * Param might be in a memory context + * shorter-lived than our output plan should be. + */ + int16 typLen; + bool typByVal; + Datum pval; + + Assert(prm->ptype == param->paramtype); + get_typlenbyval(param->paramtype, + &typLen, &typByVal); + if (prm->isnull || typByVal) + pval = prm->value; + else + pval = datumCopy(prm->value, typByVal, typLen); + return (Node *) makeConst(param->paramtype, + param->paramtypmod, + param->paramcollid, + (int) typLen, + pval, + prm->isnull, + typByVal); + } + } + } + } + + return expression_tree_mutator_compat(node, eval_extern_params_mutator, + (void *) params); +} + +/* Check whether Var translation list is trivial (no shuffle) */ +static bool +inh_translation_list_is_trivial(List *translated_vars) +{ + ListCell *lc; + AttrNumber i = 1; + + foreach (lc, translated_vars) + { + Var *var = (Var *) lfirst(lc); + + if (var && var->varattno != i) + return false; + + i++; + } + + return true; +} + + +/* + * ----------------------------------------------- + * Count number of times we've visited planner() + * ----------------------------------------------- + */ + +static int32 planner_calls = 0; + +void 
+incr_planner_calls_count(void) +{ + Assert(planner_calls < PG_INT32_MAX); + + planner_calls++; +} + +void +decr_planner_calls_count(void) +{ + Assert(planner_calls > 0); + + planner_calls--; +} + +int32 +get_planner_calls_count(void) +{ + return planner_calls; +} diff --git a/src/rangeset.c b/src/rangeset.c index beff56de..9f7b2aa1 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -3,221 +3,391 @@ * rangeset.c * IndexRange functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" #include "rangeset.h" -/* Check if two ranges are intersecting */ -bool -irange_intersects(IndexRange a, IndexRange b) + +static IndexRange irange_handle_cover_internal(IndexRange ir_covering, + IndexRange ir_inner, + List **new_iranges); + +static IndexRange irange_union_internal(IndexRange first, + IndexRange second, + List **new_iranges); + + +/* Make union of two conjuncted ranges */ +IndexRange +irange_union_simple(IndexRange a, IndexRange b) { - return (a.ir_lower <= b.ir_upper) && - (b.ir_lower <= a.ir_upper); + /* Ranges should be connected somehow */ + Assert(iranges_intersect(a, b) || iranges_adjoin(a, b)); + + return make_irange(Min(irange_lower(a), irange_lower(b)), + Max(irange_upper(a), irange_upper(b)), + is_irange_lossy(a) && is_irange_lossy(b)); } -/* Check if two ranges are conjuncted */ -bool -irange_conjuncted(IndexRange a, IndexRange b) +/* Get intersection of two conjuncted ranges */ +IndexRange +irange_intersection_simple(IndexRange a, IndexRange b) { - return (a.ir_lower - 1 <= b.ir_upper) && - (b.ir_lower - 1 <= a.ir_upper); + /* Ranges should be connected somehow */ + Assert(iranges_intersect(a, b) || iranges_adjoin(a, b)); + + return make_irange(Max(irange_lower(a), irange_lower(b)), + Min(irange_upper(a), irange_upper(b)), + is_irange_lossy(a) || is_irange_lossy(b)); } -/* Make union of two 
ranges. They should have the same lossiness. */ -IndexRange -irange_union(IndexRange a, IndexRange b) + +/* Split covering IndexRange into several IndexRanges if needed */ +static IndexRange +irange_handle_cover_internal(IndexRange ir_covering, + IndexRange ir_inner, + List **new_iranges) { - Assert(a.ir_lossy == b.ir_lossy); - return make_irange(Min(a.ir_lower, b.ir_lower), - Max(a.ir_upper, b.ir_upper), - a.ir_lossy); + /* Equal lossiness should've been taken into consideration earlier */ + Assert(is_irange_lossy(ir_covering) != is_irange_lossy(ir_inner)); + + /* range 'ir_inner' is lossy */ + if (is_irange_lossy(ir_covering) == false) + return ir_covering; + + /* range 'ir_covering' is lossy, 'ir_inner' is lossless! */ + else + { + IndexRange ret; /* IndexRange to be returned */ + + /* 'left_range_upper' should not be less than 'left_range_lower' */ + uint32 left_range_lower = irange_lower(ir_covering), + left_range_upper = Max(irb_pred(irange_lower(ir_inner)), + left_range_lower); + + /* 'right_range_lower' should not be greater than 'right_range_upper' */ + uint32 right_range_upper = irange_upper(ir_covering), + right_range_lower = Min(irb_succ(irange_upper(ir_inner)), + right_range_upper); + + /* We have to split the covering lossy IndexRange */ + Assert(is_irange_lossy(ir_covering) == true); + + /* 'ir_inner' should not cover leftmost IndexRange */ + if (irange_lower(ir_inner) > left_range_upper) + { + IndexRange left_range; + + /* Leftmost IndexRange is lossy */ + left_range = make_irange(left_range_lower, + left_range_upper, + IR_LOSSY); + + /* Append leftmost IndexRange ('left_range') to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, left_range); + } + + /* 'ir_inner' should not cover rightmost IndexRange */ + if (right_range_lower > irange_upper(ir_inner)) + { + IndexRange right_range; + + /* Rightmost IndexRange is also lossy */ + right_range = make_irange(right_range_lower, + right_range_upper, + IR_LOSSY); + + /* 'right_range' is indeed 
rightmost IndexRange */ + ret = right_range; + + /* Append medial IndexRange ('ir_inner') to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, ir_inner); + } + /* Else return 'ir_inner' as rightmost IndexRange */ + else ret = ir_inner; + + /* Return rightmost IndexRange (right_range | ir_inner) */ + return ret; + } } -/* Get intersection of two ranges */ -IndexRange -irange_intersect(IndexRange a, IndexRange b) +/* Calculate union of two IndexRanges, return rightmost IndexRange */ +static IndexRange +irange_union_internal(IndexRange first, + IndexRange second, + List **new_iranges) { - return make_irange(Max(a.ir_lower, b.ir_lower), - Min(a.ir_upper, b.ir_upper), - a.ir_lossy || b.ir_lossy); + /* Assert that both IndexRanges are valid */ + Assert(is_irange_valid(first)); + Assert(is_irange_valid(second)); + + /* Swap 'first' and 'second' if order is incorrect */ + if (irange_lower(first) > irange_lower(second)) + { + IndexRange temp; + + temp = first; + first = second; + second = temp; + } + + /* IndexRanges intersect */ + if (iranges_intersect(first, second)) + { + /* Calculate the union of 'first' and 'second' */ + IndexRange ir_union = irange_union_simple(first, second); + + /* if lossiness is the same, unite them and skip */ + if (is_irange_lossy(first) == is_irange_lossy(second)) + return ir_union; + + /* range 'first' covers 'second' */ + if (irange_eq_bounds(ir_union, first)) + { + /* Return rightmost IndexRange, save others to 'new_iranges' */ + return irange_handle_cover_internal(first, second, new_iranges); + } + /* range 'second' covers 'first' */ + else if (irange_eq_bounds(ir_union, second)) + { + /* Return rightmost IndexRange, save others to 'new_iranges' */ + return irange_handle_cover_internal(second, first, new_iranges); + } + /* No obvious leader, lossiness differs */ + else + { + /* range 'second' is lossy */ + if (is_irange_lossy(first) == false) + { + IndexRange ret; + + /* Set new current IndexRange */ + ret = 
make_irange(irb_succ(irange_upper(first)), + irange_upper(second), + is_irange_lossy(second)); + + /* Append lower part to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, first); + + /* Return a part of 'second' */ + return ret; + } + /* range 'first' is lossy */ + else + { + IndexRange new_irange; + + new_irange = make_irange(irange_lower(first), + irb_pred(irange_lower(second)), + is_irange_lossy(first)); + + /* Append lower part to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, new_irange); + + /* Return 'second' */ + return second; + } + } + } + /* IndexRanges do not intersect */ + else + { + /* Try to unite these IndexRanges if it's possible */ + if (irange_cmp_lossiness(first, second) == IR_EQ_LOSSINESS && + iranges_adjoin(first, second)) + { + /* Return united IndexRange */ + return irange_union_simple(first, second); + } + /* IndexRanges are not adjoint */ + else + { + /* add 'first' to 'new_iranges' */ + *new_iranges = lappend_irange(*new_iranges, first); + + /* Return 'second' */ + return second; + } + } } -/* - * Make union of two index rage lists. 
- */ +/* Make union of two index range lists */ List * irange_list_union(List *a, List *b) { - ListCell *ca, - *cb; - List *result = NIL; - IndexRange cur = InvalidIndexRange; - bool have_cur = false; + ListCell *ca, /* iterator of A */ + *cb; /* iterator of B */ + List *result = NIL; /* list of IndexRanges */ + IndexRange cur = InvalidIndexRange; /* current irange */ + /* Initialize iterators */ ca = list_head(a); cb = list_head(b); + /* Loop until we have no cells */ while (ca || cb) { IndexRange next = InvalidIndexRange; - /* Fetch next range with lesser lower bound */ + /* Fetch next irange with lesser lower bound */ if (ca && cb) { - if (lfirst_irange(ca).ir_lower <= lfirst_irange(cb).ir_lower) + if (irange_lower(lfirst_irange(ca)) <= irange_lower(lfirst_irange(cb))) { next = lfirst_irange(ca); - ca = lnext(ca); + ca = lnext_compat(a, ca); /* move to next cell */ } else { next = lfirst_irange(cb); - cb = lnext(cb); + cb = lnext_compat(b, cb); /* move to next cell */ } } + /* Fetch next irange from A */ else if (ca) { next = lfirst_irange(ca); - ca = lnext(ca); + ca = lnext_compat(a, ca); /* move to next cell */ } + /* Fetch next irange from B */ else if (cb) { next = lfirst_irange(cb); - cb = lnext(cb); + cb = lnext_compat(b, cb); /* move to next cell */ } - if (!have_cur) + /* Put this irange to 'cur' if don't have it yet */ + if (!is_irange_valid(cur)) { - /* Put this range as current value if don't have it yet */ cur = next; - have_cur = true; - } - else - { - if (irange_conjuncted(next, cur)) - { - /* - * Ranges are conjuncted, try to unify them. 
- */ - if (next.ir_lossy == cur.ir_lossy) - { - cur = irange_union(next, cur); - } - else - { - if (!cur.ir_lossy) - { - result = lappend_irange(result, cur); - cur = make_irange(cur.ir_upper + 1, - next.ir_upper, - next.ir_lossy); - } - else - { - result = lappend_irange(result, - make_irange(cur.ir_lower, - next.ir_lower - 1, - cur.ir_lossy)); - cur = next; - } - } - } - else - { - /* - * Next range is not conjuncted with current. Put current to the - * result list and put next as current. - */ - result = lappend_irange(result, cur); - cur = next; - } + continue; /* skip this iteration */ } + + /* Unite 'cur' and 'next' in an appropriate way */ + cur = irange_union_internal(cur, next, &result); } /* Put current value into result list if any */ - if (have_cur) + if (is_irange_valid(cur)) result = lappend_irange(result, cur); return result; } -/* - * Find intersection of two range lists. - */ +/* Find intersection of two range lists */ List * -irange_list_intersect(List *a, List *b) +irange_list_intersection(List *a, List *b) { - ListCell *ca, - *cb; - List *result = NIL; - IndexRange ra, rb; + ListCell *ca, /* iterator of A */ + *cb; /* iterator of B */ + List *result = NIL; /* list of IndexRanges */ + /* Initialize iterators */ ca = list_head(a); cb = list_head(b); + /* Loop until we have no cells */ while (ca && cb) { - ra = lfirst_irange(ca); - rb = lfirst_irange(cb); + IndexRange ra = lfirst_irange(ca), + rb = lfirst_irange(cb); + + /* Assert that both IndexRanges are valid */ + Assert(is_irange_valid(ra)); + Assert(is_irange_valid(rb)); /* Only care about intersecting ranges */ - if (irange_intersects(ra, rb)) + if (iranges_intersect(ra, rb)) { - IndexRange intersect, last; + IndexRange ir_intersection; + bool glued_to_last = false; /* - * Get intersection and try to "glue" it to previous range, - * put it separately otherwise. + * Get intersection and try to "glue" it to + * last irange, put it separately otherwise. 
*/ - intersect = irange_intersect(ra, rb); + ir_intersection = irange_intersection_simple(ra, rb); if (result != NIL) { - last = llast_irange(result); - if (irange_conjuncted(last, intersect) && - last.ir_lossy == intersect.ir_lossy) - { - llast(result) = alloc_irange(irange_union(last, intersect)); - } - else + IndexRange last = llast_irange(result); + + /* Test if we can glue 'last' and 'ir_intersection' */ + if (irange_cmp_lossiness(last, ir_intersection) == IR_EQ_LOSSINESS && + iranges_adjoin(last, ir_intersection)) { - result = lappend_irange(result, intersect); + IndexRange ir_union = irange_union_simple(last, ir_intersection); + + /* We allocate a new IndexRange for safety */ + llast(result) = alloc_irange(ir_union); + + /* Successfully glued them */ + glued_to_last = true; } } - else - { - result = lappend_irange(result, intersect); - } + + /* Append IndexRange if we couldn't glue it */ + if (!glued_to_last) + result = lappend_irange(result, ir_intersection); } /* - * Fetch next ranges. We use upper bound of current range to determine - * which lists to fetch, since lower bound of next range is greater (or - * equal) to upper bound of current. + * Fetch next iranges. We use upper bound of current irange to + * determine which lists to fetch, since lower bound of next + * irange is greater (or equal) to upper bound of current. 
*/ - if (ra.ir_upper <= rb.ir_upper) - ca = lnext(ca); - if (ra.ir_upper >= rb.ir_upper) - cb = lnext(cb); + if (irange_upper(ra) <= irange_upper(rb)) + ca = lnext_compat(a, ca); + if (irange_upper(ra) >= irange_upper(rb)) + cb = lnext_compat(b, cb); } return result; } +/* Set lossiness of rangeset */ +List * +irange_list_set_lossiness(List *ranges, bool lossy) +{ + List *result = NIL; + ListCell *lc; + + if (ranges == NIL) + return NIL; + + foreach (lc, ranges) + { + IndexRange ir = lfirst_irange(lc); + + result = lappend_irange(result, make_irange(irange_lower(ir), + irange_upper(ir), + lossy)); + } + + /* Unite adjacent and overlapping IndexRanges */ + return irange_list_union(result, NIL); +} + /* Get total number of elements in range list */ int irange_list_length(List *rangeset) { ListCell *lc; - int result = 0; + uint32 result = 0; foreach (lc, rangeset) { - IndexRange irange = lfirst_irange(lc); - result += irange.ir_upper - irange.ir_lower + 1; + IndexRange irange = lfirst_irange(lc); + uint32 diff = irange_upper(irange) - irange_lower(irange); + + Assert(irange_upper(irange) >= irange_lower(irange)); + + result += diff + 1; } - return result; + + return (int) result; } /* Find particular index in range list */ @@ -229,12 +399,15 @@ irange_list_find(List *rangeset, int index, bool *lossy) foreach (lc, rangeset) { IndexRange irange = lfirst_irange(lc); - if (index >= irange.ir_lower && index <= irange.ir_upper) + + if (index >= irange_lower(irange) && index <= irange_upper(irange)) { if (lossy) - *lossy = irange.ir_lossy; + *lossy = is_irange_lossy(irange); + return true; } } + return false; } diff --git a/src/rangeset.h b/src/rangeset.h deleted file mode 100644 index ffe7f31f..00000000 --- a/src/rangeset.h +++ /dev/null @@ -1,75 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * rangeset.h - * IndexRange functions - * - * Copyright (c) 2015-2016, Postgres Professional - * - * 
------------------------------------------------------------------------ - */ - -#ifndef PATHMAN_RANGESET_H -#define PATHMAN_RANGESET_H - - -#include "pathman.h" -#include "nodes/pg_list.h" - - -/* - * IndexRange contains a set of selected partitions. - */ -typedef struct { - bool ir_valid : 1; - bool ir_lossy : 1; /* should we use IndexScan? */ - uint32 ir_lower : 31; /* lower bound */ - uint32 ir_upper : 31; /* upper bound */ -} IndexRange; - - -#define RANGE_MASK 0xEFFFFFFF -#define InvalidIndexRange { false, false, 0, 0 } - - -inline static IndexRange -make_irange(uint32 lower, uint32 upper, bool lossy) -{ - IndexRange result; - - result.ir_valid = true; - result.ir_lossy = lossy; - result.ir_lower = (lower & RANGE_MASK); - result.ir_upper = (upper & RANGE_MASK); - - return result; -} - -inline static IndexRange * -alloc_irange(IndexRange irange) -{ - IndexRange *result = (IndexRange *) palloc(sizeof(IndexRange)); - - memcpy((void *) result, (void *) &irange, sizeof(IndexRange)); - - return result; -} - -#define lfirst_irange(lc) ( *(IndexRange *) lfirst(lc) ) -#define lappend_irange(list, irange) ( lappend((list), alloc_irange(irange)) ) -#define lcons_irange(irange, list) ( lcons(alloc_irange(irange), (list)) ) -#define list_make1_irange(irange) ( lcons(alloc_irange(irange), NIL) ) -#define llast_irange(list) ( lfirst_irange(list_tail(list)) ) -#define linitial_irange(list) ( lfirst_irange(list_head(list)) ) - - -/* rangeset.c */ -bool irange_intersects(IndexRange a, IndexRange b); -bool irange_conjuncted(IndexRange a, IndexRange b); -IndexRange irange_union(IndexRange a, IndexRange b); -IndexRange irange_intersect(IndexRange a, IndexRange b); -List *irange_list_union(List *a, List *b); -List *irange_list_intersect(List *a, List *b); -int irange_list_length(List *rangeset); -bool irange_list_find(List *rangeset, int index, bool *lossy); - -#endif diff --git a/src/relation_info.c b/src/relation_info.c index aaa3fd60..2794a183 100644 --- a/src/relation_info.c 
+++ b/src/relation_info.c @@ -3,656 +3,906 @@ * relation_info.c * Data structures describing partitioned relations * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" + #include "relation_info.h" #include "init.h" #include "utils.h" #include "xact_handling.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/genam.h" +#include "access/table.h" +#endif #include "access/xact.h" +#include "catalog/catalog.h" #include "catalog/indexing.h" +#include "catalog/pg_constraint.h" #include "catalog/pg_inherits.h" +#include "catalog/pg_type.h" #include "miscadmin.h" +#include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else +#include "optimizer/clauses.h" +#include "optimizer/var.h" +#endif +#include "parser/analyze.h" +#include "parser/parser.h" #include "storage/lmgr.h" +#include "tcop/tcopprot.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" -#include "utils/lsyscache.h" +#include "utils/inval.h" #include "utils/memutils.h" -#include "utils/snapmgr.h" +#include "utils/resowner.h" +#include "utils/ruleutils.h" +#include "utils/syscache.h" +#include "utils/lsyscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 90600 +#include "optimizer/planmain.h" +#endif +#if PG_VERSION_NUM < 110000 && PG_VERSION_NUM >= 90600 +#include "catalog/pg_constraint_fn.h" +#endif -/* - * We delay all invalidation jobs received in relcache hook. 
- */ -static List *delayed_invalidation_parent_rels = NIL; -static List *delayed_invalidation_vague_rels = NIL; -static bool delayed_shutdown = false; /* pathman was dropped */ + +/* Error messages for partitioning expression */ +#define PARSE_PART_EXPR_ERROR "failed to parse partitioning expression \"%s\"" +#define COOK_PART_EXPR_ERROR "failed to analyze partitioning expression \"%s\"" -/* Add unique Oid to list, allocate in TopMemoryContext */ -#define list_add_unique(list, oid) \ +#ifdef USE_RELINFO_LEAK_TRACKER +#undef get_pathman_relation_info +#undef close_pathman_relation_info + +const char *prel_resowner_function = NULL; +int prel_resowner_line = 0; + +#define LeakTrackerAdd(prel) \ + do { \ + MemoryContext leak_tracker_add_old_mcxt = MemoryContextSwitchTo((prel)->mcxt); \ + (prel)->owners = \ + list_append_unique( \ + (prel)->owners, \ + list_make2(makeString((char *) prel_resowner_function), \ + makeInteger(prel_resowner_line))); \ + MemoryContextSwitchTo(leak_tracker_add_old_mcxt); \ + \ + (prel)->access_total++; \ + } while (0) + +#define LeakTrackerPrint(prel) \ do { \ - MemoryContext old_mcxt = MemoryContextSwitchTo(TopMemoryContext); \ - list = list_append_unique_oid(list, ObjectIdGetDatum(oid)); \ - MemoryContextSwitchTo(old_mcxt); \ + ListCell *leak_tracker_print_lc; \ + foreach (leak_tracker_print_lc, (prel)->owners) \ + { \ + char *fun = strVal(linitial(lfirst(leak_tracker_print_lc))); \ + int line = intVal(lsecond(lfirst(leak_tracker_print_lc))); \ + elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); \ + } \ } while (0) -#define free_invalidation_list(list) \ +#define LeakTrackerFree(prel) \ do { \ - list_free(list); \ - list = NIL; \ + ListCell *leak_tracker_free_lc; \ + foreach (leak_tracker_free_lc, (prel)->owners) \ + { \ + list_free_deep(lfirst(leak_tracker_free_lc)); \ + } \ + list_free((prel)->owners); \ + (prel)->owners = NIL; \ } while (0) +#else +#define LeakTrackerAdd(prel) +#define LeakTrackerPrint(prel) +#define 
LeakTrackerFree(prel) +#endif + +/* Comparison function info */ +typedef struct cmp_func_info +{ + FmgrInfo flinfo; + Oid collid; +} cmp_func_info; -static bool try_perform_parent_refresh(Oid parent); -static Oid try_syscache_parent_search(Oid partition, PartParentSearch *status); -static Oid get_parent_of_partition_internal(Oid partition, - PartParentSearch *status, - HASHACTION action); +typedef struct prel_resowner_info +{ + ResourceOwner owner; + List *prels; +} prel_resowner_info; /* - * refresh\invalidate\get\remove PartRelationInfo functions. + * For pg_pathman.enable_bounds_cache GUC. */ +bool pg_pathman_enable_bounds_cache = true; -/* Create or update PartRelationInfo in local cache. Might emit ERROR. */ -const PartRelationInfo * -refresh_pathman_relation_info(Oid relid, - PartType partitioning_type, - const char *part_column_name) -{ - const LOCKMODE lockmode = AccessShareLock; - const TypeCacheEntry *typcache; - Oid *prel_children; - uint32 prel_children_count = 0, - i; - bool found; - PartRelationInfo *prel; - Datum param_values[Natts_pathman_config_params]; - bool param_isnull[Natts_pathman_config_params]; - - prel = (PartRelationInfo *) hash_search(partitioned_rels, - (const void *) &relid, - HASH_ENTER, &found); - elog(DEBUG2, - found ? - "Refreshing record for relation %u in pg_pathman's cache [%u]" : - "Creating new record for relation %u in pg_pathman's cache [%u]", - relid, MyProcPid); - /* - * NOTE: Trick clang analyzer (first access without NULL pointer check). - * Access to field 'valid' results in a dereference of a null pointer. - */ - prel->cmp_proc = InvalidOid; +/* + * We delay all invalidation jobs received in relcache hook. 
+ */ +static bool delayed_shutdown = false; /* pathman was dropped */ - /* Clear outdated resources */ - if (found && PrelIsValid(prel)) - { - /* Free these arrays iff they're not NULL */ - FreeChildrenArray(prel); - FreeRangesArray(prel); - } +/* + * PartRelationInfo is controlled by ResourceOwner; + * resowner -> List of controlled PartRelationInfos by this ResourceOwner + */ +HTAB *prel_resowner = NULL; - /* First we assume that this entry is invalid */ - prel->valid = false; - /* Make both arrays point to NULL */ - prel->children = NULL; - prel->ranges = NULL; +/* Handy wrappers for Oids */ +#define bsearch_oid(key, array, array_size) \ + bsearch((const void *) &(key), (array), (array_size), sizeof(Oid), oid_cmp) - /* Set partitioning type */ - prel->parttype = partitioning_type; - /* Initialize PartRelationInfo using syscache & typcache */ - prel->attnum = get_attnum(relid, part_column_name); +static PartRelationInfo *build_pathman_relation_info(Oid relid, Datum *values); +static void free_pathman_relation_info(PartRelationInfo *prel); +static void invalidate_psin_entries_using_relid(Oid relid); +static void invalidate_psin_entry(PartStatusInfo *psin); - /* Attribute number sanity check */ - if (prel->attnum == InvalidAttrNumber) - elog(ERROR, "Relation \"%s\" has no column \"%s\"", - get_rel_name_or_relid(relid), part_column_name); +static PartRelationInfo *resowner_prel_add(PartRelationInfo *prel); +static PartRelationInfo *resowner_prel_del(PartRelationInfo *prel); +static void resonwner_prel_callback(ResourceReleasePhase phase, + bool isCommit, + bool isTopLevel, + void *arg); - /* Fetch atttypid, atttypmod, and attcollation in a single cache lookup */ - get_atttypetypmodcoll(relid, prel->attnum, - &prel->atttype, &prel->atttypmod, &prel->attcollid); +static void fill_prel_with_partitions(PartRelationInfo *prel, + const Oid *partitions, + const uint32 parts_count); - /* Fetch HASH & CMP fuctions and other stuff from type cache */ - typcache = 
lookup_type_cache(prel->atttype, - TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); +static void fill_pbin_with_bounds(PartBoundInfo *pbin, + const PartRelationInfo *prel, + const Expr *constraint_expr); - prel->attbyval = typcache->typbyval; - prel->attlen = typcache->typlen; - prel->attalign = typcache->typalign; +static int cmp_range_entries(const void *p1, const void *p2, void *arg); - prel->cmp_proc = typcache->cmp_proc; - prel->hash_proc = typcache->hash_proc; +static void forget_bounds_of_partition(Oid partition); - LockRelationOid(relid, lockmode); - prel_children = find_inheritance_children_array(relid, lockmode, - &prel_children_count); - UnlockRelationOid(relid, lockmode); +static bool query_contains_subqueries(Node *node, void *context); - /* If there's no children at all, remove this entry */ - if (prel_children_count == 0) - { - remove_pathman_relation_info(relid); - return NULL; - } - /* - * Fill 'prel' with partition info, raise ERROR if anything is wrong. - * This way PartRelationInfo will remain 'invalid', and 'get' procedure - * will try to refresh it again (and again), until the error is fixed - * by user manually (i.e. invalid check constraints etc). - */ - fill_prel_with_partitions(prel_children, prel_children_count, prel); +void +init_relation_info_static_data(void) +{ + DefineCustomBoolVariable("pg_pathman.enable_bounds_cache", + "Make updates of partition dispatch cache faster", + NULL, + &pg_pathman_enable_bounds_cache, + true, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); +} - /* Add "partition+parent" tuple to cache */ - for (i = 0; i < prel_children_count; i++) - cache_parent_of_partition(prel_children[i], relid); - pfree(prel_children); +/* + * Status cache routines. 
+ */ - /* Read additional parameters ('enable_parent' and 'auto' at the moment) */ - if (read_pathman_params(relid, param_values, param_isnull)) - { - prel->enable_parent = param_values[Anum_pathman_config_params_enable_parent - 1]; - prel->auto_partition = param_values[Anum_pathman_config_params_auto - 1]; - } - /* Else set default values if they cannot be found */ - else - { - prel->enable_parent = false; - prel->auto_partition = true; - } +/* Invalidate PartStatusInfo for 'relid' */ +void +forget_status_of_relation(Oid relid) +{ + PartStatusInfo *psin; + PartParentInfo *ppar; - /* We've successfully built a cache entry */ - prel->valid = true; + /* Find status cache entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + if (psin) + invalidate_psin_entry(psin); - return prel; + /* + * Find parent of this relation. + * + * We don't want to use get_parent_of_partition() + * since it relies upon the syscache. + */ + ppar = pathman_cache_search_relid(parents_cache, + relid, HASH_FIND, + NULL); + + /* Invalidate parent directly */ + if (ppar) + { + /* Find status cache entry for parent */ + psin = pathman_cache_search_relid(status_cache, + ppar->parent_relid, HASH_FIND, + NULL); + if (psin) + invalidate_psin_entry(psin); + } + /* Otherwise, look through all entries */ + else invalidate_psin_entries_using_relid(relid); } -/* Invalidate PartRelationInfo cache entry. Create new entry if 'found' is NULL. */ +/* Invalidate all PartStatusInfo entries */ void -invalidate_pathman_relation_info(Oid relid, bool *found) +invalidate_status_cache(void) { - bool prel_found; - HASHACTION action = found ? 
HASH_FIND : HASH_ENTER; - PartRelationInfo *prel; + invalidate_psin_entries_using_relid(InvalidOid); +} - prel = hash_search(partitioned_rels, - (const void *) &relid, - action, &prel_found); +/* Invalidate PartStatusInfo entry referencing 'relid' */ +static void +invalidate_psin_entries_using_relid(Oid relid) +{ + HASH_SEQ_STATUS status; + PartStatusInfo *psin; - if ((action == HASH_FIND || - (action == HASH_ENTER && prel_found)) && PrelIsValid(prel)) - { - FreeChildrenArray(prel); - FreeRangesArray(prel); + hash_seq_init(&status, status_cache); - prel->valid = false; /* now cache entry is invalid */ - } - /* Handle invalid PartRelationInfo */ - else if (prel) + while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) { - prel->children = NULL; - prel->ranges = NULL; + if (!OidIsValid(relid) || + psin->relid == relid || + (psin->prel && PrelHasPartition(psin->prel, relid))) + { + /* Perform invalidation */ + invalidate_psin_entry(psin); - prel->valid = false; /* now cache entry is invalid */ + /* Exit if exact match */ + if (OidIsValid(relid)) + { + hash_seq_term(&status); + break; + } + } } - - /* Set 'found' if necessary */ - if (found) *found = prel_found; - - elog(DEBUG2, - "Invalidating record for relation %u in pg_pathman's cache [%u]", - relid, MyProcPid); } -/* Get PartRelationInfo from local cache. 
*/ -const PartRelationInfo * -get_pathman_relation_info(Oid relid) +/* Invalidate single PartStatusInfo entry */ +static void +invalidate_psin_entry(PartStatusInfo *psin) { - const PartRelationInfo *prel = hash_search(partitioned_rels, - (const void *) &relid, - HASH_FIND, NULL); +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + psin->relid, MyProcPid); +#endif - /* Refresh PartRelationInfo if needed */ - if (prel && !PrelIsValid(prel)) + if (psin->prel) { - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - /* Check that PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL)) + if (PrelReferenceCount(psin->prel) > 0) { - PartType part_type; - const char *attname; - - /* We can't use 'part_type' & 'attname' from invalid prel */ - part_type = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - attname = TextDatumGetCString(values[Anum_pathman_config_attname - 1]); - - /* Refresh partitioned table cache entry */ - /* TODO: possible refactoring, pass found 'prel' instead of searching */ - prel = refresh_pathman_relation_info(relid, - part_type, - attname); - Assert(PrelIsValid(prel)); /* it MUST be valid if we got here */ + /* Mark entry as outdated and detach it */ + PrelIsFresh(psin->prel) = false; + } + else + { + free_pathman_relation_info(psin->prel); } - /* Else clear remaining cache entry */ - else remove_pathman_relation_info(relid); } - elog(DEBUG2, - "Fetching %s record for relation %u from pg_pathman's cache [%u]", - (prel ? "live" : "NULL"), relid, MyProcPid); + (void) pathman_cache_search_relid(status_cache, + psin->relid, + HASH_REMOVE, + NULL); +} - return prel; + +/* + * Dispatch cache routines. 
+ */ + +/* Close PartRelationInfo entry */ +void +close_pathman_relation_info(PartRelationInfo *prel) +{ + Assert(prel); + + (void) resowner_prel_del(prel); } -/* Acquire lock on a table and try to get PartRelationInfo */ -const PartRelationInfo * -get_pathman_relation_info_after_lock(Oid relid, bool unlock_if_not_found) +/* Check if relation is partitioned by pg_pathman */ +bool +has_pathman_relation_info(Oid relid) { - const PartRelationInfo *prel; + PartRelationInfo *prel; - /* Restrict concurrent partition creation (it's dangerous) */ - xact_lock_partitioned_rel(relid, false); + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + close_pathman_relation_info(prel); - prel = get_pathman_relation_info(relid); - if (!prel && unlock_if_not_found) - xact_unlock_partitioned_rel(relid); + return true; + } - return prel; + return false; } -/* Remove PartRelationInfo from local cache. */ -void -remove_pathman_relation_info(Oid relid) +/* Get PartRelationInfo from local cache */ +PartRelationInfo * +get_pathman_relation_info(Oid relid) { - PartRelationInfo *prel = hash_search(partitioned_rels, - (const void *) &relid, - HASH_FIND, NULL); - if (prel && PrelIsValid(prel)) + PartStatusInfo *psin; + + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); + + /* Should always be called in transaction */ + Assert(IsTransactionState()); + + /* We don't create entries for catalog */ + if (relid < FirstNormalObjectId) + return NULL; + + /* Do we know anything about this relation? */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + + if (!psin) { - /* Free these arrays iff they're not NULL */ - FreeChildrenArray(prel); - FreeRangesArray(prel); + PartRelationInfo *prel = NULL; + ItemPointerData iptr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + bool found; + + /* + * Check if PATHMAN_CONFIG table contains this relation and + * build a partitioned table cache entry (might emit ERROR). 
+ */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) + prel = build_pathman_relation_info(relid, values); + + /* Create a new entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_ENTER, + &found); + Assert(!found); /* it shouldn't just appear out of thin air */ + + /* Cache fresh entry */ + psin->prel = prel; } - /* Now let's remove the entry completely */ - hash_search(partitioned_rels, - (const void *) &relid, - HASH_REMOVE, NULL); + /* Check invariants */ + Assert(!psin->prel || PrelIsFresh(psin->prel)); +#ifdef USE_RELINFO_LOGGING elog(DEBUG2, - "Removing record for relation %u in pg_pathman's cache [%u]", - relid, MyProcPid); -} - - -/* - * Functions for delayed invalidation. - */ + "fetching %s record for parent %u [%u]", + (psin->prel ? "live" : "NULL"), relid, MyProcPid); +#endif -/* Add new delayed pathman shutdown job (DROP EXTENSION) */ -void -delay_pathman_shutdown(void) -{ - delayed_shutdown = true; + return resowner_prel_add(psin->prel); } -/* Add new delayed invalidation job for a [ex-]parent relation */ -void -delay_invalidation_parent_rel(Oid parent) +/* Build a new PartRelationInfo for partitioned relation */ +static PartRelationInfo * +build_pathman_relation_info(Oid relid, Datum *values) { - list_add_unique(delayed_invalidation_parent_rels, parent); -} + const LOCKMODE lockmode = AccessShareLock; + MemoryContext prel_mcxt; + PartRelationInfo *prel; -/* Add new delayed invalidation job for a vague relation */ -void -delay_invalidation_vague_rel(Oid vague_rel) -{ - list_add_unique(delayed_invalidation_vague_rels, vague_rel); -} + AssertTemporaryContext(); -/* Finish all pending invalidation jobs if possible */ -void -finish_delayed_invalidation(void) -{ - /* Exit early if there's nothing to do */ - if (delayed_invalidation_parent_rels == NIL && - delayed_invalidation_vague_rels == NIL && - delayed_shutdown == false) - { - return; - } + /* Lock parent table */ + 
LockRelationOid(relid, lockmode); - /* Check that current state is transactional */ - if (IsTransactionState()) + /* Check if parent exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) { - ListCell *lc; + /* Nope, it doesn't, remove this entry and exit */ + UnlockRelationOid(relid, lockmode); + return NULL; /* exit */ + } - /* Handle the probable 'DROP EXTENSION' case */ - if (delayed_shutdown) - { - Oid cur_pathman_config_relid; + /* Create a new memory context to store expression tree etc */ + prel_mcxt = AllocSetContextCreate(PathmanParentsCacheContext, + "build_pathman_relation_info", + ALLOCSET_SMALL_SIZES); - /* Unset 'shutdown' flag */ - delayed_shutdown = false; + /* Create a new PartRelationInfo */ + prel = MemoryContextAllocZero(prel_mcxt, sizeof(PartRelationInfo)); + prel->relid = relid; + prel->refcount = 0; + prel->fresh = true; + prel->mcxt = prel_mcxt; - /* Get current PATHMAN_CONFIG relid */ - cur_pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, - get_pathman_schema()); + /* Memory leak and cache protection */ + PG_TRY(); + { + MemoryContext old_mcxt; + const TypeCacheEntry *typcache; + Datum param_values[Natts_pathman_config_params]; + bool param_isnull[Natts_pathman_config_params]; + Oid *prel_children; + uint32 prel_children_count = 0, + i; + + /* Make both arrays point to NULL */ + prel->children = NULL; + prel->ranges = NULL; + + /* Set partitioning type */ + prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); + + /* Switch to persistent memory context */ + old_mcxt = MemoryContextSwitchTo(prel->mcxt); + + /* Build partitioning expression tree */ + prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); + prel->expr = cook_partitioning_expression(relid, prel->expr_cstr, NULL); + fix_opfuncids(prel->expr); + + /* Extract Vars and varattnos of partitioning expression */ + prel->expr_vars = NIL; + prel->expr_atts = NULL; + prel->expr_vars = 
pull_var_clause_compat(prel->expr, 0, 0); + pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); + + MemoryContextSwitchTo(old_mcxt); + + /* First, fetch type of partitioning expression */ + prel->ev_type = exprType(prel->expr); + prel->ev_typmod = exprTypmod(prel->expr); + prel->ev_collid = exprCollation(prel->expr); + + /* Fetch HASH & CMP fuctions and other stuff from type cache */ + typcache = lookup_type_cache(prel->ev_type, + TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); + + prel->ev_byval = typcache->typbyval; + prel->ev_len = typcache->typlen; + prel->ev_align = typcache->typalign; + + prel->cmp_proc = typcache->cmp_proc; + prel->hash_proc = typcache->hash_proc; + + /* Try searching for children */ + (void) find_inheritance_children_array(relid, lockmode, false, + &prel_children_count, + &prel_children); + + /* Fill 'prel' with partition info, raise ERROR if anything is wrong */ + fill_prel_with_partitions(prel, prel_children, prel_children_count); + + /* Unlock the parent */ + UnlockRelationOid(relid, lockmode); + + /* Now it's time to take care of children */ + for (i = 0; i < prel_children_count; i++) + { + /* Cache this child */ + cache_parent_of_partition(prel_children[i], relid); - /* Check that PATHMAN_CONFIG table has indeed been dropped */ - if (cur_pathman_config_relid == InvalidOid || - cur_pathman_config_relid != get_pathman_config_relid()) - { - /* Ok, let's unload pg_pathman's config */ - unload_config(); + /* Unlock this child */ + UnlockRelationOid(prel_children[i], lockmode); + } - /* Disregard all remaining invalidation jobs */ - free_invalidation_list(delayed_invalidation_parent_rels); - free_invalidation_list(delayed_invalidation_vague_rels); + if (prel_children) + pfree(prel_children); - /* No need to continue, exit */ - return; - } + /* Read additional parameters ('enable_parent' at the moment) */ + if (read_pathman_params(relid, param_values, param_isnull)) + { + prel->enable_parent = + 
param_values[Anum_pathman_config_params_enable_parent - 1]; } - - /* Process relations that are (or were) definitely partitioned */ - foreach (lc, delayed_invalidation_parent_rels) + /* Else set default values if they cannot be found */ + else { - Oid parent = lfirst_oid(lc); - - if (!pathman_config_contains_relation(parent, NULL, NULL, NULL)) - remove_pathman_relation_info(parent); - else - invalidate_pathman_relation_info(parent, NULL); + prel->enable_parent = DEFAULT_PATHMAN_ENABLE_PARENT; } + } + PG_CATCH(); + { + /* + * If we managed to create some children but failed later, bounds + * cache now might have obsolete data for something that probably is + * not a partitioned table at all. Remove it. + */ + if (!IsPathmanInitialized()) + /* + * ... unless failure was so hard that caches were already destoyed, + * i.e. extension disabled + */ + PG_RE_THROW(); - /* Process all other vague cases */ - foreach (lc, delayed_invalidation_vague_rels) + if (prel->children != NULL) { - Oid vague_rel = lfirst_oid(lc); + uint32 i; - /* It might be a partitioned table or a partition */ - if (!try_perform_parent_refresh(vague_rel)) + for (i = 0; i < PrelChildrenCount(prel); i++) { - PartParentSearch search; - Oid parent; - - parent = get_parent_of_partition(vague_rel, &search); - - switch (search) + Oid child; + + /* + * We rely on children and ranges array allocated with 0s, not + * random data + */ + if (prel->parttype == PT_HASH) + child = prel->children[i]; + else { - /* It's still parent */ - case PPS_ENTRY_PART_PARENT: - try_perform_parent_refresh(parent); - break; - - /* It *might have been* parent before (not in PATHMAN_CONFIG) */ - case PPS_ENTRY_PARENT: - remove_pathman_relation_info(parent); - break; - - /* How come we still don't know?? 
*/ - case PPS_NOT_SURE: - elog(ERROR, "Unknown table status, this should never happen"); - break; - - default: - break; + Assert(prel->parttype == PT_RANGE); + child = prel->ranges[i].child_oid; } + + forget_bounds_of_partition(child); } } - free_invalidation_list(delayed_invalidation_parent_rels); - free_invalidation_list(delayed_invalidation_vague_rels); - } -} - - -/* - * cache\forget\get PartParentInfo functions. - */ - -/* Create "partition+parent" pair in local cache */ -void -cache_parent_of_partition(Oid partition, Oid parent) -{ - bool found; - PartParentInfo *ppar; + /* Free this entry */ + free_pathman_relation_info(prel); - ppar = hash_search(parent_cache, - (const void *) &partition, - HASH_ENTER, &found); + /* Rethrow ERROR further */ + PG_RE_THROW(); + } + PG_END_TRY(); - elog(DEBUG2, - found ? - "Refreshing record for child %u in pg_pathman's cache [%u]" : - "Creating new record for child %u in pg_pathman's cache [%u]", - partition, MyProcPid); + /* Free trivial entries */ + if (PrelChildrenCount(prel) == 0) + { + free_pathman_relation_info(prel); + prel = NULL; + } - ppar->child_rel = partition; - ppar->parent_rel = parent; + return prel; } -/* Remove "partition+parent" pair from cache & return parent's Oid */ -Oid -forget_parent_of_partition(Oid partition, PartParentSearch *status) +/* Free PartRelationInfo struct safely */ +static void +free_pathman_relation_info(PartRelationInfo *prel) { - return get_parent_of_partition_internal(partition, status, HASH_REMOVE); + MemoryContextDelete(prel->mcxt); } -/* Return partition parent's Oid */ -Oid -get_parent_of_partition(Oid partition, PartParentSearch *status) +static PartRelationInfo * +resowner_prel_add(PartRelationInfo *prel) { - return get_parent_of_partition_internal(partition, status, HASH_FIND); -} + if (!prel_resowner) + { + HASHCTL ctl; -/* - * Get [and remove] "partition+parent" pair from cache, - * also check syscache if 'status' is provided. 
- * - * "status == NULL" implies that we don't care about - * neither syscache nor PATHMAN_CONFIG table contents. - */ -static Oid -get_parent_of_partition_internal(Oid partition, - PartParentSearch *status, - HASHACTION action) -{ - const char *action_str; /* "Fetching"\"Resetting" */ - Oid parent; - PartParentInfo *ppar = hash_search(parent_cache, - (const void *) &partition, - HASH_FIND, NULL); - - /* Set 'action_str' */ - switch (action) - { - case HASH_REMOVE: - action_str = "Resetting"; - break; + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(ResourceOwner); + ctl.entrysize = sizeof(prel_resowner_info); + ctl.hcxt = TopPathmanContext; - case HASH_FIND: - action_str = "Fetching"; - break; + prel_resowner = hash_create("prel resowner", + PART_RELS_SIZE, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - default: - elog(ERROR, "Unexpected HTAB action %u", action); + RegisterResourceReleaseCallback(resonwner_prel_callback, NULL); } - elog(DEBUG2, - "%s %s record for child %u from pg_pathman's cache [%u]", - action_str, (ppar ? 
"live" : "NULL"), partition, MyProcPid); - - if (ppar) + if (prel) { - if (status) *status = PPS_ENTRY_PART_PARENT; - parent = ppar->parent_rel; + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; + bool found; + MemoryContext old_mcxt; - /* Remove entry if necessary */ - if (action == HASH_REMOVE) - hash_search(parent_cache, - (const void *) &partition, - HASH_REMOVE, NULL); - } - /* Try fetching parent from syscache if 'status' is provided */ - else if (status) - parent = try_syscache_parent_search(partition, status); - else - parent = InvalidOid; /* we don't have to set status */ + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_ENTER, + &found); - return parent; -} + if (!found) + info->prels = NIL; -/* Try to find parent of a partition using syscache & PATHMAN_CONFIG */ -static Oid -try_syscache_parent_search(Oid partition, PartParentSearch *status) -{ - if (!IsTransactionState()) - { - /* We could not perform search */ - if (status) *status = PPS_NOT_SURE; + /* Register this 'prel' */ + old_mcxt = MemoryContextSwitchTo(TopPathmanContext); + info->prels = lappend(info->prels, prel); + MemoryContextSwitchTo(old_mcxt); - return InvalidOid; + /* Save current caller (function:line) */ + LeakTrackerAdd(prel); + + /* Finally, increment refcount */ + PrelReferenceCount(prel) += 1; } - else - { - Relation relation; - Snapshot snapshot; - ScanKeyData key[1]; - SysScanDesc scan; - HeapTuple inheritsTuple; - Oid parent = InvalidOid; - /* At first we assume parent does not exist (not a partition) */ - if (status) *status = PPS_ENTRY_NOT_FOUND; + return prel; +} - relation = heap_open(InheritsRelationId, AccessShareLock); +static PartRelationInfo * +resowner_prel_del(PartRelationInfo *prel) +{ + /* Must be active! 
*/ + Assert(prel_resowner); - ScanKeyInit(&key[0], - Anum_pg_inherits_inhrelid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(partition)); + if (prel) + { + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; - snapshot = RegisterSnapshot(GetLatestSnapshot()); - scan = systable_beginscan(relation, InheritsRelidSeqnoIndexId, - true, NULL, 1, key); + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_FIND, + NULL); - while ((inheritsTuple = systable_getnext(scan)) != NULL) + if (info) { - parent = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent; + /* Check that 'prel' is registered! */ + Assert(list_member_ptr(info->prels, prel)); - /* - * NB: don't forget that 'inh' flag does not immediately - * mean that this is a pg_pathman's partition. It might - * be just a casual inheriting table. - */ - if (status) *status = PPS_ENTRY_PARENT; + /* Remove it from list */ + info->prels = list_delete_ptr(info->prels, prel); + } - /* Check that PATHMAN_CONFIG contains this table */ - if (pathman_config_contains_relation(parent, NULL, NULL, NULL)) - { - /* We've found the entry, update status */ - if (status) *status = PPS_ENTRY_PART_PARENT; - } + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); - break; /* there should be no more rows */ - } + /* Decrease refcount */ + PrelReferenceCount(prel) -= 1; - systable_endscan(scan); - UnregisterSnapshot(snapshot); - heap_close(relation, AccessShareLock); + /* Free list of owners */ + if (PrelReferenceCount(prel) == 0) + { + LeakTrackerFree(prel); + } - return parent; + /* Free this entry if it's time */ + if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) + { + free_pathman_relation_info(prel); + } } + + return prel; } -/* - * Try to refresh cache entry for relation 'parent'. - * - * Return true on success. 
- */ -static bool -try_perform_parent_refresh(Oid parent) +static void +resonwner_prel_callback(ResourceReleasePhase phase, + bool isCommit, + bool isTopLevel, + void *arg) { - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; - if (pathman_config_contains_relation(parent, values, isnull, NULL)) + if (prel_resowner) { - text *attname; - PartType parttype; + ListCell *lc; - parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); - attname = DatumGetTextP(values[Anum_pathman_config_attname - 1]); + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_FIND, + NULL); - /* If anything went wrong, return false (actually, it might throw ERROR) */ - if (!PrelIsValid(refresh_pathman_relation_info(parent, parttype, - text_to_cstring(attname)))) - return false; - } - /* Not a partitioned relation */ - else return false; + if (info) + { + foreach (lc, info->prels) + { + PartRelationInfo *prel = lfirst(lc); - return true; -} + if (isCommit) + { + /* Print verbose list of *possible* owners */ + LeakTrackerPrint(prel); -/* - * Safe PartType wrapper. - */ -PartType -DatumGetPartType(Datum datum) -{ - uint32 val = DatumGetUInt32(datum); + elog(WARNING, + "cache reference leak: PartRelationInfo(%d) has count %d", + PrelParentRelid(prel), PrelReferenceCount(prel)); + } + + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); + + /* Decrease refcount */ + PrelReferenceCount(prel) -= 1; + + /* Free list of owners */ + LeakTrackerFree(prel); + + /* Free this entry if it's time */ + if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) + { + free_pathman_relation_info(prel); + } + } - if (val < 1 || val > 2) - elog(ERROR, "Unknown partitioning type %u", val); + list_free(info->prels); - return (PartType) val; + hash_search(prel_resowner, + (void *) &resowner, + HASH_REMOVE, + NULL); + } + } } -/* - * Common PartRelationInfo checks. 
Emit ERROR if anything is wrong. - */ -void -shout_if_prel_is_invalid(Oid parent_oid, - const PartRelationInfo *prel, - PartType expected_part_type) +/* Fill PartRelationInfo with partition-related info */ +static void +fill_prel_with_partitions(PartRelationInfo *prel, + const Oid *partitions, + const uint32 parts_count) { - if (!prel) - elog(ERROR, "Relation \"%s\" is not partitioned by pg_pathman", - get_rel_name_or_relid(parent_oid)); +/* Allocate array if partitioning type matches 'prel' (or "ANY") */ +#define AllocZeroArray(part_type, context, elem_num, elem_type) \ + ( \ + ((part_type) == PT_ANY || (part_type) == prel->parttype) ? \ + MemoryContextAllocZero((context), (elem_num) * sizeof(elem_type)) : \ + NULL \ + ) + + uint32 i; + MemoryContext temp_mcxt, /* reference temporary mcxt */ + old_mcxt; /* reference current mcxt */ + + AssertTemporaryContext(); + + /* Allocate memory for 'prel->children' & 'prel->ranges' (if needed) */ + prel->children = AllocZeroArray(PT_ANY, prel->mcxt, parts_count, Oid); + prel->ranges = AllocZeroArray(PT_RANGE, prel->mcxt, parts_count, RangeEntry); + + /* Set number of children */ + PrelChildrenCount(prel) = parts_count; + + /* Create temporary memory context for loop */ + temp_mcxt = AllocSetContextCreate(CurrentMemoryContext, + CppAsString(fill_prel_with_partitions), + ALLOCSET_SMALL_SIZES); + + /* Initialize bounds of partitions */ + for (i = 0; i < PrelChildrenCount(prel); i++) + { + PartBoundInfo *pbin; - if (!PrelIsValid(prel)) - elog(ERROR, "pg_pathman's cache contains invalid entry " - "for relation \"%s\" [%u]", - get_rel_name_or_relid(parent_oid), - MyProcPid); + /* Clear all previous allocations */ + MemoryContextReset(temp_mcxt); - /* Check partitioning type unless it's "indifferent" */ - if (expected_part_type != PT_INDIFFERENT && - expected_part_type != prel->parttype) - { - char *expected_str; + /* Switch to the temporary memory context */ + old_mcxt = MemoryContextSwitchTo(temp_mcxt); + { + /* Fetch 
constraint's expression tree */ + pbin = get_bounds_of_partition(partitions[i], prel); + } + MemoryContextSwitchTo(old_mcxt); - switch (expected_part_type) + /* Copy bounds from bound cache */ + switch (prel->parttype) { case PT_HASH: - expected_str = "HASH"; + /* + * This might be the case if hash part was dropped, and thus + * children array alloc'ed smaller than needed, but parts + * bound cache still keeps entries with high indexes. + */ + if (pbin->part_idx >= PrelChildrenCount(prel)) + { + /* purged caches will destoy prel, save oid for reporting */ + Oid parent_relid = PrelParentRelid(prel); + + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, (errmsg("pg_pathman's cache for relation %d " + "has not been properly initialized. " + "Looks like one of hash partitions was dropped.", + parent_relid), + errhint(INIT_ERROR_HINT))); + } + + prel->children[pbin->part_idx] = pbin->child_relid; + break; + + case PT_RANGE: + { + /* Copy child's Oid */ + prel->ranges[i].child_oid = pbin->child_relid; + + /* Copy all min & max Datums to the persistent mcxt */ + old_mcxt = MemoryContextSwitchTo(prel->mcxt); + { + prel->ranges[i].min = CopyBound(&pbin->range_min, + prel->ev_byval, + prel->ev_len); + + prel->ranges[i].max = CopyBound(&pbin->range_max, + prel->ev_byval, + prel->ev_len); + } + MemoryContextSwitchTo(old_mcxt); + } + break; + + default: + { + DisablePathman(); /* disable pg_pathman since config is broken */ + WrongPartType(prel->parttype); + } + break; + } + } + + /* Drop temporary memory context */ + MemoryContextDelete(temp_mcxt); + + /* Finalize 'prel' for a RANGE-partitioned table */ + if (prel->parttype == PT_RANGE) + { + qsort_range_entries(PrelGetRangesArray(prel), + PrelChildrenCount(prel), + prel); + + /* Initialize 'prel->children' array */ + for (i = 0; i < PrelChildrenCount(prel); i++) + prel->children[i] = prel->ranges[i].child_oid; + } + + /* Check that each partition Oid has been assigned properly */ + if 
(prel->parttype == PT_HASH) + for (i = 0; i < PrelChildrenCount(prel); i++) + { + if (!OidIsValid(prel->children[i])) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" " + "has not been properly initialized", + get_rel_name_or_relid(PrelParentRelid(prel))), + errhint(INIT_ERROR_HINT))); + } + } +} + +/* qsort() comparison function for RangeEntries */ +static int +cmp_range_entries(const void *p1, const void *p2, void *arg) +{ + const RangeEntry *v1 = (const RangeEntry *) p1; + const RangeEntry *v2 = (const RangeEntry *) p2; + cmp_func_info *info = (cmp_func_info *) arg; + + return cmp_bounds(&info->flinfo, info->collid, &v1->min, &v2->min); +} + +void +qsort_range_entries(RangeEntry *entries, int nentries, + const PartRelationInfo *prel) +{ + cmp_func_info cmp_info; + + /* Prepare function info */ + fmgr_info(prel->cmp_proc, &cmp_info.flinfo); + cmp_info.collid = prel->ev_collid; + + /* Sort partitions by RangeEntry->min asc */ + qsort_arg(entries, nentries, + sizeof(RangeEntry), + cmp_range_entries, + (void *) &cmp_info); +} + +/* + * Common PartRelationInfo checks. Emit ERROR if anything is wrong. 
+ */ +void +shout_if_prel_is_invalid(const Oid parent_oid, + const PartRelationInfo *prel, + const PartType expected_part_type) +{ + if (!prel) + elog(ERROR, "relation \"%s\" has no partitions", + get_rel_name_or_relid(parent_oid)); + + /* Check partitioning type unless it's "ANY" */ + if (expected_part_type != PT_ANY && + expected_part_type != prel->parttype) + { + char *expected_str; + + switch (expected_part_type) + { + case PT_HASH: + expected_str = "HASH"; break; case PT_RANGE: @@ -660,13 +910,828 @@ shout_if_prel_is_invalid(Oid parent_oid, break; default: - elog(ERROR, - "expected_str selection not implemented for type %d", - expected_part_type); + WrongPartType(expected_part_type); + expected_str = NULL; /* keep compiler happy */ } - elog(ERROR, "Relation \"%s\" is not partitioned by %s", + elog(ERROR, "relation \"%s\" is not partitioned by %s", get_rel_name_or_relid(parent_oid), expected_str); } } + +/* + * Remap partitioning expression columns for tuple source relation. + * This is a simplified version of functions that return TupleConversionMap. + * It should be faster if expression uses a few fields of relation. 
+ */ +#if PG_VERSION_NUM >= 130000 +AttrMap * +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc) +#else +AttrNumber * +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length) +#endif +{ + Oid parent_relid = PrelParentRelid(prel); + int source_natts = source_tupdesc->natts, + expr_natts = 0; +#if PG_VERSION_NUM >= 130000 + AttrMap *result; +#else + AttrNumber *result; +#endif + AttrNumber i; + bool is_trivial = true; + + /* Get largest attribute number used in expression */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + expr_natts = i; + +#if PG_VERSION_NUM >= 130000 + result = make_attrmap(expr_natts); +#else + /* Allocate array for map */ + result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); +#endif + + /* Find a match for each attribute */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + { + AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; + char *attname = get_attname_compat(parent_relid, attnum); + int j; + + Assert(attnum <= expr_natts); + + for (j = 0; j < source_natts; j++) + { + Form_pg_attribute att = TupleDescAttr(source_tupdesc, j); + + if (att->attisdropped) + continue; /* attrMap[attnum - 1] is already 0 */ + + if (strcmp(NameStr(att->attname), attname) == 0) + { +#if PG_VERSION_NUM >= 130000 + result->attnums[attnum - 1] = (AttrNumber) (j + 1); +#else + result[attnum - 1] = (AttrNumber) (j + 1); +#endif + break; + } + } + +#if PG_VERSION_NUM >= 130000 + if (result->attnums[attnum - 1] == 0) +#else + if (result[attnum - 1] == 0) +#endif + elog(ERROR, "cannot find column \"%s\" in child relation", attname); + +#if PG_VERSION_NUM >= 130000 + if (result->attnums[attnum - 1] != attnum) +#else + if (result[attnum - 1] != attnum) +#endif + is_trivial = false; + } + + /* Check if map is trivial */ + if (is_trivial) + { +#if PG_VERSION_NUM >= 130000 + free_attrmap(result); +#else + pfree(result); +#endif + 
return NULL; + } + +#if PG_VERSION_NUM < 130000 + *map_length = expr_natts; +#endif + return result; +} + + +/* + * Bounds cache routines. + */ + +/* Remove partition's constraint from cache */ +static void +forget_bounds_of_partition(Oid partition) +{ + PartBoundInfo *pbin; + + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? + pathman_cache_search_relid(bounds_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ + + if (pbin) + { + /* Free this entry */ + FreePartBoundInfo(pbin); + + /* Finally remove this entry from cache */ + pathman_cache_search_relid(bounds_cache, + partition, + HASH_REMOVE, + NULL); + } + +} + +/* + * Remove rel's constraint from cache, if relid is partition; + * Remove all children constraints, if it is parent. + */ +void +forget_bounds_of_rel(Oid relid) +{ + PartStatusInfo *psin; + + forget_bounds_of_partition(relid); + + /* + * If it was the parent who got invalidated, purge children's bounds. + * We assume here that if bounds_cache has something, parent must be also + * in status_cache. Fragile, but seems better then blowing out full bounds + * cache or digging pathman_config on each relcache invalidation. + */ + + /* Find status cache entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + if (psin != NULL && psin->prel != NULL) + { + uint32 i; + PartRelationInfo *prel = psin->prel; + Oid *children = PrelGetChildrenArray(prel); + + for (i = 0; i < PrelChildrenCount(prel); i++) + { + forget_bounds_of_partition(children[i]); + } + } +} + +/* Return partition's constraint as expression tree */ +PartBoundInfo * +get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) +{ + PartBoundInfo *pbin; + + /* + * We might end up building the constraint + * tree that we wouldn't want to keep. 
+ */ + AssertTemporaryContext(); + + /* PartRelationInfo must be provided */ + Assert(prel != NULL); + + /* Should always be called in transaction */ + Assert(IsTransactionState()); + + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? + pathman_cache_search_relid(bounds_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ + + /* Build new entry */ + if (!pbin) + { + PartBoundInfo pbin_local; + Expr *con_expr; + + /* Initialize other fields */ + pbin_local.child_relid = partition; + pbin_local.byval = prel->ev_byval; + + /* Try to build constraint's expression tree (may emit ERROR) */ + con_expr = get_partition_constraint_expr(partition, true); + + /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ + fill_pbin_with_bounds(&pbin_local, prel, con_expr); + + /* We strive to delay the creation of cache's entry */ + pbin = pg_pathman_enable_bounds_cache ? + pathman_cache_search_relid(bounds_cache, + partition, + HASH_ENTER, + NULL) : + palloc(sizeof(PartBoundInfo)); + + /* Copy data from 'pbin_local' */ + memcpy(pbin, &pbin_local, sizeof(PartBoundInfo)); + } + + return pbin; +} + +void +invalidate_bounds_cache(void) +{ + HASH_SEQ_STATUS status; + PartBoundInfo *pbin; + + Assert(offsetof(PartBoundInfo, child_relid) == 0); + + hash_seq_init(&status, bounds_cache); + + while ((pbin = hash_seq_search(&status)) != NULL) + { + FreePartBoundInfo(pbin); + + pathman_cache_search_relid(bounds_cache, + pbin->child_relid, + HASH_REMOVE, NULL); + } +} + +/* + * Get constraint expression tree of a partition. + * + * build_check_constraint_name_relid_internal() is used to build conname. 
+ */ +Expr * +get_partition_constraint_expr(Oid partition, bool raise_error) +{ + Oid conid; /* constraint Oid */ + char *conname; /* constraint name */ + HeapTuple con_tuple; + Datum conbin_datum; + bool conbin_isnull; + Expr *expr; /* expression tree for constraint */ + + conname = build_check_constraint_name_relid_internal(partition); + conid = get_relation_constraint_oid(partition, conname, true); + + if (!OidIsValid(conid)) + { + if (!raise_error) + return NULL; + + ereport(ERROR, + (errmsg("constraint \"%s\" of partition \"%s\" does not exist", + conname, get_rel_name_or_relid(partition)))); + } + + con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); + if (!HeapTupleIsValid(con_tuple)) + { + if (!raise_error) + return NULL; + + ereport(ERROR, + (errmsg("cache lookup failed for constraint \"%s\" of partition \"%s\"", + conname, get_rel_name_or_relid(partition)))); + } + + conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, + Anum_pg_constraint_conbin, + &conbin_isnull); + if (conbin_isnull) + { + if (!raise_error) + return NULL; + + ereport(ERROR, + (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", + conname, get_rel_name_or_relid(partition)))); + } + pfree(conname); + + /* Finally we get a constraint expression tree */ + expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); + + /* Don't foreget to release syscache tuple */ + ReleaseSysCache(con_tuple); + + return expr; +} + +/* Fill PartBoundInfo with bounds/hash */ +static void +fill_pbin_with_bounds(PartBoundInfo *pbin, + const PartRelationInfo *prel, + const Expr *constraint_expr) +{ + AssertTemporaryContext(); + + /* Copy partitioning type to 'pbin' */ + pbin->parttype = prel->parttype; + + /* Perform a partitioning_type-dependent task */ + switch (prel->parttype) + { + case PT_HASH: + { + if (!validate_hash_constraint(constraint_expr, + prel, &pbin->part_idx)) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("wrong 
constraint format for HASH partition \"%s\"", + get_rel_name_or_relid(pbin->child_relid)), + errhint(INIT_ERROR_HINT))); + } + } + break; + + case PT_RANGE: + { + Datum lower, upper; + bool lower_null, upper_null; + + if (validate_range_constraint(constraint_expr, + prel, &lower, &upper, + &lower_null, &upper_null)) + { + MemoryContext old_mcxt; + + /* Switch to the persistent memory context */ + old_mcxt = MemoryContextSwitchTo(PathmanBoundsCacheContext); + + pbin->range_min = lower_null ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(datumCopy(lower, + prel->ev_byval, + prel->ev_len)); + + pbin->range_max = upper_null ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(datumCopy(upper, + prel->ev_byval, + prel->ev_len)); + + /* Switch back */ + MemoryContextSwitchTo(old_mcxt); + } + else + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("wrong constraint format for RANGE partition \"%s\"", + get_rel_name_or_relid(pbin->child_relid)), + errhint(INIT_ERROR_HINT))); + } + } + break; + + default: + { + DisablePathman(); /* disable pg_pathman since config is broken */ + WrongPartType(prel->parttype); + } + break; + } +} + + +/* + * Parents cache routines. + */ + +/* Add parent of partition to cache */ +void +cache_parent_of_partition(Oid partition, Oid parent) +{ + PartParentInfo *ppar; + + /* Why would we want to call it not in transaction? 
*/ + Assert(IsTransactionState()); + + /* Create a new cache entry */ + ppar = pathman_cache_search_relid(parents_cache, + partition, + HASH_ENTER, + NULL); + + /* Fill entry with parent */ + ppar->parent_relid = parent; +} + +/* Remove parent of partition from cache */ +void +forget_parent_of_partition(Oid partition) +{ + pathman_cache_search_relid(parents_cache, + partition, + HASH_REMOVE, + NULL); +} + +/* Return parent of partition */ +Oid +get_parent_of_partition(Oid partition) +{ + PartParentInfo *ppar; + + /* Should always be called in transaction */ + Assert(IsTransactionState()); + + /* We don't cache catalog objects */ + if (partition < FirstNormalObjectId) + return InvalidOid; + + ppar = pathman_cache_search_relid(parents_cache, + partition, + HASH_FIND, + NULL); + + /* Nice, we have a cached entry */ + if (ppar) + { + return ppar->parent_relid; + } + /* Bad luck, let's search in catalog */ + else + { + Relation relation; + ScanKeyData key[1]; + SysScanDesc scan; + HeapTuple htup; + Oid parent = InvalidOid; + + relation = heap_open_compat(InheritsRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_inherits_inhrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(partition)); + + scan = systable_beginscan(relation, InheritsRelidSeqnoIndexId, + true, NULL, 1, key); + + while ((htup = systable_getnext(scan)) != NULL) + { + /* Extract parent from catalog tuple */ + Oid inhparent = ((Form_pg_inherits) GETSTRUCT(htup))->inhparent; + + /* Check that PATHMAN_CONFIG contains this table */ + if (pathman_config_contains_relation(inhparent, NULL, NULL, NULL, NULL)) + { + /* We should return this parent */ + parent = inhparent; + + /* Now, let's cache this parent */ + cache_parent_of_partition(partition, parent); + } + + break; /* there should be no more rows */ + } + + systable_endscan(scan); + heap_close_compat(relation, AccessShareLock); + + return parent; + } +} + +void +invalidate_parents_cache(void) +{ + HASH_SEQ_STATUS status; + 
PartParentInfo *ppar; + + Assert(offsetof(PartParentInfo, child_relid) == 0); + + hash_seq_init(&status, parents_cache); + + while ((ppar = hash_seq_search(&status)) != NULL) + { + /* This is a plain structure, no need to pfree() */ + + pathman_cache_search_relid(parents_cache, + ppar->child_relid, + HASH_REMOVE, NULL); + } +} + + +/* + * Partitioning expression routines. + */ + +/* Wraps expression in SELECT query and returns parse tree */ +Node * +parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, /* ret value #1 */ + Node **parsetree_out) /* ret value #2 */ +{ + SelectStmt *select_stmt; + List *parsetree_list; + MemoryContext old_mcxt; + + const char *sql = "SELECT (%s) FROM ONLY %s.%s"; + char *relname = get_rel_name(relid), + *nspname = get_namespace_name(get_rel_namespace(relid)); + char *query_string = psprintf(sql, expr_cstr, + quote_identifier(nspname), + quote_identifier(relname)); + + old_mcxt = CurrentMemoryContext; + + PG_TRY(); + { + parsetree_list = raw_parser_compat(query_string); + } + PG_CATCH(); + { + ErrorData *error; + + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); + + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(PARSE_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; + + ReThrowError(error); + } + PG_END_TRY(); + + if (list_length(parsetree_list) != 1) + elog(ERROR, "expression \"%s\" produced more than one query", expr_cstr); + +#if PG_VERSION_NUM >= 100000 + select_stmt = (SelectStmt *) ((RawStmt *) linitial(parsetree_list))->stmt; +#else + select_stmt = (SelectStmt *) linitial(parsetree_list); +#endif + + if (query_string_out) + *query_string_out = query_string; + + if (parsetree_out) + *parsetree_out = (Node *) linitial(parsetree_list); + + return ((ResTarget *) 
linitial(select_stmt->targetList))->val; +} + +/* Parse partitioning expression and return its type and nodeToString() + * (or nodeToStringWithLocations() in version 17 and higher) as TEXT */ +Node * +cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type_out) /* ret value #1 */ +{ + Node *expr; + Node *parse_tree; + List *query_tree_list; + + char *query_string; + + MemoryContext parse_mcxt, + old_mcxt; + + AssertTemporaryContext(); + + /* + * We use separate memory context here, just to make sure we won't + * leave anything behind after parsing, rewriting and planning. + */ + parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, + CppAsString(cook_partitioning_expression), + ALLOCSET_SMALL_SIZES); + + /* Switch to mcxt for cooking :) */ + old_mcxt = MemoryContextSwitchTo(parse_mcxt); + + /* First we have to build a raw AST */ + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); + + /* We don't need pg_pathman's magic here */ + pathman_hooks_enabled = false; + + PG_TRY(); + { + Query *query; + int expr_attr; + Relids expr_varnos; + Bitmapset *expr_varattnos = NULL; + + /* This will fail with ERROR in case of wrong expression */ + query_tree_list = pg_analyze_and_rewrite_compat(parse_tree, query_string, + NULL, 0, NULL); + + /* Sanity check #1 */ + if (list_length(query_tree_list) != 1) + elog(ERROR, "partitioning expression produced more than 1 query"); + + query = (Query *) linitial(query_tree_list); + + /* Sanity check #2 */ + if (list_length(query->targetList) != 1) + elog(ERROR, "there should be exactly 1 partitioning expression"); + + /* Sanity check #3 */ + if (query_tree_walker(query, query_contains_subqueries, NULL, 0)) + elog(ERROR, "subqueries are not allowed in partitioning expression"); + + expr = (Node *) ((TargetEntry *) linitial(query->targetList))->expr; + expr = eval_const_expressions(NULL, expr); + + /* Sanity check #4 */ + if (contain_mutable_functions(expr)) + ereport(ERROR, 
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("functions in partitioning expression" + " must be marked IMMUTABLE"))); + + /* Sanity check #5 */ + expr_varnos = pull_varnos_compat(NULL, expr); + if (bms_num_members(expr_varnos) != 1 || + relid != ((RangeTblEntry *) linitial(query->rtable))->relid) + { + elog(ERROR, "partitioning expression should reference table \"%s\"", + get_rel_name(relid)); + } + + /* Sanity check #6 */ + pull_varattnos(expr, bms_singleton_member(expr_varnos), &expr_varattnos); + expr_attr = -1; + while ((expr_attr = bms_next_member(expr_varattnos, expr_attr)) >= 0) + { + AttrNumber attnum = expr_attr + FirstLowInvalidHeapAttributeNumber; + HeapTuple htup; + + /* Check that there's no system attributes in expression */ + if (attnum < InvalidAttrNumber) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("system attributes are not supported"))); + + htup = SearchSysCache2(ATTNUM, + ObjectIdGetDatum(relid), + Int16GetDatum(attnum)); + if (HeapTupleIsValid(htup)) + { + bool nullable; + + /* Fetch 'nullable' and free syscache tuple */ + nullable = !((Form_pg_attribute) GETSTRUCT(htup))->attnotnull; + ReleaseSysCache(htup); + + if (nullable) + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), + errmsg("column \"%s\" should be marked NOT NULL", + get_attname_compat(relid, attnum)))); + } + } + + /* Free sets */ + bms_free(expr_varnos); + bms_free(expr_varattnos); + + Assert(expr); + + /* Set 'expr_type_out' if needed */ + if (expr_type_out) + *expr_type_out = exprType(expr); + } + PG_CATCH(); + { + ErrorData *error; + + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled = true; + + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); + + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(COOK_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + 
error->cursorpos = 0; + error->internalpos = 0; + + ReThrowError(error); + } + PG_END_TRY(); + + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled = true; + + /* Switch to previous mcxt */ + MemoryContextSwitchTo(old_mcxt); + + /* Get Datum of serialized expression (right mcxt) */ + expr = copyObject(expr); + + /* Free memory */ + MemoryContextDelete(parse_mcxt); + + return expr; +} + +/* Canonicalize user's expression (trim whitespaces etc) */ +char * +canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr) +{ + Node *parse_tree; + Expr *expr; + char *query_string; + Query *query; + + AssertTemporaryContext(); + + /* First we have to build a raw AST */ + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); + + query = parse_analyze_compat(parse_tree, query_string, NULL, 0, NULL); + expr = ((TargetEntry *) linitial(query->targetList))->expr; + + /* We don't care about memory efficiency here */ + return deparse_expression((Node *) expr, + deparse_context_for(get_rel_name(relid), relid), + false, false); +} + +/* Check if query has subqueries */ +static bool +query_contains_subqueries(Node *node, void *context) +{ + if (node == NULL) + return false; + + /* We've met a subquery */ + if (IsA(node, Query)) + return true; + + return expression_tree_walker(node, query_contains_subqueries, NULL); +} + + +/* + * Functions for delayed invalidation. 
+ */ + +/* Add new delayed pathman shutdown job (DROP EXTENSION) */ +void +delay_pathman_shutdown(void) +{ + delayed_shutdown = true; +} + +/* Finish all pending invalidation jobs if possible */ +void +finish_delayed_invalidation(void) +{ + /* Check that current state is transactional */ + if (IsTransactionState()) + { + AcceptInvalidationMessages(); + + /* Handle the probable 'DROP EXTENSION' case */ + if (delayed_shutdown) + { + Oid cur_pathman_config_relid; + + /* Unset 'shutdown' flag */ + delayed_shutdown = false; + + /* Get current PATHMAN_CONFIG relid */ + cur_pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, + get_pathman_schema()); + + /* Check that PATHMAN_CONFIG table has indeed been dropped */ + if (cur_pathman_config_relid == InvalidOid || + cur_pathman_config_relid != get_pathman_config_relid(true)) + { + /* Ok, let's unload pg_pathman's config */ + unload_config(); + + /* No need to continue, exit */ + return; + } + } + } +} diff --git a/src/relation_info.h b/src/relation_info.h deleted file mode 100644 index 1ed99933..00000000 --- a/src/relation_info.h +++ /dev/null @@ -1,200 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * relation_info.h - * Data structures describing partitioned relations - * - * Copyright (c) 2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef RELATION_INFO_H -#define RELATION_INFO_H - -#include "dsm_array.h" - -#include "postgres.h" -#include "access/attnum.h" -#include "port/atomics.h" - - -/* - * Partitioning type. 
- */ -typedef enum -{ - PT_INDIFFERENT = 0, /* for part type traits (virtual type) */ - PT_HASH, - PT_RANGE -} PartType; - -/* - * Child relation info for RANGE partitioning - */ -typedef struct -{ - Oid child_oid; - - Datum min, - max; -} RangeEntry; - -/* - * PartRelationInfo - * Per-relation partitioning information - */ -typedef struct -{ - Oid key; /* partitioned table's Oid */ - bool valid; /* is this entry valid? */ - bool enable_parent; /* include parent to the plan */ - bool auto_partition; /* auto partition creation */ - - uint32 children_count; - Oid *children; /* Oids of child partitions */ - RangeEntry *ranges; /* per-partition range entry or NULL */ - - PartType parttype; /* partitioning type (HASH | RANGE) */ - AttrNumber attnum; /* partitioned column's index */ - Oid atttype; /* partitioned column's type */ - int32 atttypmod; /* partitioned column type modifier */ - bool attbyval; /* is partitioned column stored by value? */ - int16 attlen; /* length of the partitioned column's type */ - int attalign; /* alignment of the part column's type */ - Oid attcollid; /* collation of the partitioned column */ - - Oid cmp_proc, /* comparison fuction for 'atttype' */ - hash_proc; /* hash function for 'atttype' */ -} PartRelationInfo; - -/* - * RelParentInfo - * Cached parent of the specified partition. - * Allows us to quickly search for PartRelationInfo. - */ -typedef struct -{ - Oid child_rel; /* key */ - Oid parent_rel; -} PartParentInfo; - -/* - * PartParentSearch - * Represents status of a specific cached entry. - * Returned by [for]get_parent_of_partition(). - */ -typedef enum -{ - PPS_ENTRY_NOT_FOUND = 0, - PPS_ENTRY_PARENT, /* entry was found, but pg_pathman doesn't know it */ - PPS_ENTRY_PART_PARENT, /* entry is parent and is known by pg_pathman */ - PPS_NOT_SURE /* can't determine (not transactional state) */ -} PartParentSearch; - - -/* - * PartRelationInfo field access macros. 
- */ - -#define PrelParentRelid(prel) ( (prel)->key ) - -#define PrelGetChildrenArray(prel) ( (prel)->children ) - -#define PrelGetRangesArray(prel) ( (prel)->ranges ) - -#define PrelChildrenCount(prel) ( (prel)->children_count ) - -#define PrelIsValid(prel) ( (prel) && (prel)->valid ) - -inline static uint32 -PrelLastChild(const PartRelationInfo *prel) -{ - Assert(PrelIsValid(prel)); - - if (PrelChildrenCount(prel) == 0) - elog(ERROR, "pg_pathman's cache entry for relation %u has 0 children", - PrelParentRelid(prel)); - - return PrelChildrenCount(prel) - 1; /* last partition */ -} - - -const PartRelationInfo *refresh_pathman_relation_info(Oid relid, - PartType partitioning_type, - const char *part_column_name); -void invalidate_pathman_relation_info(Oid relid, bool *found); -void remove_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found); - -void delay_pathman_shutdown(void); -void delay_invalidation_parent_rel(Oid parent); -void delay_invalidation_vague_rel(Oid vague_rel); -void finish_delayed_invalidation(void); - -void cache_parent_of_partition(Oid partition, Oid parent); -Oid forget_parent_of_partition(Oid partition, PartParentSearch *status); -Oid get_parent_of_partition(Oid partition, PartParentSearch *status); - -PartType DatumGetPartType(Datum datum); - -void shout_if_prel_is_invalid(Oid parent_oid, - const PartRelationInfo *prel, - PartType expected_part_type); - - -/* - * Useful static functions for freeing memory. 
- */ - -static inline void -FreeChildrenArray(PartRelationInfo *prel) -{ - uint32 i; - - Assert(PrelIsValid(prel)); - - /* Remove relevant PartParentInfos */ - if ((prel)->children) - { - for (i = 0; i < PrelChildrenCount(prel); i++) - { - Oid child = (prel)->children[i]; - - /* If it's *always been* relid's partition, free cache */ - if (PrelParentRelid(prel) == get_parent_of_partition(child, NULL)) - forget_parent_of_partition(child, NULL); - } - - pfree((prel)->children); - (prel)->children = NULL; - } -} - -static inline void -FreeRangesArray(PartRelationInfo *prel) -{ - uint32 i; - - Assert(PrelIsValid(prel)); - - /* Remove RangeEntries array */ - if ((prel)->ranges) - { - /* Remove persistent entries if not byVal */ - if (!(prel)->attbyval) - { - for (i = 0; i < PrelChildrenCount(prel); i++) - { - pfree(DatumGetPointer((prel)->ranges[i].min)); - pfree(DatumGetPointer((prel)->ranges[i].max)); - } - } - - pfree((prel)->ranges); - (prel)->ranges = NULL; - } -} - -#endif diff --git a/src/runtimeappend.c b/src/runtime_append.c similarity index 51% rename from src/runtimeappend.c rename to src/runtime_append.c index 7260ab2c..a90c101a 100644 --- a/src/runtimeappend.c +++ b/src/runtime_append.c @@ -8,10 +8,10 @@ * ------------------------------------------------------------------------ */ -#include "runtimeappend.h" +#include "compat/pg_compat.h" + +#include "runtime_append.h" -#include "postgres.h" -#include "utils/memutils.h" #include "utils/guc.h" @@ -23,25 +23,25 @@ CustomExecMethods runtimeappend_exec_methods; void -init_runtimeappend_static_data(void) +init_runtime_append_static_data(void) { - runtimeappend_path_methods.CustomName = "RuntimeAppend"; - runtimeappend_path_methods.PlanCustomPath = create_runtimeappend_plan; + runtimeappend_path_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_path_methods.PlanCustomPath = create_runtime_append_plan; - runtimeappend_plan_methods.CustomName = "RuntimeAppend"; - 
runtimeappend_plan_methods.CreateCustomScanState = runtimeappend_create_scan_state; + runtimeappend_plan_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_plan_methods.CreateCustomScanState = runtime_append_create_scan_state; - runtimeappend_exec_methods.CustomName = "RuntimeAppend"; - runtimeappend_exec_methods.BeginCustomScan = runtimeappend_begin; - runtimeappend_exec_methods.ExecCustomScan = runtimeappend_exec; - runtimeappend_exec_methods.EndCustomScan = runtimeappend_end; - runtimeappend_exec_methods.ReScanCustomScan = runtimeappend_rescan; + runtimeappend_exec_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_exec_methods.BeginCustomScan = runtime_append_begin; + runtimeappend_exec_methods.ExecCustomScan = runtime_append_exec; + runtimeappend_exec_methods.EndCustomScan = runtime_append_end; + runtimeappend_exec_methods.ReScanCustomScan = runtime_append_rescan; runtimeappend_exec_methods.MarkPosCustomScan = NULL; runtimeappend_exec_methods.RestrPosCustomScan = NULL; - runtimeappend_exec_methods.ExplainCustomScan = runtimeappend_explain; + runtimeappend_exec_methods.ExplainCustomScan = runtime_append_explain; DefineCustomBoolVariable("pg_pathman.enable_runtimeappend", - "Enables the planner's use of RuntimeAppend custom node.", + "Enables the planner's use of " RUNTIME_APPEND_NODE_NAME " custom node.", NULL, &pg_pathman_enable_runtimeappend, true, @@ -50,13 +50,15 @@ init_runtimeappend_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&runtimeappend_plan_methods); } Path * -create_runtimeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel) +create_runtime_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel) { return create_append_path_common(root, inner_append, param_info, @@ -66,9 +68,9 @@ create_runtimeappend_path(PlannerInfo *root, } Plan * -create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath 
*best_path, List *tlist, - List *clauses, List *custom_plans) +create_runtime_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans) { return create_append_plan_common(root, rel, best_path, tlist, @@ -77,7 +79,7 @@ create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, } Node * -runtimeappend_create_scan_state(CustomScan *node) +runtime_append_create_scan_state(CustomScan *node) { return create_append_scan_state_common(node, &runtimeappend_exec_methods, @@ -85,7 +87,7 @@ runtimeappend_create_scan_state(CustomScan *node) } void -runtimeappend_begin(CustomScanState *node, EState *estate, int eflags) +runtime_append_begin(CustomScanState *node, EState *estate, int eflags) { begin_append_common(node, estate, eflags); } @@ -94,63 +96,53 @@ static void fetch_next_tuple(CustomScanState *node) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - TupleTableSlot *slot = NULL; while (scan_state->running_idx < scan_state->ncur_plans) { ChildScanCommon child = scan_state->cur_plans[scan_state->running_idx]; PlanState *state = child->content.plan_state; - bool quals; for (;;) { - slot = ExecProcNode(state); + TupleTableSlot *slot = ExecProcNode(state); if (TupIsNull(slot)) break; - node->ss.ps.ps_ExprContext->ecxt_scantuple = slot; - quals = ExecQual(scan_state->custom_expr_states, - node->ss.ps.ps_ExprContext, false); - - ResetExprContext(node->ss.ps.ps_ExprContext); - - if (quals) - { - scan_state->slot = slot; - return; - } + scan_state->slot = slot; + return; } scan_state->running_idx++; } - scan_state->slot = slot; - return; + scan_state->slot = NULL; } TupleTableSlot * -runtimeappend_exec(CustomScanState *node) +runtime_append_exec(CustomScanState *node) { return exec_append_common(node, fetch_next_tuple); } void -runtimeappend_end(CustomScanState *node) +runtime_append_end(CustomScanState *node) { end_append_common(node); } void -runtimeappend_rescan(CustomScanState *node) 
+runtime_append_rescan(CustomScanState *node) { rescan_append_common(node); } void -runtimeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es) +runtime_append_explain(CustomScanState *node, List *ancestors, ExplainState *es) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - explain_append_common(node, scan_state->children_table, es); + explain_append_common(node, ancestors, es, + scan_state->children_table, + scan_state->custom_exprs); } diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index ad638933..5edd803c 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -3,28 +3,34 @@ * runtime_merge_append.c * RuntimeMergeAppend node's function definitions and global variables * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" + #include "runtime_merge_append.h" -#include "pathman.h" #include "postgres.h" #include "catalog/pg_collation.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" -#include "optimizer/clauses.h" +#include "nodes/plannodes.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/cost.h" +#include "optimizer/var.h" +#endif #include "optimizer/planmain.h" #include "optimizer/tlist.h" -#include "optimizer/var.h" #include "utils/builtins.h" #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/typcache.h" -#include "utils/memutils.h" #include "utils/ruleutils.h" #include "lib/binaryheap.h" @@ -189,23 +195,23 @@ unpack_runtimemergeappend_private(RuntimeMergeAppendState *scan_state, void init_runtime_merge_append_static_data(void) { - runtime_merge_append_path_methods.CustomName = "RuntimeMergeAppend"; - 
runtime_merge_append_path_methods.PlanCustomPath = create_runtimemergeappend_plan; + runtime_merge_append_path_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_path_methods.PlanCustomPath = create_runtime_merge_append_plan; - runtime_merge_append_plan_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_plan_methods.CreateCustomScanState = runtimemergeappend_create_scan_state; + runtime_merge_append_plan_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_plan_methods.CreateCustomScanState = runtime_merge_append_create_scan_state; - runtime_merge_append_exec_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_exec_methods.BeginCustomScan = runtimemergeappend_begin; - runtime_merge_append_exec_methods.ExecCustomScan = runtimemergeappend_exec; - runtime_merge_append_exec_methods.EndCustomScan = runtimemergeappend_end; - runtime_merge_append_exec_methods.ReScanCustomScan = runtimemergeappend_rescan; + runtime_merge_append_exec_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_exec_methods.BeginCustomScan = runtime_merge_append_begin; + runtime_merge_append_exec_methods.ExecCustomScan = runtime_merge_append_exec; + runtime_merge_append_exec_methods.EndCustomScan = runtime_merge_append_end; + runtime_merge_append_exec_methods.ReScanCustomScan = runtime_merge_append_rescan; runtime_merge_append_exec_methods.MarkPosCustomScan = NULL; runtime_merge_append_exec_methods.RestrPosCustomScan = NULL; - runtime_merge_append_exec_methods.ExplainCustomScan = runtimemergeappend_explain; + runtime_merge_append_exec_methods.ExplainCustomScan = runtime_merge_append_explain; DefineCustomBoolVariable("pg_pathman.enable_runtimemergeappend", - "Enables the planner's use of RuntimeMergeAppend custom node.", + "Enables the planner's use of " RUNTIME_MERGE_APPEND_NODE_NAME " custom node.", NULL, &pg_pathman_enable_runtime_merge_append, true, @@ -214,13 +220,15 @@ 
init_runtime_merge_append_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&runtime_merge_append_plan_methods); } Path * -create_runtimemergeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel) +create_runtime_merge_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel) { RelOptInfo *rel = inner_append->path.parent; Path *path; @@ -243,9 +251,9 @@ create_runtimemergeappend_path(PlannerInfo *root, } Plan * -create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans) +create_runtime_merge_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans) { CustomScan *node; Plan *plan; @@ -335,7 +343,7 @@ create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, } Node * -runtimemergeappend_create_scan_state(CustomScan *node) +runtime_merge_append_create_scan_state(CustomScan *node) { Node *state; state = create_append_scan_state_common(node, @@ -348,7 +356,7 @@ runtimemergeappend_create_scan_state(CustomScan *node) } void -runtimemergeappend_begin(CustomScanState *node, EState *estate, int eflags) +runtime_merge_append_begin(CustomScanState *node, EState *estate, int eflags) { begin_append_common(node, estate, eflags); } @@ -366,7 +374,8 @@ fetch_next_tuple(CustomScanState *node) for (i = 0; i < scan_state->rstate.ncur_plans; i++) { ChildScanCommon child = scan_state->rstate.cur_plans[i]; - PlanState *ps = child->content.plan_state; + + ps = child->content.plan_state; Assert(child->content_type == CHILD_PLAN_STATE); @@ -384,8 +393,6 @@ fetch_next_tuple(CustomScanState *node) for (;;) { - bool quals; - scan_state->ms_slots[i] = ExecProcNode(ps); if (TupIsNull(scan_state->ms_slots[i])) @@ -394,17 +401,8 @@ fetch_next_tuple(CustomScanState *node) break; } - node->ss.ps.ps_ExprContext->ecxt_scantuple = 
scan_state->ms_slots[i]; - quals = ExecQual(rstate->custom_expr_states, - node->ss.ps.ps_ExprContext, false); - - ResetExprContext(node->ss.ps.ps_ExprContext); - - if (quals) - { - binaryheap_replace_first(scan_state->ms_heap, Int32GetDatum(i)); - break; - } + binaryheap_replace_first(scan_state->ms_heap, Int32GetDatum(i)); + break; } } @@ -412,24 +410,22 @@ fetch_next_tuple(CustomScanState *node) { /* All the subplans are exhausted, and so is the heap */ rstate->slot = NULL; - return; } else { i = DatumGetInt32(binaryheap_first(scan_state->ms_heap)); rstate->slot = scan_state->ms_slots[i]; - return; } } TupleTableSlot * -runtimemergeappend_exec(CustomScanState *node) +runtime_merge_append_exec(CustomScanState *node) { return exec_append_common(node, fetch_next_tuple); } void -runtimemergeappend_end(CustomScanState *node) +runtime_merge_append_end(CustomScanState *node) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; @@ -440,7 +436,7 @@ runtimemergeappend_end(CustomScanState *node) } void -runtimemergeappend_rescan(CustomScanState *node) +runtime_merge_append_rescan(CustomScanState *node) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; int nplans; @@ -486,11 +482,13 @@ runtimemergeappend_rescan(CustomScanState *node) } void -runtimemergeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es) +runtime_merge_append_explain(CustomScanState *node, List *ancestors, ExplainState *es) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; - explain_append_common(node, scan_state->rstate.children_table, es); + explain_append_common(node, ancestors, es, + scan_state->rstate.children_table, + scan_state->rstate.custom_exprs); /* We should print sort keys as well */ show_sort_group_keys((PlanState *) &node->ss.ps, "Sort Key", @@ -724,10 +722,11 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys, foreach(j, ec->ec_members) { - EquivalenceMember *em = 
(EquivalenceMember *) lfirst(j); List *exprvars; ListCell *k; + em = (EquivalenceMember *) lfirst(j); + /* * We shouldn't be trying to sort by an equivalence class that * contains a constant, so no need to consider such cases any @@ -745,9 +744,9 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys, continue; sortexpr = em->em_expr; - exprvars = pull_var_clause((Node *) sortexpr, - PVC_INCLUDE_AGGREGATES, - PVC_INCLUDE_PLACEHOLDERS); + exprvars = pull_var_clause_compat((Node *) sortexpr, + PVC_INCLUDE_AGGREGATES, + PVC_INCLUDE_PLACEHOLDERS); foreach(k, exprvars) { if (!tlist_member_ignore_relabel(lfirst(k), tlist)) @@ -771,8 +770,8 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys, { /* copy needed so we don't modify input's tlist below */ tlist = copyObject(tlist); - lefttree = (Plan *) make_result(root, tlist, NULL, - lefttree); + lefttree = (Plan *) make_result_compat(root, tlist, NULL, + lefttree); } /* Don't bother testing is_projection_capable_plan again */ @@ -901,9 +900,15 @@ show_sort_group_keys(PlanState *planstate, const char *qlabel, initStringInfo(&sortkeybuf); /* Set up deparsing context */ +#if PG_VERSION_NUM >= 130000 + context = set_deparse_context_plan(es->deparse_cxt, + plan, + ancestors); +#else context = set_deparse_context_planstate(es->deparse_cxt, (Node *) planstate, ancestors); +#endif useprefix = (list_length(es->rtable) > 1 || es->verbose); for (keyno = 0; keyno < nkeys; keyno++) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c new file mode 100644 index 00000000..83bfa680 --- /dev/null +++ b/src/utility_stmt_hooking.c @@ -0,0 +1,954 @@ +/* ------------------------------------------------------------------------ + * + * utility_stmt_hooking.c + * Override COPY TO/FROM and ALTER TABLE ... 
RENAME statements + * for partitioned tables + * + * Copyright (c) 2016-2020, Postgres Professional + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * ------------------------------------------------------------------------ + */ + +#include "compat/debug_compat_features.h" +#include "compat/pg_compat.h" +#include "init.h" +#include "utility_stmt_hooking.h" +#include "partition_filter.h" + +#include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" +#include "access/table.h" +#endif +#include "access/sysattr.h" +#include "access/xact.h" +#include "catalog/namespace.h" +#include "commands/copy.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "commands/copyfrom_internal.h" +#endif +#include "commands/defrem.h" +#include "commands/trigger.h" +#include "commands/tablecmds.h" +#include "foreign/fdwapi.h" +#include "miscadmin.h" +#include "nodes/makefuncs.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/rls.h" + +/* we avoid includig libpq.h because it requires openssl.h */ +#include "libpq/pqcomm.h" +extern PGDLLIMPORT ProtocolVersion FrontendProtocol; +extern void pq_endmsgread(void); + +/* Determine whether we should enable COPY or not (PostgresPro has a fix) */ +#if defined(WIN32) && \ + (!defined(ENABLE_PGPRO_PATCHES) || \ + !defined(ENABLE_PATHMAN_AWARE_COPY_WIN32) || \ + !defined(PGPRO_PATHMAN_AWARE_COPY)) +#define DISABLE_PATHMAN_COPY +#endif + +/* + * While building PostgreSQL on Windows the msvc compiler produces .def file + * which contains all the symbols that were declared as external except the ones + * that were declared but not defined. We redefine variables below to prevent + * 'unresolved symbol' errors on Windows. 
But we have to disable COPY feature + * on Windows. + */ +#ifdef DISABLE_PATHMAN_COPY +bool XactReadOnly = false; +ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; +#endif + + +#define PATHMAN_COPY_READ_LOCK AccessShareLock +#define PATHMAN_COPY_WRITE_LOCK RowExclusiveLock + + +static uint64 PathmanCopyFrom( +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate, +#else + CopyState cstate, +#endif + Relation parent_rel, + List *range_table, + bool old_protocol); + +static void prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + +static void finish_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + + +/* + * Is pg_pathman supposed to handle this COPY stmt? + */ +bool +is_pathman_related_copy(Node *parsetree) +{ + CopyStmt *copy_stmt = (CopyStmt *) parsetree; + Oid parent_relid; + + Assert(IsPathmanReady()); + + if (!IsOverrideCopyEnabled()) + { + elog(DEBUG1, "COPY statement hooking is disabled"); + return false; + } + + /* Check that it's a CopyStmt */ + if (!IsA(parsetree, CopyStmt)) + return false; + + /* Also check that stmt->relation exists */ + if (!copy_stmt->relation) + return false; + + /* Get partition's Oid while locking it */ + parent_relid = RangeVarGetRelid(copy_stmt->relation, + (copy_stmt->is_from ? + PATHMAN_COPY_WRITE_LOCK : + PATHMAN_COPY_READ_LOCK), + true); + + /* Skip relation if it does not exist (for Citus compatibility) */ + if (!OidIsValid(parent_relid)) + return false; + + /* Check that relation is partitioned */ + if (has_pathman_relation_info(parent_relid)) + { + ListCell *lc; + + /* Analyze options list */ + foreach (lc, copy_stmt->options) + { + DefElem *defel = (DefElem *) lfirst(lc); + + /* We do not support freeze */ + /* + * It would be great to allow copy.c extract option value and + * check it ready. 
However, there is no possibility (hooks) to do + * that before messaging 'ok, begin streaming data' to the client, + * which is ugly and confusing: e.g. it would require us to + * actually send something in regression tests before we notice + * the error. + */ + if (strcmp(defel->defname, "freeze") == 0 && defGetBoolean(defel)) + elog(ERROR, "freeze is not supported for partitioned tables"); + } + + /* Emit ERROR if we can't see the necessary symbols */ + #ifdef DISABLE_PATHMAN_COPY + elog(ERROR, "COPY is not supported for partitioned tables on Windows"); + #else + elog(DEBUG1, "Overriding default behavior for COPY [%u]", + parent_relid); + #endif + + return true; + } + + return false; +} + +/* + * Is pg_pathman supposed to handle this table rename stmt? + */ +bool +is_pathman_related_table_rename(Node *parsetree, + Oid *relation_oid_out, /* ret value #1 */ + bool *is_parent_out) /* ret value #2 */ +{ + RenameStmt *rename_stmt = (RenameStmt *) parsetree; + Oid relation_oid, + parent_relid; + + Assert(IsPathmanReady()); + + /* Set default values */ + if (relation_oid_out) *relation_oid_out = InvalidOid; + + if (!IsA(parsetree, RenameStmt)) + return false; + + /* Are we going to rename some table? */ + if (rename_stmt->renameType != OBJECT_TABLE) + return false; + + /* Fetch Oid of this relation */ + relation_oid = RangeVarGetRelid(rename_stmt->relation, + AccessShareLock, + rename_stmt->missing_ok); + + /* Check ALTER TABLE ... IF EXISTS of nonexistent table */ + if (rename_stmt->missing_ok && relation_oid == InvalidOid) + return false; + + /* Assume it's a parent */ + if (has_pathman_relation_info(relation_oid)) + { + if (relation_oid_out) + *relation_oid_out = relation_oid; + if (is_parent_out) + *is_parent_out = true; + return true; + } + + /* Assume it's a partition, fetch its parent */ + parent_relid = get_parent_of_partition(relation_oid); + if (!OidIsValid(parent_relid)) + return false; + + /* Is parent partitioned? 
*/ + if (has_pathman_relation_info(parent_relid)) + { + if (relation_oid_out) + *relation_oid_out = relation_oid; + if (is_parent_out) + *is_parent_out = false; + return true; + } + + return false; +} + +/* + * Is pg_pathman supposed to handle this ALTER COLUMN TYPE stmt? + */ +bool +is_pathman_related_alter_column_type(Node *parsetree, + Oid *parent_relid_out, + AttrNumber *attr_number_out, + PartType *part_type_out) +{ + AlterTableStmt *alter_table_stmt = (AlterTableStmt *) parsetree; + ListCell *lc; + Oid parent_relid; + bool result = false; + PartRelationInfo *prel; + + Assert(IsPathmanReady()); + + if (!IsA(alter_table_stmt, AlterTableStmt)) + return false; + + /* Are we going to modify some table? */ +#if PG_VERSION_NUM >= 140000 + if (alter_table_stmt->objtype != OBJECT_TABLE) +#else + if (alter_table_stmt->relkind != OBJECT_TABLE) +#endif + return false; + + /* Assume it's a parent, fetch its Oid */ + parent_relid = RangeVarGetRelid(alter_table_stmt->relation, + AccessShareLock, + alter_table_stmt->missing_ok); + + /* Check ALTER TABLE ... IF EXISTS of nonexistent table */ + if (alter_table_stmt->missing_ok && parent_relid == InvalidOid) + return false; + + /* Is parent partitioned? */ + if ((prel = get_pathman_relation_info(parent_relid)) != NULL) + { + /* Return 'parent_relid' and 'prel->parttype' */ + if (parent_relid_out) *parent_relid_out = parent_relid; + if (part_type_out) *part_type_out = prel->parttype; + } + else return false; + + /* Examine command list */ + foreach (lc, alter_table_stmt->cmds) + { + AlterTableCmd *alter_table_cmd = (AlterTableCmd *) lfirst(lc); + AttrNumber attnum; + int adjusted_attnum; + + if (!IsA(alter_table_cmd, AlterTableCmd)) + continue; + + /* Is it an ALTER COLUMN TYPE statement? */ + if (alter_table_cmd->subtype != AT_AlterColumnType) + continue; + + /* Is it a column that used in expression? 
*/ + attnum = get_attnum(parent_relid, alter_table_cmd->name); + adjusted_attnum = attnum - FirstLowInvalidHeapAttributeNumber; + if (!bms_is_member(adjusted_attnum, prel->expr_atts)) + continue; + + /* Return 'attr_number_out' if asked to */ + if (attr_number_out) *attr_number_out = attnum; + + /* Success! */ + result = true; + } + + close_pathman_relation_info(prel); + + return result; +} + +/* + * PathmanCopyGetAttnums - build an integer list of attnums to be copied + * + * The input attnamelist is either the user-specified column list, + * or NIL if there was none (in which case we want all the non-dropped + * columns). + * + * rel can be NULL ... it's only used for error reports. + */ +static List * +PathmanCopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) +{ + List *attnums = NIL; + + if (attnamelist == NIL) + { + /* Generate default column list */ + int attr_count = tupDesc->natts; + int i; + + for (i = 0; i < attr_count; i++) + { + if (TupleDescAttr(tupDesc, i)->attisdropped) + continue; + attnums = lappend_int(attnums, i + 1); + } + } + else + { + /* Validate the user-supplied list and extract attnums */ + ListCell *l; + + foreach(l, attnamelist) + { + char *name = strVal(lfirst(l)); + int attnum; + int i; + + /* Lookup column name */ + attnum = InvalidAttrNumber; + for (i = 0; i < tupDesc->natts; i++) + { + Form_pg_attribute att = TupleDescAttr(tupDesc, i); + + if (att->attisdropped) + continue; + if (namestrcmp(&(att->attname), name) == 0) + { + attnum = att->attnum; + break; + } + } + if (attnum == InvalidAttrNumber) + { + if (rel != NULL) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" of relation \"%s\" does not exist", + name, RelationGetRelationName(rel)))); + else + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" does not exist", + name))); + } + /* Check for duplicates */ + if (list_member_int(attnums, attnum)) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_COLUMN), + 
errmsg("column \"%s\" specified more than once", + name))); + attnums = lappend_int(attnums, attnum); + } + } + + return attnums; +} + +/* + * Execute COPY TO/FROM statement for a partitioned table. + * NOTE: based on DoCopy() (see copy.c). + */ +void +PathmanDoCopy(const CopyStmt *stmt, + const char *queryString, + int stmt_location, + int stmt_len, + uint64 *processed) +{ +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate; +#else + CopyState cstate; +#endif + ParseState *pstate; + Relation rel; + List *range_table = NIL; + bool is_from = stmt->is_from, + pipe = (stmt->filename == NULL), + is_old_protocol = PG_PROTOCOL_MAJOR(FrontendProtocol) < 3 && pipe; + + /* Disallow COPY TO/FROM file or program except to superusers. */ + if (!pipe && !superuser()) + { + if (stmt->is_program) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to COPY to or from an external program"), + errhint("Anyone can COPY to stdout or from stdin. " + "psql's \\copy command also works for anyone."))); + else + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to COPY to or from a file"), + errhint("Anyone can COPY to stdout or from stdin. " + "psql's \\copy command also works for anyone."))); + } + + pstate = make_parsestate(NULL); + pstate->p_sourcetext = queryString; + + /* Check that we have a relation */ + if (stmt->relation) + { + TupleDesc tupDesc; + AclMode required_access = (is_from ? 
ACL_INSERT : ACL_SELECT); + List *attnums; + ListCell *cur; + RangeTblEntry *rte; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *perminfo; +#endif + + Assert(!stmt->query); + + /* Open the relation (we've locked it in is_pathman_related_copy()) */ + rel = heap_openrv_compat(stmt->relation, NoLock); + + rte = makeNode(RangeTblEntry); + rte->rtekind = RTE_RELATION; + rte->relid = RelationGetRelid(rel); + rte->relkind = rel->rd_rel->relkind; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + pstate->p_rtable = lappend(pstate->p_rtable, rte); + perminfo = addRTEPermissionInfo(&pstate->p_rteperminfos, rte); + perminfo->requiredPerms = required_access; +#else + rte->requiredPerms = required_access; +#endif + range_table = list_make1(rte); + + tupDesc = RelationGetDescr(rel); + attnums = PathmanCopyGetAttnums(tupDesc, rel, stmt->attlist); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + foreach(cur, attnums) + { + int attno; + Bitmapset **bms; + + attno = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; + bms = is_from ? 
&perminfo->insertedCols : &perminfo->selectedCols; + + *bms = bms_add_member(*bms, attno); + } + ExecCheckPermissions(pstate->p_rtable, list_make1(perminfo), true); +#else + foreach(cur, attnums) + { + int attnum = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; + + if (is_from) + rte->insertedCols = bms_add_member(rte->insertedCols, attnum); + else + rte->selectedCols = bms_add_member(rte->selectedCols, attnum); + } + ExecCheckRTPerms(range_table, true); +#endif + + /* Disable COPY FROM if table has RLS */ + if (is_from && check_enable_rls(rte->relid, InvalidOid, false) == RLS_ENABLED) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("COPY FROM not supported with row-level security"), + errhint("Use INSERT statements instead."))); + } + + /* Disable COPY TO */ + if (!is_from) + { + ereport(WARNING, + (errmsg("COPY TO will only select rows from parent table \"%s\"", + RelationGetRelationName(rel)), + errhint("Consider using the COPY (SELECT ...) TO variant."))); + } + } + + /* This should never happen (see is_pathman_related_copy()) */ + else elog(ERROR, "error in function " CppAsString(PathmanDoCopy)); + + if (is_from) + { + /* check read-only transaction and parallel mode */ + if (XactReadOnly && !rel->rd_islocaltemp) + PreventCommandIfReadOnly("COPY FROM"); + PreventCommandIfParallelMode("COPY FROM"); + + cstate = BeginCopyFromCompat(pstate, rel, stmt->filename, + stmt->is_program, NULL, stmt->attlist, + stmt->options); + *processed = PathmanCopyFrom(cstate, rel, range_table, is_old_protocol); + EndCopyFrom(cstate); + } + else + { +#if PG_VERSION_NUM >= 160000 /* for commit f75cec4fff87 */ + /* + * Forget current RangeTblEntries and RTEPermissionInfos. + * Standard DoCopy will create new ones. 
+ */ + pstate->p_rtable = NULL; + pstate->p_rteperminfos = NULL; +#endif + /* Call standard DoCopy using a new CopyStmt */ + DoCopyCompat(pstate, stmt, stmt_location, stmt_len, processed); + } + + /* Close the relation, but keep it locked */ + heap_close_compat(rel, (is_from ? NoLock : PATHMAN_COPY_READ_LOCK)); +} + +/* + * Copy FROM file to relation. + */ +static uint64 +PathmanCopyFrom( +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate, +#else + CopyState cstate, +#endif + Relation parent_rel, + List *range_table, bool old_protocol) +{ + HeapTuple tuple; + TupleDesc tupDesc; + Datum *values; + bool *nulls; + + ResultPartsStorage parts_storage; + ResultRelInfo *parent_rri; + Oid parent_relid = RelationGetRelid(parent_rel); + + MemoryContext query_mcxt = CurrentMemoryContext; + EState *estate = CreateExecutorState(); /* for ExecConstraints() */ + TupleTableSlot *myslot; + + uint64 processed = 0; + + tupDesc = RelationGetDescr(parent_rel); + + parent_rri = makeNode(ResultRelInfo); + InitResultRelInfoCompat(parent_rri, + parent_rel, + 1, /* dummy rangetable index */ + 0); + ExecOpenIndices(parent_rri, false); + +#if PG_VERSION_NUM >= 140000 /* reworked in 1375422c7826 */ + /* + * Call ExecInitRangeTable() should be first because in 14+ it initializes + * field "estate->es_result_relations": + */ +#if PG_VERSION_NUM >= 160000 + ExecInitRangeTable(estate, range_table, cstate->rteperminfos); +#else + ExecInitRangeTable(estate, range_table); +#endif + estate->es_result_relations = + (ResultRelInfo **) palloc0(list_length(range_table) * sizeof(ResultRelInfo *)); + estate->es_result_relations[0] = parent_rri; + /* + * Saving in the list allows to avoid needlessly traversing the whole + * array when only a few of its entries are possibly non-NULL. 
+ */ + estate->es_opened_result_relations = + lappend(estate->es_opened_result_relations, parent_rri); + estate->es_result_relation_info = parent_rri; +#else + estate->es_result_relations = parent_rri; + estate->es_num_result_relations = 1; + estate->es_result_relation_info = parent_rri; +#if PG_VERSION_NUM >= 120000 + ExecInitRangeTable(estate, range_table); +#else + estate->es_range_table = range_table; +#endif +#endif + /* Initialize ResultPartsStorage */ + init_result_parts_storage(&parts_storage, + parent_relid, parent_rri, + estate, CMD_INSERT, + RPS_CLOSE_RELATIONS, + RPS_DEFAULT_SPECULATIVE, + RPS_RRI_CB(prepare_rri_for_copy, cstate), + RPS_RRI_CB(finish_rri_for_copy, NULL)); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* ResultRelInfo of partitioned table. */ + parts_storage.init_rri = parent_rri; + + /* + * Copy the RTEPermissionInfos into estate as well, so that + * scan_result_parts_storage() et al will work correctly. + */ + estate->es_rteperminfos = cstate->rteperminfos; +#endif + + /* Set up a tuple slot too */ + myslot = ExecInitExtraTupleSlotCompat(estate, NULL, &TTSOpsHeapTuple); + /* Triggers might need a slot as well */ +#if PG_VERSION_NUM < 120000 + estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate, tupDesc, nothing_here); +#endif + + /* Prepare to catch AFTER triggers. */ + AfterTriggerBeginQuery(); + + /* + * Check BEFORE STATEMENT insertion triggers. It's debatable whether we + * should do this for COPY, since it's not really an "INSERT" statement as + * such. However, executing these triggers maintains consistency with the + * EACH ROW triggers that we already fire on COPY. 
+ */ + ExecBSInsertTriggers(estate, parent_rri); + + values = (Datum *) palloc(tupDesc->natts * sizeof(Datum)); + nulls = (bool *) palloc(tupDesc->natts * sizeof(bool)); + + for (;;) + { + TupleTableSlot *slot; + bool skip_tuple = false; +#if PG_VERSION_NUM < 120000 + Oid tuple_oid = InvalidOid; +#endif + ExprContext *econtext = GetPerTupleExprContext(estate); + + ResultRelInfoHolder *rri_holder; + ResultRelInfo *child_rri; + + CHECK_FOR_INTERRUPTS(); + + ResetPerTupleExprContext(estate); + + /* Switch into per tuple memory context */ + MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + + if (!NextCopyFromCompat(cstate, econtext, values, nulls, &tuple_oid)) + break; + + /* We can form the input tuple */ + tuple = heap_form_tuple(tupDesc, values, nulls); + +#if PG_VERSION_NUM < 120000 + if (tuple_oid != InvalidOid) + HeapTupleSetOid(tuple, tuple_oid); +#endif + + /* Place tuple in tuple slot --- but slot shouldn't free it */ + slot = myslot; + ExecSetSlotDescriptor(slot, tupDesc); +#if PG_VERSION_NUM >= 120000 + ExecStoreHeapTuple(tuple, slot, false); +#else + ExecStoreTuple(tuple, slot, InvalidBuffer, false); +#endif + + /* Search for a matching partition */ + rri_holder = select_partition_for_insert(estate, &parts_storage, slot); + child_rri = rri_holder->result_rel_info; + + /* Magic: replace parent's ResultRelInfo with ours */ + estate->es_result_relation_info = child_rri; + + /* + * Constraints might reference the tableoid column, so initialize + * t_tableOid before evaluating them. 
+ */ + tuple->t_tableOid = RelationGetRelid(child_rri->ri_RelationDesc); + + /* If there's a transform map, rebuild the tuple */ + if (rri_holder->tuple_map) + { + HeapTuple tuple_old; + + tuple_old = tuple; +#if PG_VERSION_NUM >= 120000 + tuple = execute_attr_map_tuple(tuple, rri_holder->tuple_map); +#else + tuple = do_convert_tuple(tuple, rri_holder->tuple_map); +#endif + heap_freetuple(tuple_old); + } + + /* Now we can set proper tuple descriptor according to child relation */ + ExecSetSlotDescriptor(slot, RelationGetDescr(child_rri->ri_RelationDesc)); +#if PG_VERSION_NUM >= 120000 + ExecStoreHeapTuple(tuple, slot, false); +#else + ExecStoreTuple(tuple, slot, InvalidBuffer, false); +#endif + + /* Triggers and stuff need to be invoked in query context. */ + MemoryContextSwitchTo(query_mcxt); + + /* BEFORE ROW INSERT Triggers */ + if (child_rri->ri_TrigDesc && + child_rri->ri_TrigDesc->trig_insert_before_row) + { +#if PG_VERSION_NUM >= 120000 + if (!ExecBRInsertTriggers(estate, child_rri, slot)) + skip_tuple = true; + else /* trigger might have changed tuple */ + tuple = ExecFetchSlotHeapTuple(slot, false, NULL); +#else + slot = ExecBRInsertTriggers(estate, child_rri, slot); + + if (slot == NULL) /* "do nothing" */ + skip_tuple = true; + else /* trigger might have changed tuple */ + { + tuple = ExecMaterializeSlot(slot); + } +#endif + } + + /* Proceed if we still have a tuple */ + if (!skip_tuple) + { + List *recheckIndexes = NIL; + + /* Check the constraints of the tuple */ + if (child_rri->ri_RelationDesc->rd_att->constr) + ExecConstraints(child_rri, slot, estate); + + /* Handle local tables */ + if (!child_rri->ri_FdwRoutine) + { + /* OK, now store the tuple... 
*/ + simple_heap_insert(child_rri->ri_RelationDesc, tuple); +#if PG_VERSION_NUM >= 120000 /* since 12, tid lives directly in slot */ + ItemPointerCopy(&tuple->t_self, &slot->tts_tid); + /* and we must stamp tableOid as we go around table_tuple_insert */ + slot->tts_tableOid = RelationGetRelid(child_rri->ri_RelationDesc); +#endif + + /* ... and create index entries for it */ + if (child_rri->ri_NumIndices > 0) + recheckIndexes = ExecInsertIndexTuplesCompat(estate->es_result_relation_info, + slot, &(tuple->t_self), estate, false, false, NULL, NIL, false); + } +#ifdef PG_SHARDMAN + /* Handle foreign tables */ + else + { + child_rri->ri_FdwRoutine->ForeignNextCopyFrom(estate, + child_rri, + cstate); + } +#endif + + /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ +#if PG_VERSION_NUM >= 120000 + ExecARInsertTriggersCompat(estate, child_rri, slot, + recheckIndexes, NULL); +#else + ExecARInsertTriggersCompat(estate, child_rri, tuple, + recheckIndexes, NULL); +#endif + + list_free(recheckIndexes); + + /* + * We count only tuples not suppressed by a BEFORE INSERT trigger; + * this is the same definition used by execMain.c for counting + * tuples inserted by an INSERT command. 
+ */ + processed++; + } + } + + /* Switch back to query context */ + MemoryContextSwitchTo(query_mcxt); + + /* Required for old protocol */ + if (old_protocol) + pq_endmsgread(); + + /* Execute AFTER STATEMENT insertion triggers (FIXME: NULL transition) */ + ExecASInsertTriggersCompat(estate, parent_rri, NULL); + + /* Handle queued AFTER triggers */ + AfterTriggerEndQuery(estate); + + pfree(values); + pfree(nulls); + + /* Release resources for tuple table */ + ExecResetTupleTable(estate->es_tupleTable, false); + + /* Close partitions and destroy hash table */ + fini_result_parts_storage(&parts_storage); + + /* Close parent's indices */ + ExecCloseIndices(parent_rri); + + /* Release an EState along with all remaining working storage */ + FreeExecutorState(estate); + + return processed; +} + +/* + * Init COPY FROM, if supported. + */ +static void +prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) +{ + ResultRelInfo *rri = rri_holder->result_rel_info; + FdwRoutine *fdw_routine = rri->ri_FdwRoutine; + + if (fdw_routine != NULL) + { + /* + * If this PostgreSQL edition has no idea about shardman, behave as usual: + * vanilla Postgres doesn't support COPY FROM to foreign partitions. + * However, shardman patches to core extend FDW API to allow it. + */ +#ifdef PG_SHARDMAN + /* shardman COPY FROM requested? */ + if (*find_rendezvous_variable( + "shardman_pathman_copy_from_rendezvous") != NULL && + FdwCopyFromIsSupported(fdw_routine)) + { + CopyState cstate = (CopyState) rps_storage->init_rri_holder_cb_arg; + ResultRelInfo *parent_rri = rps_storage->base_rri; + EState *estate = rps_storage->estate; + + fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_rri); + return; + } +#endif + + elog(ERROR, "cannot copy to foreign partition \"%s\"", + get_rel_name(RelationGetRelid(rri->ri_RelationDesc))); + } +} + +/* + * Shutdown FDWs. 
+ */ +static void +finish_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) +{ +#ifdef PG_SHARDMAN + ResultRelInfo *resultRelInfo = rri_holder->result_rel_info; + + if (resultRelInfo->ri_FdwRoutine) + resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom(rps_storage->estate, + resultRelInfo); +#endif +} + +/* + * Rename RANGE\HASH check constraint of a partition on table rename event. + */ +void +PathmanRenameConstraint(Oid partition_relid, /* partition Oid */ + const RenameStmt *rename_stmt) /* partition rename stmt */ +{ + char *old_constraint_name, + *new_constraint_name; + RenameStmt rename_con_stmt; + + /* Generate old constraint name */ + old_constraint_name = + build_check_constraint_name_relid_internal(partition_relid); + + /* Generate new constraint name */ + new_constraint_name = + build_check_constraint_name_relname_internal(rename_stmt->newname); + + /* Build check constraint RENAME statement */ + memset((void *) &rename_con_stmt, 0, sizeof(RenameStmt)); + NodeSetTag(&rename_con_stmt, T_RenameStmt); + rename_con_stmt.renameType = OBJECT_TABCONSTRAINT; + rename_con_stmt.relation = rename_stmt->relation; + rename_con_stmt.subname = old_constraint_name; + rename_con_stmt.newname = new_constraint_name; + rename_con_stmt.missing_ok = false; + + /* Finally, rename partitioning constraint */ + RenameConstraint(&rename_con_stmt); + + pfree(old_constraint_name); + pfree(new_constraint_name); + + /* Make changes visible */ + CommandCounterIncrement(); +} + +/* + * Rename auto naming sequence of a parent on table rename event. 
+ */ +void +PathmanRenameSequence(Oid parent_relid, /* parent Oid */ + const RenameStmt *rename_stmt) /* parent rename stmt */ +{ + char *old_seq_name, + *new_seq_name, + *seq_nsp_name; + RangeVar *seq_rv; + Oid seq_relid; + + /* Produce old & new names and RangeVar */ + seq_nsp_name = get_namespace_name(get_rel_namespace(parent_relid)); + old_seq_name = build_sequence_name_relid_internal(parent_relid); + new_seq_name = build_sequence_name_relname_internal(rename_stmt->newname); + seq_rv = makeRangeVar(seq_nsp_name, old_seq_name, -1); + + /* Fetch Oid of sequence */ + seq_relid = RangeVarGetRelid(seq_rv, AccessExclusiveLock, true); + + /* Do nothing if there's no naming sequence */ + if (!OidIsValid(seq_relid)) + return; + + /* Finally, rename auto naming sequence */ + RenameRelationInternalCompat(seq_relid, new_seq_name, false, false); + + pfree(seq_nsp_name); + pfree(old_seq_name); + pfree(new_seq_name); + pfree(seq_rv); + + /* Make changes visible */ + CommandCounterIncrement(); +} diff --git a/src/utils.c b/src/utils.c index 4176cdd8..9402d618 100644 --- a/src/utils.c +++ b/src/utils.c @@ -4,71 +4,51 @@ * definitions of various support functions * * Copyright (c) 2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ +#include "pathman.h" #include "utils.h" #include "access/htup_details.h" #include "access/nbtree.h" #include "access/sysattr.h" -#include "access/xact.h" -#include "catalog/heap.h" +#include "catalog/namespace.h" +#include "catalog/pg_class.h" +#include "catalog/pg_operator.h" #include "catalog/pg_type.h" -#include "catalog/pg_extension.h" -#include "commands/extension.h" #include "miscadmin.h" -#include "optimizer/var.h" -#include "optimizer/restrictinfo.h" +#include "nodes/nodeFuncs.h" +#include "parser/parse_coerce.h" #include "parser/parse_oper.h" 
+#include "utils/array.h" #include "utils/builtins.h" +#include "utils/datetime.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM >= 100000 +#include "utils/regproc.h" +#endif -#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) -#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) - - -static bool clause_contains_params_walker(Node *node, void *context); -static void change_varnos_in_restrinct_info(RestrictInfo *rinfo, - change_varno_context *context); -static bool change_varno_walker(Node *node, change_varno_context *context); -static List *get_tableoids_list(List *tlist); -static void lock_rows_visitor(Plan *plan, void *context); - - -/* - * Execute 'cb_proc' on 'xact_context' reset. - */ -void -execute_on_xact_mcxt_reset(MemoryContext xact_context, - MemoryContextCallbackFunction cb_proc, - void *arg) +static const Node * +drop_irrelevant_expr_wrappers(const Node *expr) { - MemoryContextCallback *mcxt_cb = MemoryContextAlloc(xact_context, - sizeof(MemoryContextCallback)); - - /* Initialize MemoryContextCallback */ - mcxt_cb->arg = arg; - mcxt_cb->func = cb_proc; - mcxt_cb->next = NULL; - - MemoryContextRegisterResetCallback(xact_context, mcxt_cb); -} + switch (nodeTag(expr)) + { + /* Strip relabeling */ + case T_RelabelType: + return (const Node *) ((const RelabelType *) expr)->arg; -/* - * Check whether clause contains PARAMs or not - */ -bool -clause_contains_params(Node *clause) -{ - return expression_tree_walker(clause, - clause_contains_params_walker, - NULL); + /* no special actions required */ + default: + return expr; + } } static bool @@ -76,686 +56,485 @@ clause_contains_params_walker(Node *node, void *context) { if (node == NULL) return false; + if (IsA(node, Param)) return true; + return expression_tree_walker(node, clause_contains_params_walker, context); } /* - * Extract target entries with resnames beginning with TABLEOID_STR - * and 
var->varoattno == TableOidAttributeNumber + * Check whether clause contains PARAMs or not. */ -static List * -get_tableoids_list(List *tlist) +bool +clause_contains_params(Node *clause) { - List *result = NIL; - ListCell *lc; - - foreach (lc, tlist) - { - TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *var = (Var *) te->expr; - - if (!IsA(var, Var)) - continue; - - if (strlen(te->resname) > TABLEOID_STR_BASE_LEN && - 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN) && - var->varoattno == TableOidAttributeNumber) - { - result = lappend(result, te); - } - } - - return result; + return expression_tree_walker(clause, + clause_contains_params_walker, + NULL); } /* - * Find 'TABLEOID_STR%u' attributes that were manually - * created for partitioned tables and replace Oids - * (used for '%u') with expected rc->rowmarkIds + * Check if this is a "date"-related type. */ -static void -lock_rows_visitor(Plan *plan, void *context) +bool +is_date_type_internal(Oid typid) { - List *rtable = (List *) context; - LockRows *lock_rows = (LockRows *) plan; - Plan *lock_child = outerPlan(plan); - List *tableoids; - ListCell *lc; + return typid == TIMESTAMPOID || + typid == TIMESTAMPTZOID || + typid == DATEOID; +} - if (!IsA(lock_rows, LockRows)) - return; +/* + * Check if user can alter/drop specified relation. This function is used to + * make sure that current user can change pg_pathman's config. Returns true + * if user can manage relation, false otherwise. + * + * XXX currently we just check if user is a table owner. Probably it's + * better to check user permissions in order to let other users participate. 
+ */ +bool +check_security_policy_internal(Oid relid, Oid role) +{ + Oid owner; - Assert(rtable && IsA(rtable, List) && lock_child); + /* Superuser is allowed to do anything */ + if (superuser()) + return true; - /* Select tableoid attributes that must be renamed */ - tableoids = get_tableoids_list(lock_child->targetlist); - if (!tableoids) - return; /* this LockRows has nothing to do with partitioned table */ + /* Fetch the owner */ + owner = get_rel_owner(relid); - foreach (lc, lock_rows->rowMarks) - { - PlanRowMark *rc = (PlanRowMark *) lfirst(lc); - Oid parent_oid = getrelid(rc->rti, rtable); - ListCell *mark_lc; - List *finished_tes = NIL; /* postprocessed target entries */ + /* + * Sometimes the relation doesn't exist anymore but there is still + * a record in config. For instance, it happens in DDL event trigger. + * Still we should be able to remove this record. + */ + if (owner == InvalidOid) + return true; - foreach (mark_lc, tableoids) - { - TargetEntry *te = (TargetEntry *) lfirst(mark_lc); - const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); - Datum cur_oid_datum; + /* Check if current user is the owner of the relation */ + if (owner != role) + return false; - cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); + return true; +} - if (DatumGetObjectId(cur_oid_datum) == parent_oid) - { - char resname[64]; +/* Compare clause operand with expression */ +bool +match_expr_to_operand(const Node *expr, const Node *operand) +{ + expr = drop_irrelevant_expr_wrappers(expr); + operand = drop_irrelevant_expr_wrappers(operand); - /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ - snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); - te->resname = pstrdup(resname); + /* compare expressions and return result right away */ + return equal(expr, operand); +} - finished_tes = lappend(finished_tes, te); - } - } - /* Remove target entries that have been processed in this step */ - foreach (mark_lc, finished_tes) - 
tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); +List * +list_reverse(List *l) +{ + List *result = NIL; + ListCell *lc; - if (list_length(tableoids) == 0) - break; /* nothing to do */ + foreach (lc, l) + { + result = lcons(lfirst(lc), result); } + return result; } -/* NOTE: Used for debug */ -#ifdef __GNUC__ -__attribute__((unused)) -#endif -static char * -bms_print(Bitmapset *bms) -{ - StringInfoData str; - int x; - - initStringInfo(&str); - x = -1; - while ((x = bms_next_member(bms, x)) >= 0) - appendStringInfo(&str, " %d", x); - - return str.data; -} /* - * Copied from util/plancat.c - * - * Build a targetlist representing the columns of the specified index. + * Get relation owner. */ -List * -build_index_tlist(PlannerInfo *root, IndexOptInfo *index, - Relation heapRelation) +Oid +get_rel_owner(Oid relid) { - List *tlist = NIL; - Index varno = index->rel->relid; - ListCell *indexpr_item; - int i; + HeapTuple tp; + Oid owner; - indexpr_item = list_head(index->indexprs); - for (i = 0; i < index->ncolumns; i++) + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (HeapTupleIsValid(tp)) { - int indexkey = index->indexkeys[i]; - Expr *indexvar; + Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); - if (indexkey != 0) - { - /* simple column */ - Form_pg_attribute att_tup; - - if (indexkey < 0) - att_tup = SystemAttributeDefinition(indexkey, - heapRelation->rd_rel->relhasoids); - else - att_tup = heapRelation->rd_att->attrs[indexkey - 1]; - - indexvar = (Expr *) makeVar(varno, - indexkey, - att_tup->atttypid, - att_tup->atttypmod, - att_tup->attcollation, - 0); - } - else - { - /* expression column */ - if (indexpr_item == NULL) - elog(ERROR, "wrong number of index expressions"); - indexvar = (Expr *) lfirst(indexpr_item); - indexpr_item = lnext(indexpr_item); - } + owner = reltup->relowner; + ReleaseSysCache(tp); - tlist = lappend(tlist, - makeTargetEntry(indexvar, - i + 1, - NULL, - false)); + return owner; } - if (indexpr_item != NULL) - 
elog(ERROR, "wrong number of index expressions"); - return tlist; + return InvalidOid; } /* - * We should ensure that 'rel->baserestrictinfo' or 'ppi->ppi_clauses' contain - * Var which corresponds to partition attribute before creating RuntimeXXX - * paths since they are used by create_scan_plan() to form 'scan_clauses' - * that are passed to create_customscan_plan(). + * Try to get relname or at least relid as cstring. */ -bool -check_rinfo_for_partitioned_attr(List *rinfo, Index varno, AttrNumber varattno) +char * +get_rel_name_or_relid(Oid relid) { - List *vars; - List *clauses; - ListCell *lc; + char *relname = get_rel_name(relid); - clauses = get_actual_clauses(rinfo); + if (!relname) + return DatumGetCString(DirectFunctionCall1(oidout, ObjectIdGetDatum(relid))); - vars = pull_var_clause((Node *) clauses, - PVC_REJECT_AGGREGATES, - PVC_REJECT_PLACEHOLDERS); + return relname; +} - foreach (lc, vars) - { - Var *var = (Var *) lfirst(lc); +/* + * Return palloced fully qualified relation name as a cstring + */ +char * +get_qualified_rel_name(Oid relid) +{ + Oid nspid = get_rel_namespace(relid); - if (var->varno == varno && var->varoattno == varattno) - return true; - } + return psprintf("%s.%s", + quote_identifier(get_namespace_name(nspid)), + quote_identifier(get_rel_name(relid))); +} + +RangeVar * +makeRangeVarFromRelid(Oid relid) +{ + char *relname = get_rel_name(relid); + char *nspname = get_namespace_name(get_rel_namespace(relid)); - return false; + return makeRangeVar(nspname, relname, -1); } + + /* - * Append trigger info contained in 'more' to 'src', both remain unmodified. + * Try to find binary operator. * - * This allows us to execute some of main table's triggers on children. - * See ExecInsert() for more details. + * Returns operator function's Oid or throws an ERROR on InvalidOid. 
*/ -TriggerDesc * -append_trigger_descs(TriggerDesc *src, TriggerDesc *more, bool *grown_up) +Operator +get_binary_operator(char *oprname, Oid arg1, Oid arg2) { -#define CopyToTriggerDesc(bool_field_name) \ - ( new_desc->bool_field_name |= (src->bool_field_name || more->bool_field_name) ) + Operator op; - TriggerDesc *new_desc = (TriggerDesc *) palloc0(sizeof(TriggerDesc)); - Trigger *cur_trigger; - int i; + op = compatible_oper(NULL, list_make1(makeString(oprname)), + arg1, arg2, true, -1); - /* Quick choices */ - if (!src && !more) - { - *grown_up = false; - return NULL; - } - else if (!src) - { - *grown_up = true; /* expand space for new triggers */ - return more; - } - else if (!more) - { - *grown_up = false; /* no new triggers will be added */ - return src; - } + if (!op) + elog(ERROR, "cannot find operator %s(%s, %s)", + oprname, + format_type_be(arg1), + format_type_be(arg2)); - *grown_up = true; - new_desc->numtriggers = src->numtriggers + more->numtriggers; - new_desc->triggers = (Trigger *) palloc(new_desc->numtriggers * sizeof(Trigger)); - - cur_trigger = new_desc->triggers; - - /* Copy triggers from 'a' */ - for (i = 0; i < src->numtriggers; i++) - memcpy(cur_trigger++, &(src->triggers[i]), sizeof(Trigger)); - - /* Copy triggers from 'b' */ - for (i = 0; i < more->numtriggers; i++) - memcpy(cur_trigger++, &(more->triggers[i]), sizeof(Trigger)); - - /* Copy insert bool flags */ - CopyToTriggerDesc(trig_insert_before_row); - CopyToTriggerDesc(trig_insert_after_row); - CopyToTriggerDesc(trig_insert_instead_row); - CopyToTriggerDesc(trig_insert_before_statement); - CopyToTriggerDesc(trig_insert_after_statement); - - /* Copy update bool flags */ - CopyToTriggerDesc(trig_update_before_row); - CopyToTriggerDesc(trig_update_after_row); - CopyToTriggerDesc(trig_update_instead_row); - CopyToTriggerDesc(trig_update_before_statement); - CopyToTriggerDesc(trig_update_after_statement); - - /* Copy delete bool flags */ - CopyToTriggerDesc(trig_delete_before_row); - 
CopyToTriggerDesc(trig_delete_after_row); - CopyToTriggerDesc(trig_delete_instead_row); - CopyToTriggerDesc(trig_delete_before_statement); - CopyToTriggerDesc(trig_delete_after_statement); - - /* Copy truncate bool flags */ - CopyToTriggerDesc(trig_truncate_before_statement); - CopyToTriggerDesc(trig_truncate_after_statement); - - return new_desc; + return op; } /* - * Get BTORDER_PROC for two types described by Oids + * Get BTORDER_PROC for two types described by Oids. */ void fill_type_cmp_fmgr_info(FmgrInfo *finfo, Oid type1, Oid type2) { Oid cmp_proc_oid; - TypeCacheEntry *tce; + TypeCacheEntry *tce_1, + *tce_2; + + /* Check type compatibility */ + if (IsBinaryCoercible(type1, type2)) + type1 = type2; + + else if (IsBinaryCoercible(type2, type1)) + type2 = type1; + + tce_1 = lookup_type_cache(type1, TYPECACHE_BTREE_OPFAMILY); + tce_2 = lookup_type_cache(type2, TYPECACHE_BTREE_OPFAMILY); - tce = lookup_type_cache(type1, TYPECACHE_BTREE_OPFAMILY); + /* Both types should belong to the same opfamily */ + if (tce_1->btree_opf != tce_2->btree_opf) + goto fill_type_cmp_fmgr_info_error; - cmp_proc_oid = get_opfamily_proc(tce->btree_opf, - type1, - type2, + cmp_proc_oid = get_opfamily_proc(tce_1->btree_opf, + tce_1->btree_opintype, + tce_2->btree_opintype, BTORDER_PROC); - fmgr_info(cmp_proc_oid, finfo); - return; -} + /* No such function, emit ERROR */ + if (!OidIsValid(cmp_proc_oid)) + goto fill_type_cmp_fmgr_info_error; -List * -list_reverse(List *l) -{ - List *result = NIL; - ListCell *lc; + /* Fill FmgrInfo struct */ + fmgr_info(cmp_proc_oid, finfo); - foreach (lc, l) - { - result = lcons(lfirst(lc), result); - } - return result; + return; /* everything is OK */ + +/* Handle errors (no such function) */ +fill_type_cmp_fmgr_info_error: + elog(ERROR, "missing comparison function for types %s & %s", + format_type_be(type1), format_type_be(type2)); } /* - * Changes varno attribute in all variables nested in the node + * Fetch binary operator by name and return it's 
function and ret type. */ void -change_varnos(Node *node, Oid old_varno, Oid new_varno) +extract_op_func_and_ret_type(char *opname, + Oid type1, Oid type2, + Oid *op_func, /* ret value #1 */ + Oid *op_ret_type) /* ret value #2 */ { - change_varno_context context; - context.old_varno = old_varno; - context.new_varno = new_varno; + Operator op; - change_varno_walker(node, &context); -} + /* Get "move bound operator" descriptor */ + op = get_binary_operator(opname, type1, type2); + Assert(op); -static bool -change_varno_walker(Node *node, change_varno_context *context) -{ - ListCell *lc; - Var *var; - EquivalenceClass *ec; - EquivalenceMember *em; - - if (node == NULL) - return false; - - switch(node->type) - { - case T_Var: - var = (Var *) node; - if (var->varno == context->old_varno) - { - var->varno = context->new_varno; - var->varnoold = context->new_varno; - } - return false; + *op_func = oprfuncid(op); + *op_ret_type = ((Form_pg_operator) GETSTRUCT(op))->oprresult; - case T_RestrictInfo: - change_varnos_in_restrinct_info((RestrictInfo *) node, context); - return false; - - case T_PathKey: - change_varno_walker((Node *) ((PathKey *) node)->pk_eclass, context); - return false; - - case T_EquivalenceClass: - ec = (EquivalenceClass *) node; - - foreach(lc, ec->ec_members) - change_varno_walker((Node *) lfirst(lc), context); - foreach(lc, ec->ec_derives) - change_varno_walker((Node *) lfirst(lc), context); - return false; + /* Don't forget to release system cache */ + ReleaseSysCache(op); +} - case T_EquivalenceMember: - em = (EquivalenceMember *) node; - change_varno_walker((Node *) em->em_expr, context); - if (bms_is_member(context->old_varno, em->em_relids)) - { - em->em_relids = bms_del_member(em->em_relids, context->old_varno); - em->em_relids = bms_add_member(em->em_relids, context->new_varno); - } - return false; - case T_TargetEntry: - change_varno_walker((Node *) ((TargetEntry *) node)->expr, context); - return false; - case T_List: - foreach(lc, (List *) 
node) - change_varno_walker((Node *) lfirst(lc), context); - return false; +/* + * Get CSTRING representation of Datum using the type Oid. + */ +char * +datum_to_cstring(Datum datum, Oid typid) +{ + char *result; + HeapTuple tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); - default: - break; + if (HeapTupleIsValid(tup)) + { + Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tup); + result = OidOutputFunctionCall(typtup->typoutput, datum); + ReleaseSysCache(tup); } + else + result = pstrdup("[error]"); - /* Should not find an unplanned subquery */ - Assert(!IsA(node, Query)); - - return expression_tree_walker(node, change_varno_walker, (void *) context); + return result; } -static void -change_varnos_in_restrinct_info(RestrictInfo *rinfo, change_varno_context *context) -{ - ListCell *lc; - - change_varno_walker((Node *) rinfo->clause, context); - if (rinfo->left_em) - change_varno_walker((Node *) rinfo->left_em->em_expr, context); - - if (rinfo->right_em) - change_varno_walker((Node *) rinfo->right_em->em_expr, context); - - if (rinfo->orclause) - foreach(lc, ((BoolExpr *) rinfo->orclause)->args) - { - Node *node = (Node *) lfirst(lc); - change_varno_walker(node, context); - } - if (bms_is_member(context->old_varno, rinfo->clause_relids)) - { - rinfo->clause_relids = bms_del_member(rinfo->clause_relids, context->old_varno); - rinfo->clause_relids = bms_add_member(rinfo->clause_relids, context->new_varno); - } - if (bms_is_member(context->old_varno, rinfo->left_relids)) - { - rinfo->left_relids = bms_del_member(rinfo->left_relids, context->old_varno); - rinfo->left_relids = bms_add_member(rinfo->left_relids, context->new_varno); - } - if (bms_is_member(context->old_varno, rinfo->right_relids)) - { - rinfo->right_relids = bms_del_member(rinfo->right_relids, context->old_varno); - rinfo->right_relids = bms_add_member(rinfo->right_relids, context->new_varno); - } -} /* - * Basic plan tree walker + * Try casting value of type 'in_type' to 'out_type'. 
* - * 'visitor' is applied right before return + * This function might emit ERROR. */ -void -plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context) +Datum +perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) { - ListCell *l; + CoercionPathType ret; + Oid castfunc = InvalidOid; + + /* Speculative success */ + if (success) *success = true; + + /* Fast and trivial path */ + if (in_type == out_type) + return value; - if (plan == NULL) - return; + /* Check that types are binary coercible */ + if (IsBinaryCoercible(in_type, out_type)) + return value; - check_stack_depth(); + /* If not, try to perform a type cast */ + ret = find_coercion_pathway(out_type, in_type, + COERCION_EXPLICIT, + &castfunc); - /* Plan-type-specific fixes */ - switch (nodeTag(plan)) + /* Handle coercion paths */ + switch (ret) { - case T_SubqueryScan: - plan_tree_walker(((SubqueryScan *) plan)->subplan, visitor, context); - break; - - case T_CustomScan: - foreach(l, ((CustomScan *) plan)->custom_plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; - - case T_ModifyTable: - foreach (l, ((ModifyTable *) plan)->plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; - - /* Since they look alike */ - case T_MergeAppend: - case T_Append: - foreach(l, ((Append *) plan)->appendplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; - - case T_BitmapAnd: - foreach(l, ((BitmapAnd *) plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; - - case T_BitmapOr: - foreach(l, ((BitmapOr *) plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); - break; + /* There's a function */ + case COERCION_PATH_FUNC: + { + /* Perform conversion */ + Assert(castfunc != InvalidOid); + return OidFunctionCall1(castfunc, value); + } - default: - break; - } + /* Types are binary compatible (no implicit cast) */ + case COERCION_PATH_RELABELTYPE: + { + /* We don't 
perform any checks here */ + return value; + } - plan_tree_walker(plan->lefttree, visitor, context); - plan_tree_walker(plan->righttree, visitor, context); + /* TODO: implement these casts if needed */ + case COERCION_PATH_ARRAYCOERCE: + case COERCION_PATH_COERCEVIAIO: - /* Apply visitor to the current node */ - visitor(plan, context); + /* There's no cast available */ + case COERCION_PATH_NONE: + default: + { + /* Oops, something is wrong */ + if (success) + *success = false; + else + elog(ERROR, "cannot cast %s to %s", + format_type_be(in_type), + format_type_be(out_type)); + + return (Datum) 0; + } + } } /* - * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions - * - * This is necessary since preprocess_targetlist() heavily - * depends on the 'inh' flag which we have to unset. - * - * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' - * relnames into 'tableoid:rowmarkId'. + * Convert interval from TEXT to binary form using partitioninig expression type. 
*/ -void -rowmark_add_tableoids(Query *parse) +Datum +extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ + Oid part_atttype, /* expression type */ + Oid *interval_type) /* ret value #1 */ { - ListCell *lc; + Datum interval_binary; + const char *interval_cstring; - check_stack_depth(); + interval_cstring = TextDatumGetCString(interval_text); - foreach(lc, parse->rtable) + /* If 'part_atttype' is a *date type*, cast 'range_interval' to INTERVAL */ + if (is_date_type_internal(part_atttype)) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); + int32 interval_typmod = PATHMAN_CONFIG_interval_typmod; + + /* Convert interval from CSTRING to internal form */ + interval_binary = DirectFunctionCall3(interval_in, + CStringGetDatum(interval_cstring), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(interval_typmod)); + if (interval_type) + *interval_type = INTERVALOID; + } + /* Otherwise cast it to the partitioned column's type */ + else + { + HeapTuple htup; + Oid typein_proc = InvalidOid; - switch(rte->rtekind) + htup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(part_atttype)); + if (HeapTupleIsValid(htup)) { - case RTE_SUBQUERY: - rowmark_add_tableoids(rte->subquery); - break; - - default: - break; + typein_proc = ((Form_pg_type) GETSTRUCT(htup))->typinput; + ReleaseSysCache(htup); } + else + elog(ERROR, "cannot find input function for type %u", part_atttype); + + /* + * Convert interval from CSTRING to 'prel->ev_type'. + * + * Note: We pass 3 arguments in case + * 'typein_proc' also takes Oid & typmod. 
+ */ + interval_binary = OidFunctionCall3(typein_proc, + CStringGetDatum(interval_cstring), + ObjectIdGetDatum(part_atttype), + Int32GetDatum(-1)); + if (interval_type) + *interval_type = part_atttype; } - /* Generate 'tableoid' for partitioned table rowmark */ - foreach (lc, parse->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(lc); - Oid parent = getrelid(rc->rti, parse->rtable); - Var *var; - TargetEntry *tle; - char resname[64]; - - /* Check that table is partitioned */ - if (!get_pathman_relation_info(parent)) - continue; - - var = makeVar(rc->rti, - TableOidAttributeNumber, - OIDOID, - -1, - InvalidOid, - 0); - - /* Use parent's Oid as TABLEOID_STR's key (%u) */ - snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); - - tle = makeTargetEntry((Expr *) var, - list_length(parse->targetList) + 1, - pstrdup(resname), - true); - - /* There's no problem here since new attribute is junk */ - parse->targetList = lappend(parse->targetList, tle); - } + return interval_binary; } -/* - * Final rowmark processing for partitioned tables - */ -void -postprocess_lock_rows(List *rtable, Plan *plan) +/* Convert Datum into CSTRING array */ +char ** +deconstruct_text_array(Datum array, int *array_size) { - plan_tree_walker(plan, lock_rows_visitor, rtable); -} + ArrayType *array_ptr = DatumGetArrayTypeP(array); + int16 elemlen; + bool elembyval; + char elemalign; -/* - * Returns pg_pathman schema's Oid or InvalidOid if that's not possible. 
- */ -Oid -get_pathman_schema(void) -{ - Oid result; - Relation rel; - SysScanDesc scandesc; - HeapTuple tuple; - ScanKeyData entry[1]; - Oid ext_schema; - - /* It's impossible to fetch pg_pathman's schema now */ - if (!IsTransactionState()) - return InvalidOid; - - ext_schema = get_extension_oid("pg_pathman", true); - if (ext_schema == InvalidOid) - return InvalidOid; /* exit if pg_pathman does not exist */ - - ScanKeyInit(&entry[0], - ObjectIdAttributeNumber, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ext_schema)); - - rel = heap_open(ExtensionRelationId, AccessShareLock); - scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, - NULL, 1, entry); - - tuple = systable_getnext(scandesc); - - /* We assume that there can be at most one matching tuple */ - if (HeapTupleIsValid(tuple)) - result = ((Form_pg_extension) GETSTRUCT(tuple))->extnamespace; - else - result = InvalidOid; + Datum *elem_values; + bool *elem_nulls; - systable_endscan(scandesc); + int arr_size = 0; - heap_close(rel, AccessShareLock); + /* Check type invariant */ + Assert(ARR_ELEMTYPE(array_ptr) == TEXTOID); - return result; -} + /* Check number of dimensions */ + if (ARR_NDIM(array_ptr) > 1) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("array should contain only 1 dimension"))); -/* - * Check if this is a "date"-related type. - */ -bool -is_date_type_internal(Oid typid) -{ - return typid == TIMESTAMPOID || - typid == TIMESTAMPTZOID || - typid == DATEOID; -} + get_typlenbyvalalign(ARR_ELEMTYPE(array_ptr), + &elemlen, &elembyval, &elemalign); -/* - * Check if this is a string type. 
- */ -bool -is_string_type_internal(Oid typid) -{ - return typid == TEXTOID || - typid == CSTRINGOID; -} + deconstruct_array(array_ptr, + ARR_ELEMTYPE(array_ptr), + elemlen, elembyval, elemalign, + &elem_values, &elem_nulls, &arr_size); + /* If there are actual values, convert them into CSTRINGs */ + if (arr_size > 0) + { + char **strings = palloc(arr_size * sizeof(char *)); + int i; -/* - * Try to find binary operator. - * - * Returns operator function's Oid or throws an ERROR on InvalidOid. - */ -Oid -get_binary_operator_oid(char *oprname, Oid arg1, Oid arg2) -{ - Oid funcid = InvalidOid; - Operator op; + for (i = 0; i < arr_size; i++) + { + if (elem_nulls[i]) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("array should not contain NULLs"))); - op = oper(NULL, list_make1(makeString(oprname)), arg1, arg2, true, -1); - if (op) - { - funcid = oprfuncid(op); - ReleaseSysCache(op); + strings[i] = TextDatumGetCString(elem_values[i]); + } + + /* Return an array and it's size */ + *array_size = arr_size; + return strings; } - else - elog(ERROR, "Cannot find operator \"%s\"(%u, %u)", oprname, arg1, arg2); + /* Else emit ERROR */ + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("array should not be empty"))); - return funcid; + /* Keep compiler happy */ + return NULL; } /* - * Get CSTRING representation of Datum using the type Oid. 
+ * Convert schema qualified relation names array to RangeVars array */ -char * -datum_to_cstring(Datum datum, Oid typid) +RangeVar ** +qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) { - char *result; - HeapTuple tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); + RangeVar **rangevars = NULL; + int i; - if (HeapTupleIsValid(tup)) + /* Convert partition names into RangeVars */ + if (relnames) { - Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tup); - result = OidOutputFunctionCall(typtup->typoutput, datum); - ReleaseSysCache(tup); + rangevars = palloc(sizeof(RangeVar *) * nrelnames); + for (i = 0; i < nrelnames; i++) + { + List *nl = stringToQualifiedNameListCompat(relnames[i]); + + rangevars[i] = makeRangeVarFromNameList(nl); + } } - else - result = pstrdup("[error]"); - return result; + return rangevars; } /* - * Try to get relname or at least relid as cstring. + * Checks that Oid is valid (it need to do before relation locking: locking of + * invalid Oid causes an error on replica). 
*/ -char * -get_rel_name_or_relid(Oid relid) +void +check_relation_oid(Oid relid) { - return DatumGetCString(DirectFunctionCall1(regclassout, - ObjectIdGetDatum(relid))); + if (relid < FirstNormalObjectId) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("identifier \"%u\" must be normal Oid", relid))); } diff --git a/src/utils.h b/src/utils.h deleted file mode 100644 index cd8419c3..00000000 --- a/src/utils.h +++ /dev/null @@ -1,79 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * utils.h - * prototypes of various support functions - * - * Copyright (c) 2016, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef PATHMAN_UTILS_H -#define PATHMAN_UTILS_H - -#include "pathman.h" - -#include "postgres.h" -#include "utils/rel.h" -#include "nodes/relation.h" -#include "nodes/nodeFuncs.h" - - -typedef struct -{ - Oid old_varno; - Oid new_varno; -} change_varno_context; - - -/* - * Plan tree modification. - */ -void plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context); -List * build_index_tlist(PlannerInfo *root, - IndexOptInfo *index, - Relation heapRelation); -void change_varnos(Node *node, Oid old_varno, Oid new_varno); -TriggerDesc * append_trigger_descs(TriggerDesc *src, - TriggerDesc *more, - bool *grown_up); - -/* - * Rowmark processing. - */ -void rowmark_add_tableoids(Query *parse); -void postprocess_lock_rows(List *rtable, Plan *plan); - -/* - * Various traits. - */ -bool clause_contains_params(Node *clause); -bool is_date_type_internal(Oid typid); -bool is_string_type_internal(Oid typid); -bool check_rinfo_for_partitioned_attr(List *rinfo, - Index varno, - AttrNumber varattno); - -/* - * Misc. - */ -Oid get_pathman_schema(void); -List * list_reverse(List *l); - -/* - * Handy execution-stage functions. 
- */ -char * get_rel_name_or_relid(Oid relid); -Oid get_binary_operator_oid(char *opname, Oid arg1, Oid arg2); -void fill_type_cmp_fmgr_info(FmgrInfo *finfo, - Oid type1, - Oid type2); -void execute_on_xact_mcxt_reset(MemoryContext xact_context, - MemoryContextCallbackFunction cb_proc, - void *arg); -char * datum_to_cstring(Datum datum, Oid typid); - - -#endif diff --git a/src/xact_handling.c b/src/xact_handling.c index 44d9195b..31fb5d13 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -9,70 +9,51 @@ */ #include "xact_handling.h" +#include "utils.h" #include "postgres.h" +#include "access/transam.h" #include "access/xact.h" #include "catalog/catalog.h" #include "miscadmin.h" #include "storage/lmgr.h" +#include "utils/inval.h" static inline void SetLocktagRelationOid(LOCKTAG *tag, Oid relid); static inline bool do_we_hold_the_lock(Oid relid, LOCKMODE lockmode); -/* - * Lock certain partitioned relation to disable concurrent access. - */ -bool -xact_lock_partitioned_rel(Oid relid, bool nowait) + +static LockAcquireResult +LockAcquireOid(Oid relid, LOCKMODE lockmode, bool sessionLock, bool dontWait) { - if (nowait) - { - if (ConditionalLockRelationOid(relid, ShareUpdateExclusiveLock)) - return true; - return false; - } - else - LockRelationOid(relid, ShareUpdateExclusiveLock); + LOCKTAG tag; + LockAcquireResult res; - return true; -} + /* Create a tag for lock */ + SetLocktagRelationOid(&tag, relid); -/* - * Unlock partitioned relation. - */ -void -xact_unlock_partitioned_rel(Oid relid) -{ - UnlockRelationOid(relid, ShareUpdateExclusiveLock); -} + res = LockAcquire(&tag, lockmode, sessionLock, dontWait); -/* - * Lock relation exclusively (SELECTs are possible). 
- */ -bool -xact_lock_rel_exclusive(Oid relid, bool nowait) -{ - if (nowait) - { - if (ConditionalLockRelationOid(relid, ExclusiveLock)) - return true; - return false; - } - else - LockRelationOid(relid, ExclusiveLock); + /* + * Now that we have the lock, check for invalidation messages; + * see notes in LockRelationOid. + */ + if (res != LOCKACQUIRE_ALREADY_HELD) + AcceptInvalidationMessages(); - return true; + return res; } + /* - * Unlock relation (exclusive lock). + * Acquire lock and return LockAcquireResult. */ -void -xact_unlock_rel_exclusive(Oid relid) +LockAcquireResult +xact_lock_rel(Oid relid, LOCKMODE lockmode, bool nowait) { - UnlockRelationOid(relid, ExclusiveLock); + return LockAcquireOid(relid, lockmode, false, nowait); } /* @@ -82,6 +63,10 @@ xact_unlock_rel_exclusive(Oid relid) bool xact_bgw_conflicting_lock_exists(Oid relid) { +#if PG_VERSION_NUM >= 90600 + /* We use locking groups for 9.6+ */ + return false; +#else LOCKMODE lockmode; /* Try each lock >= ShareUpdateExclusiveLock */ @@ -94,6 +79,7 @@ xact_bgw_conflicting_lock_exists(Oid relid) } return false; +#endif } @@ -110,7 +96,7 @@ xact_is_level_read_committed(void) } /* - * Check if 'stmt' is BEGIN\ROLLBACK etc transaction statement. + * Check if 'stmt' is BEGIN/ROLLBACK/etc [TRANSACTION] statement. */ bool xact_is_transaction_stmt(Node *stmt) @@ -125,29 +111,67 @@ xact_is_transaction_stmt(Node *stmt) } /* - * Check if 'stmt' is SET TRANSACTION statement. + * Check if 'stmt' is SET ('name' | [TRANSACTION]) statement. 
*/ bool -xact_is_set_transaction_stmt(Node *stmt) +xact_is_set_stmt(Node *stmt, const char *name) { + /* Check that SET TRANSACTION is implemented via VariableSetStmt */ + Assert(VAR_SET_MULTI > 0); + if (!stmt) return false; - if (IsA(stmt, VariableSetStmt)) + if (!IsA(stmt, VariableSetStmt)) + return false; + + if (!name) + return true; + else { - VariableSetStmt *var_set_stmt = (VariableSetStmt *) stmt; + char *set_name = ((VariableSetStmt *) stmt)->name; - /* special case for SET TRANSACTION ... */ - if (var_set_stmt->kind == VAR_SET_MULTI) + if (set_name && pg_strcasecmp(name, set_name) == 0) return true; } return false; } +/* + * Check if 'stmt' is ALTER EXTENSION pg_pathman. + */ +bool +xact_is_alter_pathman_stmt(Node *stmt) +{ + if (!stmt) + return false; + + if (!IsA(stmt, AlterExtensionStmt)) + return false; + + if (pg_strcasecmp(((AlterExtensionStmt *) stmt)->extname, "pg_pathman") == 0) + return true; + + return false; +} + +/* + * Check if object is visible to newer transactions. + */ +bool +xact_object_is_visible(TransactionId obj_xmin) +{ + return TransactionIdEquals(obj_xmin, FrozenTransactionId) || + TransactionIdPrecedes(obj_xmin, GetCurrentTransactionId()); +} + /* * Do we hold the specified lock? */ +#ifdef __GNUC__ +__attribute__((unused)) +#endif static inline bool do_we_hold_the_lock(Oid relid, LOCKMODE lockmode) { diff --git a/tests/cmocka/.gitignore b/tests/cmocka/.gitignore new file mode 100644 index 00000000..91500ef0 --- /dev/null +++ b/tests/cmocka/.gitignore @@ -0,0 +1 @@ +rangeset_tests diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile new file mode 100644 index 00000000..5216a467 --- /dev/null +++ b/tests/cmocka/Makefile @@ -0,0 +1,34 @@ +PG_CONFIG = pg_config +TOP_SRC_DIR = ../../src + +CC = gcc +CFLAGS += -I $(TOP_SRC_DIR) -I $(shell $(PG_CONFIG) --includedir-server) +CFLAGS += -I$(CURDIR)/../../src/include -I. 
+CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) +CFLAGS += $(shell $(PG_CONFIG) --cflags) +CFLAGS += $(CFLAGS_SL) +CFLAGS += $(PG_CPPFLAGS) +CFLAGS += -D_GNU_SOURCE +LDFLAGS += -lcmocka +TEST_BIN = rangeset_tests + +OBJ = missing_basic.o missing_list.o missing_stringinfo.o \ + missing_bitmapset.o rangeset_tests.o $(TOP_SRC_DIR)/rangeset.o + + +all: build_extension $(TEST_BIN) + +$(TEST_BIN): $(OBJ) + $(CC) -o $@ $^ $(CFLAGS) $(LDFLAGS) + +%.o: %.c + $(CC) -c -o $@ $< $(CFLAGS) + +build_extension: + $(MAKE) -C $(TOP_SRC_DIR)/.. + +clean: + rm -f $(OBJ) $(TEST_BIN) + +check: all + ./$(TEST_BIN) diff --git a/tests/cmocka/missing_basic.c b/tests/cmocka/missing_basic.c new file mode 100644 index 00000000..d20eb87f --- /dev/null +++ b/tests/cmocka/missing_basic.c @@ -0,0 +1,58 @@ +#include + +#include "postgres.h" +#include "undef_printf.h" + + +void * +palloc(Size size) +{ + return malloc(size); +} + +void * +repalloc(void *pointer, Size size) +{ + return realloc(pointer, size); +} + +void +pfree(void *pointer) +{ + free(pointer); +} + +void +ExceptionalCondition(const char *conditionName, +#if PG_VERSION_NUM < 160000 + const char *errorType, +#endif + const char *fileName, + int lineNumber) +{ + if (!PointerIsValid(conditionName) || !PointerIsValid(fileName) +#if PG_VERSION_NUM < 160000 + || !PointerIsValid(errorType) +#endif + ) + { + printf("TRAP: ExceptionalCondition: bad arguments\n"); + } + else + { + printf("TRAP: %s(\"%s\", File: \"%s\", Line: %d)\n", +#if PG_VERSION_NUM < 160000 + errorType, +#else + "", +#endif + conditionName, + fileName, lineNumber); + + } + + /* Usually this shouldn't be needed, but make sure the msg went out */ + fflush(stderr); + + abort(); +} diff --git a/tests/cmocka/missing_bitmapset.c b/tests/cmocka/missing_bitmapset.c new file mode 100644 index 00000000..84e7e771 --- /dev/null +++ b/tests/cmocka/missing_bitmapset.c @@ -0,0 +1,17 @@ +#include "postgres.h" +#include "undef_printf.h" +#include "nodes/bitmapset.h" + + +int 
+bms_next_member(const Bitmapset *a, int prevbit); + + +int +bms_next_member(const Bitmapset *a, int prevbit) +{ + printf("bms_next_member(): not implemented yet\n"); + fflush(stdout); + + abort(); +} diff --git a/tests/cmocka/missing_list.c b/tests/cmocka/missing_list.c new file mode 100644 index 00000000..b85eed94 --- /dev/null +++ b/tests/cmocka/missing_list.c @@ -0,0 +1,447 @@ +/*------------------------------------------------------------------------- + * + * list.c + * implementation for PostgreSQL generic list package + * + * + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/nodes/list.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" + +#if PG_VERSION_NUM < 130000 + +#define IsPointerList(l) ((l) == NIL || IsA((l), List)) +#define IsIntegerList(l) ((l) == NIL || IsA((l), IntList)) +#define IsOidList(l) ((l) == NIL || IsA((l), OidList)) + + +static List * +new_list(NodeTag type); + +static void +new_tail_cell(List *list); + +static void +new_head_cell(List *list); + +static void +check_list_invariants(const List *list); + + +/* + * ------------- + * Definitions + * ------------- + */ + +static List * +new_list(NodeTag type) +{ + List *new_list; + ListCell *new_head; + + new_head = (ListCell *) palloc(sizeof(*new_head)); + new_head->next = NULL; + /* new_head->data is left undefined! 
*/ + + new_list = (List *) palloc(sizeof(*new_list)); + new_list->type = type; + new_list->length = 1; + new_list->head = new_head; + new_list->tail = new_head; + + return new_list; + +} + +static void +new_tail_cell(List *list) +{ + ListCell *new_tail; + + new_tail = (ListCell *) palloc(sizeof(*new_tail)); + new_tail->next = NULL; + + list->tail->next = new_tail; + list->tail = new_tail; + list->length++; + +} + +static void +new_head_cell(List *list) +{ + ListCell *new_head; + + new_head = (ListCell *) palloc(sizeof(*new_head)); + new_head->next = list->head; + + list->head = new_head; + list->length++; + +} + +static void +check_list_invariants(const List *list) +{ + if (list == NIL) + return; + + Assert(list->length > 0); + Assert(list->head != NULL); + Assert(list->tail != NULL); + + Assert(list->type == T_List || + list->type == T_IntList || + list->type == T_OidList); + + if (list->length == 1) + Assert(list->head == list->tail); + if (list->length == 2) + Assert(list->head->next == list->tail); + Assert(list->tail->next == NULL); + +} + +List * +lappend(List *list, void *datum) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List); + else + new_tail_cell(list); + + lfirst(list->tail) = datum; + check_list_invariants(list); + return list; +} + +List * +lcons(void *datum, List *list) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List); + else + new_head_cell(list); + + lfirst(list->head) = datum; + check_list_invariants(list); + return list; + +} + +#else /* PG_VERSION_NUM >= 130000 */ + +/*------------------------------------------------------------------------- + * + * This was taken from src/backend/nodes/list.c PostgreSQL-13 source code. + * We only need lappend() and lcons() and their dependencies. + * There is one change: we use palloc() instead MemoryContextAlloc() in + * enlarge_list() (see #defines). 
+ * + *------------------------------------------------------------------------- + */ +#include "port/pg_bitutils.h" +#include "utils/memdebug.h" +#include "utils/memutils.h" + +#define MemoryContextAlloc(c, s) palloc(s) +#define GetMemoryChunkContext(l) 0 + +/* + * The previous List implementation, since it used a separate palloc chunk + * for each cons cell, had the property that adding or deleting list cells + * did not move the storage of other existing cells in the list. Quite a + * bit of existing code depended on that, by retaining ListCell pointers + * across such operations on a list. There is no such guarantee in this + * implementation, so instead we have debugging support that is meant to + * help flush out now-broken assumptions. Defining DEBUG_LIST_MEMORY_USAGE + * while building this file causes the List operations to forcibly move + * all cells in a list whenever a cell is added or deleted. In combination + * with MEMORY_CONTEXT_CHECKING and/or Valgrind, this can usually expose + * broken code. It's a bit expensive though, as there's many more palloc + * cycles and a lot more data-copying than in a default build. + * + * By default, we enable this when building for Valgrind. + */ +#ifdef USE_VALGRIND +#define DEBUG_LIST_MEMORY_USAGE +#endif + +/* Overhead for the fixed part of a List header, measured in ListCells */ +#define LIST_HEADER_OVERHEAD \ + ((int) ((offsetof(List, initial_elements) - 1) / sizeof(ListCell) + 1)) + +/* + * Macros to simplify writing assertions about the type of a list; a + * NIL list is considered to be an empty list of any type. + */ +#define IsPointerList(l) ((l) == NIL || IsA((l), List)) +#define IsIntegerList(l) ((l) == NIL || IsA((l), IntList)) +#define IsOidList(l) ((l) == NIL || IsA((l), OidList)) + +#ifdef USE_ASSERT_CHECKING +/* + * Check that the specified List is valid (so far as we can tell). 
+ */ +static void +check_list_invariants(const List *list) +{ + if (list == NIL) + return; + + Assert(list->length > 0); + Assert(list->length <= list->max_length); + Assert(list->elements != NULL); + + Assert(list->type == T_List || + list->type == T_IntList || + list->type == T_OidList); +} +#else +#define check_list_invariants(l) ((void) 0) +#endif /* USE_ASSERT_CHECKING */ + +/* + * Return a freshly allocated List with room for at least min_size cells. + * + * Since empty non-NIL lists are invalid, new_list() sets the initial length + * to min_size, effectively marking that number of cells as valid; the caller + * is responsible for filling in their data. + */ +static List * +new_list(NodeTag type, int min_size) +{ + List *newlist; + int max_size; + + Assert(min_size > 0); + + /* + * We allocate all the requested cells, and possibly some more, as part of + * the same palloc request as the List header. This is a big win for the + * typical case of short fixed-length lists. It can lose if we allocate a + * moderately long list and then it gets extended; we'll be wasting more + * initial_elements[] space than if we'd made the header small. However, + * rounding up the request as we do in the normal code path provides some + * defense against small extensions. + */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * Normally, we set up a list with some extra cells, to allow it to grow + * without a repalloc. Prefer cell counts chosen to make the total + * allocation a power-of-2, since palloc would round it up to that anyway. + * (That stops being true for very large allocations, but very long lists + * are infrequent, so it doesn't seem worth special logic for such cases.) + * + * The minimum allocation is 8 ListCell units, providing either 4 or 5 + * available ListCells depending on the machine's word width. Counting + * palloc's overhead, this uses the same amount of space as a one-cell + * list did in the old implementation, and less space for any longer list. 
+ * + * We needn't worry about integer overflow; no caller passes min_size + * that's more than twice the size of an existing list, so the size limits + * within palloc will ensure that we don't overflow here. + */ + max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD)); + max_size -= LIST_HEADER_OVERHEAD; +#else + + /* + * For debugging, don't allow any extra space. This forces any cell + * addition to go through enlarge_list() and thus move the existing data. + */ + max_size = min_size; +#endif + + newlist = (List *) palloc(offsetof(List, initial_elements) + + max_size * sizeof(ListCell)); + newlist->type = type; + newlist->length = min_size; + newlist->max_length = max_size; + newlist->elements = newlist->initial_elements; + + return newlist; +} + +/* + * Enlarge an existing non-NIL List to have room for at least min_size cells. + * + * This does *not* update list->length, as some callers would find that + * inconvenient. (list->length had better be the correct number of existing + * valid cells, though.) + */ +static void +enlarge_list(List *list, int min_size) +{ + int new_max_len; + + Assert(min_size > list->max_length); /* else we shouldn't be here */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * As above, we prefer power-of-two total allocations; but here we need + * not account for list header overhead. + */ + + /* clamp the minimum value to 16, a semi-arbitrary small power of 2 */ + new_max_len = pg_nextpower2_32(Max(16, min_size)); + +#else + /* As above, don't allocate anything extra */ + new_max_len = min_size; +#endif + + if (list->elements == list->initial_elements) + { + /* + * Replace original in-line allocation with a separate palloc block. + * Ensure it is in the same memory context as the List header. (The + * previous List implementation did not offer any guarantees about + * keeping all list cells in the same context, but it seems reasonable + * to create such a guarantee now.) 
+ */ + list->elements = (ListCell *) + MemoryContextAlloc(GetMemoryChunkContext(list), + new_max_len * sizeof(ListCell)); + memcpy(list->elements, list->initial_elements, + list->length * sizeof(ListCell)); + + /* + * We must not move the list header, so it's unsafe to try to reclaim + * the initial_elements[] space via repalloc. In debugging builds, + * however, we can clear that space and/or mark it inaccessible. + * (wipe_mem includes VALGRIND_MAKE_MEM_NOACCESS.) + */ +#ifdef CLOBBER_FREED_MEMORY + wipe_mem(list->initial_elements, + list->max_length * sizeof(ListCell)); +#else + VALGRIND_MAKE_MEM_NOACCESS(list->initial_elements, + list->max_length * sizeof(ListCell)); +#endif + } + else + { +#ifndef DEBUG_LIST_MEMORY_USAGE + /* Normally, let repalloc deal with enlargement */ + list->elements = (ListCell *) repalloc(list->elements, + new_max_len * sizeof(ListCell)); +#else + /* + * repalloc() might enlarge the space in-place, which we don't want + * for debugging purposes, so forcibly move the data somewhere else. + */ + ListCell *newelements; + + newelements = (ListCell *) + MemoryContextAlloc(GetMemoryChunkContext(list), + new_max_len * sizeof(ListCell)); + memcpy(newelements, list->elements, + list->length * sizeof(ListCell)); + pfree(list->elements); + list->elements = newelements; +#endif + } + + list->max_length = new_max_len; +} + +/* + * Make room for a new head cell in the given (non-NIL) list. + * + * The data in the new head cell is undefined; the caller should be + * sure to fill it in + */ +static void +new_head_cell(List *list) +{ + /* Enlarge array if necessary */ + if (list->length >= list->max_length) + enlarge_list(list, list->length + 1); + /* Now shove the existing data over */ + memmove(&list->elements[1], &list->elements[0], + list->length * sizeof(ListCell)); + list->length++; +} + +/* + * Make room for a new tail cell in the given (non-NIL) list. 
+ * + * The data in the new tail cell is undefined; the caller should be + * sure to fill it in + */ +static void +new_tail_cell(List *list) +{ + /* Enlarge array if necessary */ + if (list->length >= list->max_length) + enlarge_list(list, list->length + 1); + list->length++; +} + +/* + * Append a pointer to the list. A pointer to the modified list is + * returned. Note that this function may or may not destructively + * modify the list; callers should always use this function's return + * value, rather than continuing to use the pointer passed as the + * first argument. + */ +List * +lappend(List *list, void *datum) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List, 1); + else + new_tail_cell(list); + + lfirst(list_tail(list)) = datum; + check_list_invariants(list); + return list; +} + +/* + * Prepend a new element to the list. A pointer to the modified list + * is returned. Note that this function may or may not destructively + * modify the list; callers should always use this function's return + * value, rather than continuing to use the pointer passed as the + * second argument. + * + * Caution: before Postgres 8.0, the original List was unmodified and + * could be considered to retain its separate identity. This is no longer + * the case. + */ +List * +lcons(void *datum, List *list) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List, 1); + else + new_head_cell(list); + + lfirst(list_head(list)) = datum; + check_list_invariants(list); + return list; +} + +#endif /* PG_VERSION_NUM */ diff --git a/tests/cmocka/missing_stringinfo.c b/tests/cmocka/missing_stringinfo.c new file mode 100644 index 00000000..80710a4e --- /dev/null +++ b/tests/cmocka/missing_stringinfo.c @@ -0,0 +1,300 @@ +/*------------------------------------------------------------------------- + * + * stringinfo.c + * + * StringInfo provides an indefinitely-extensible string data type. 
+ * It can be used to buffer either ordinary C strings (null-terminated text) + * or arbitrary binary data. All storage is allocated with palloc(). + * + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/backend/lib/stringinfo.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "undef_printf.h" + +#include "lib/stringinfo.h" +#include "utils/memutils.h" + + +/* + * makeStringInfo + * + * Create an empty 'StringInfoData' & return a pointer to it. + */ +StringInfo +makeStringInfo(void) +{ + StringInfo res; + + res = (StringInfo) palloc(sizeof(StringInfoData)); + + initStringInfo(res); + + return res; +} + +/* + * initStringInfo + * + * Initialize a StringInfoData struct (with previously undefined contents) + * to describe an empty string. + */ +void +initStringInfo(StringInfo str) +{ + int size = 1024; /* initial default buffer size */ + + str->data = (char *) palloc(size); + str->maxlen = size; + resetStringInfo(str); +} + +/* + * resetStringInfo + * + * Reset the StringInfo: the data buffer remains valid, but its + * previous content, if any, is cleared. + */ +void +resetStringInfo(StringInfo str) +{ + str->data[0] = '\0'; + str->len = 0; + str->cursor = 0; +} + +/* + * appendStringInfo + * + * Format text data under the control of fmt (an sprintf-style format string) + * and append it to whatever is already in str. More space is allocated + * to str if necessary. This is sort of like a combination of sprintf and + * strcat. + */ +void +appendStringInfo(StringInfo str, const char *fmt,...) +{ + for (;;) + { + va_list args; + int needed; + + /* Try to format the data. */ + va_start(args, fmt); + needed = appendStringInfoVA(str, fmt, args); + va_end(args); + + if (needed == 0) + break; /* success */ + + /* Increase the buffer size and try again. 
*/ + enlargeStringInfo(str, needed); + } +} + +/* + * appendStringInfoVA + * + * Attempt to format text data under the control of fmt (an sprintf-style + * format string) and append it to whatever is already in str. If successful + * return zero; if not (because there's not enough space), return an estimate + * of the space needed, without modifying str. Typically the caller should + * pass the return value to enlargeStringInfo() before trying again; see + * appendStringInfo for standard usage pattern. + * + * XXX This API is ugly, but there seems no alternative given the C spec's + * restrictions on what can portably be done with va_list arguments: you have + * to redo va_start before you can rescan the argument list, and we can't do + * that from here. + */ +int +appendStringInfoVA(StringInfo str, const char *fmt, va_list args) +{ + int avail; + size_t nprinted; + + Assert(str != NULL); + + /* + * If there's hardly any space, don't bother trying, just fail to make the + * caller enlarge the buffer first. We have to guess at how much to + * enlarge, since we're skipping the formatting work. + */ + avail = str->maxlen - str->len; + if (avail < 16) + return 32; + + nprinted = vsnprintf(str->data + str->len, (size_t) avail, fmt, args); + + if (nprinted < (size_t) avail) + { + /* Success. Note nprinted does not include trailing null. */ + str->len += (int) nprinted; + return 0; + } + + /* Restore the trailing null so that str is unmodified. */ + str->data[str->len] = '\0'; + + /* + * Return pvsnprintf's estimate of the space needed. (Although this is + * given as a size_t, we know it will fit in int because it's not more + * than MaxAllocSize.) + */ + return (int) nprinted; +} + +/* + * appendStringInfoString + * + * Append a null-terminated string to str. + * Like appendStringInfo(str, "%s", s) but faster. 
+ */ +void +appendStringInfoString(StringInfo str, const char *s) +{ + appendBinaryStringInfo(str, s, strlen(s)); +} + +/* + * appendStringInfoChar + * + * Append a single byte to str. + * Like appendStringInfo(str, "%c", ch) but much faster. + */ +void +appendStringInfoChar(StringInfo str, char ch) +{ + /* Make more room if needed */ + if (str->len + 1 >= str->maxlen) + enlargeStringInfo(str, 1); + + /* OK, append the character */ + str->data[str->len] = ch; + str->len++; + str->data[str->len] = '\0'; +} + +/* + * appendStringInfoSpaces + * + * Append the specified number of spaces to a buffer. + */ +void +appendStringInfoSpaces(StringInfo str, int count) +{ + if (count > 0) + { + /* Make more room if needed */ + enlargeStringInfo(str, count); + + /* OK, append the spaces */ + while (--count >= 0) + str->data[str->len++] = ' '; + str->data[str->len] = '\0'; + } +} + +/* + * appendBinaryStringInfo + * + * Append arbitrary binary data to a StringInfo, allocating more space + * if necessary. + */ +void +appendBinaryStringInfo(StringInfo str, +#if PG_VERSION_NUM < 160000 + const char *data, +#else + const void *data, +#endif + int datalen) +{ + Assert(str != NULL); + + /* Make more room if needed */ + enlargeStringInfo(str, datalen); + + /* OK, append the data */ + memcpy(str->data + str->len, data, datalen); + str->len += datalen; + + /* + * Keep a trailing null in place, even though it's probably useless for + * binary data. (Some callers are dealing with text but call this because + * their input isn't null-terminated.) + */ + str->data[str->len] = '\0'; +} + +/* + * enlargeStringInfo + * + * Make sure there is enough space for 'needed' more bytes + * ('needed' does not include the terminating null). + * + * External callers usually need not concern themselves with this, since + * all stringinfo.c routines do it automatically. 
However, if a caller + * knows that a StringInfo will eventually become X bytes large, it + * can save some palloc overhead by enlarging the buffer before starting + * to store data in it. + * + * NB: because we use repalloc() to enlarge the buffer, the string buffer + * will remain allocated in the same memory context that was current when + * initStringInfo was called, even if another context is now current. + * This is the desired and indeed critical behavior! + */ +void +enlargeStringInfo(StringInfo str, int needed) +{ + int newlen; + + /* + * Guard against out-of-range "needed" values. Without this, we can get + * an overflow or infinite loop in the following. + */ + if (needed < 0) /* should not happen */ + { + printf("invalid string enlargement request size: %d\n", needed); + fflush(stderr); + abort(); + } + if (((Size) needed) >= (MaxAllocSize - (Size) str->len)) + { + printf("out of memory\n"); + fflush(stderr); + abort(); + } + + needed += str->len + 1; /* total space required now */ + + /* Because of the above test, we now have needed <= MaxAllocSize */ + + if (needed <= str->maxlen) + return; /* got enough space already */ + + /* + * We don't want to allocate just a little more space with each append; + * for efficiency, double the buffer size each time it overflows. + * Actually, we might need to more than double it if 'needed' is big... + */ + newlen = 2 * str->maxlen; + while (needed > newlen) + newlen = 2 * newlen; + + /* + * Clamp to MaxAllocSize in case we went past it. Note we are assuming + * here that MaxAllocSize <= INT_MAX/2, else the above loop could + * overflow. We will still have newlen >= needed. 
+ */ + if (newlen > (int) MaxAllocSize) + newlen = (int) MaxAllocSize; + + str->data = (char *) repalloc(str->data, newlen); + + str->maxlen = newlen; +} diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c new file mode 100644 index 00000000..1f700bc3 --- /dev/null +++ b/tests/cmocka/rangeset_tests.c @@ -0,0 +1,535 @@ +#include +#include +#include +#include + +#include "rangeset.h" + +/* for "print" functions */ +#include "debug_print.c" + + +/* + * ----------------------- + * Declarations of tests + * ----------------------- + */ + +static void test_irange_basic(void **state); +static void test_irange_change_lossiness(void **state); + +static void test_irange_list_union_merge(void **state); +static void test_irange_list_union_lossy_cov(void **state); +static void test_irange_list_union_complete_cov(void **state); +static void test_irange_list_union_intersecting(void **state); + +static void test_irange_list_intersection(void **state); + + +/* Entrypoint */ +int +main(void) +{ + /* Array of test functions */ + const struct CMUnitTest tests[] = + { + cmocka_unit_test(test_irange_basic), + cmocka_unit_test(test_irange_change_lossiness), + cmocka_unit_test(test_irange_list_union_merge), + cmocka_unit_test(test_irange_list_union_lossy_cov), + cmocka_unit_test(test_irange_list_union_complete_cov), + cmocka_unit_test(test_irange_list_union_intersecting), + cmocka_unit_test(test_irange_list_intersection), + }; + + /* Run series of tests */ + return cmocka_run_group_tests(tests, NULL, NULL); +} + +/* + * ---------------------- + * Definitions of tests + * ---------------------- + */ + +/* Basic behavior tests */ +static void +test_irange_basic(void **state) +{ + IndexRange irange; + List *irange_list; + + /* test irb_pred() */ + assert_int_equal(99, irb_pred(100)); + assert_int_equal(0, irb_pred(1)); + assert_int_equal(0, irb_pred(0)); + + /* test irb_succ() */ + assert_int_equal(100, irb_succ(99)); + assert_int_equal(IRANGE_BOUNDARY_MASK, 
irb_succ(IRANGE_BOUNDARY_MASK)); + assert_int_equal(IRANGE_BOUNDARY_MASK, irb_succ(IRANGE_BOUNDARY_MASK + 1)); + + /* test convenience macros */ + irange = make_irange(0, IRANGE_BOUNDARY_MASK, IR_LOSSY); + assert_int_equal(irange_lower(irange), 0); + assert_int_equal(irange_upper(irange), IRANGE_BOUNDARY_MASK); + assert_true(is_irange_lossy(irange)); + assert_true(is_irange_valid(irange)); + + /* test allocation */ + irange = make_irange(100, 200, IR_LOSSY); + irange_list = lappend_irange(NIL, irange); + assert_memory_equal(&irange, &linitial_irange(irange_list), sizeof(IndexRange)); + assert_memory_equal(&irange, &llast_irange(irange_list), sizeof(IndexRange)); + + /* test length */ + irange_list = NIL; + assert_int_equal(irange_list_length(irange_list), 0); + irange_list = lappend_irange(irange_list, make_irange(10, 20, IR_LOSSY)); + assert_int_equal(irange_list_length(irange_list), 11); + irange_list = lappend_irange(irange_list, make_irange(21, 30, IR_LOSSY)); + assert_int_equal(irange_list_length(irange_list), 21); +} + + +/* Test lossiness switcher */ +static void +test_irange_change_lossiness(void **state) +{ + List *irange_list; + + /* test lossiness change (NIL) */ + irange_list = irange_list_set_lossiness(NIL, IR_LOSSY); + assert_ptr_equal(irange_list, NIL); + irange_list = irange_list_set_lossiness(NIL, IR_COMPLETE); + assert_ptr_equal(irange_list, NIL); + + /* test lossiness change (no-op) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-20]L"); + + /* test lossiness change (no-op) #2 */ + irange_list = list_make1_irange(make_irange(30, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[30-40]C"); + + /* test lossiness change (single element) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + 
irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-20]C"); + + /* test lossiness change (single element) #2 */ + irange_list = list_make1_irange(make_irange(30, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[30-40]L"); + + /* test lossiness change (multiple elements, adjacent) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-40]C"); + + /* test lossiness change (multiple elements, adjacent) #2 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_COMPLETE)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-40]L"); + + /* test lossiness change (multiple elements, non-adjacent) #1 */ + irange_list = list_make1_irange(make_irange(10, 15, IR_COMPLETE)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-15]C, [21-40]C"); + + /* test lossiness change (multiple elements, non-adjacent) #2 */ + irange_list = list_make1_irange(make_irange(10, 15, IR_LOSSY)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-15]L, [21-40]L"); +} + + +/* Test merges of adjoint IndexRanges */ +static void +test_irange_list_union_merge(void **state) +{ + IndexRange a, b; + List *unmerged, + *union_result; + + + /* Subtest #0 */ + a = make_irange(0, 8, IR_COMPLETE); 
+ unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(9, 10, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(11, 11, IR_LOSSY)); + unmerged = lappend_irange(unmerged, make_irange(12, 12, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(13, 13, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(14, 24, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(15, 20, IR_COMPLETE)); + + union_result = irange_list_union(list_make1_irange(a), unmerged); + + assert_string_equal(rangeset_print(union_result), + "[0-10]C, 11L, [12-24]C"); + + union_result = irange_list_union(unmerged, unmerged); + + assert_string_equal(rangeset_print(union_result), + "[9-10]C, 11L, [12-24]C"); + + + /* Subtest #1 */ + a = make_irange(0, 10, IR_COMPLETE); + b = make_irange(12, 20, IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-10]C, [12-20]C"); + + /* Subtest #2 */ + a = make_irange(0, 10, IR_LOSSY); + b = make_irange(11, 20, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-20]L"); + +} + +/* Lossy IndexRange covers complete IndexRange */ +static void +test_irange_list_union_lossy_cov(void **state) +{ + IndexRange a, b; + List *union_result; + + + /* Subtest #0 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(0, 100, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]L"); + + /* Subtest #1 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(0, 100, IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #2 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(0, 50, 
IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-50]C, [51-100]L"); + + /* Subtest #3 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(50, 100, IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-49]L, [50-100]C"); + + /* Subtest #4 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(50, 99, IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-49]L, [50-99]C, 100L"); + + /* Subtest #5 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(1, 100, IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "0L, [1-100]C"); + + /* Subtest #6 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(20, 50, IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-19]L, [20-50]C, [51-100]L"); +} + +/* Complete IndexRange covers lossy IndexRange */ +static void +test_irange_list_union_complete_cov(void **state) +{ + IndexRange a, b; + List *union_result; + + + /* Subtest #0 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(0, 100, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #1 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(20, 50, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #2 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(0, 50, IR_LOSSY); + union_result = 
irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #3 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(50, 100, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); +} + +/* Several IndexRanges intersect, unite them */ +static void +test_irange_list_union_intersecting(void **state) +{ + IndexRange a, b; + List *unmerged, + *union_result; + + + /* Subtest #0 */ + a = make_irange(0, 55, IR_COMPLETE); + b = make_irange(55, 100, IR_COMPLETE); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #1 */ + a = make_irange(0, 55, IR_COMPLETE); + b = make_irange(55, 100, IR_LOSSY); + union_result = irange_list_union(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-55]C, [56-100]L"); + + /* Subtest #2 */ + unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(0, 45, IR_LOSSY)); + unmerged = lappend_irange(unmerged, make_irange(100, 100, IR_LOSSY)); + b = make_irange(40, 65, IR_COMPLETE); + union_result = irange_list_union(unmerged, list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-39]L, [40-65]C, 100L"); + + /* Subtest #3 */ + unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(0, 45, IR_LOSSY)); + unmerged = lappend_irange(unmerged, make_irange(64, 100, IR_LOSSY)); + b = make_irange(40, 65, IR_COMPLETE); + union_result = irange_list_union(unmerged, list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-39]L, [40-65]C, [66-100]L"); + + /* Subtest #4 */ + unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(0, 45, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(64, 100, 
IR_COMPLETE)); + b = make_irange(40, 65, IR_COMPLETE); + union_result = irange_list_union(unmerged, list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-100]C"); + + /* Subtest #5 */ + unmerged = NIL; + unmerged = lappend_irange(unmerged, make_irange(0, 45, IR_COMPLETE)); + unmerged = lappend_irange(unmerged, make_irange(64, 100, IR_COMPLETE)); + b = make_irange(40, 65, IR_LOSSY); + union_result = irange_list_union(unmerged, list_make1_irange(b)); + + assert_string_equal(rangeset_print(union_result), + "[0-45]C, [46-63]L, [64-100]C"); +} + + +/* Test intersection of IndexRanges */ +static void +test_irange_list_intersection(void **state) +{ + IndexRange a, b; + List *intersection_result, + *left_list, + *right_list; + + + /* Subtest #0 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(10, 20, IR_LOSSY); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[10-20]L"); + + /* Subtest #1 */ + a = make_irange(0, 100, IR_LOSSY); + b = make_irange(10, 20, IR_COMPLETE); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[10-20]L"); + + /* Subtest #2 */ + a = make_irange(0, 100, IR_COMPLETE); + b = make_irange(10, 20, IR_LOSSY); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[10-20]L"); + + /* Subtest #3 */ + a = make_irange(15, 25, IR_COMPLETE); + b = make_irange(10, 20, IR_LOSSY); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[15-20]L"); + + /* Subtest #4 */ + a = make_irange(15, 25, IR_COMPLETE); + b = make_irange(10, 20, IR_COMPLETE); + + intersection_result = 
irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + "[15-20]C"); + + /* Subtest #5 */ + left_list = NIL; + left_list = lappend_irange(left_list, make_irange(0, 11, IR_LOSSY)); + left_list = lappend_irange(left_list, make_irange(12, 20, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(1, 15, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(16, 20, IR_LOSSY)); + + intersection_result = irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "[1-11]L, [12-15]C, [16-20]L"); + + /* Subtest #6 */ + left_list = NIL; + left_list = lappend_irange(left_list, make_irange(0, 11, IR_LOSSY)); + left_list = lappend_irange(left_list, make_irange(12, 20, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(1, 15, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(16, 20, IR_COMPLETE)); + + intersection_result = irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "[1-11]L, [12-20]C"); + + /* Subtest #7 */ + a = make_irange(0, 10, IR_COMPLETE); + b = make_irange(20, 20, IR_COMPLETE); + + intersection_result = irange_list_intersection(list_make1_irange(a), + list_make1_irange(b)); + + assert_string_equal(rangeset_print(intersection_result), + ""); /* empty set */ + + /* Subtest #8 */ + a = make_irange(0, 10, IR_LOSSY); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(10, 10, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(16, 20, IR_LOSSY)); + + intersection_result = irange_list_intersection(list_make1_irange(a), + right_list); + + assert_string_equal(rangeset_print(intersection_result), + "10L"); + + /* Subtest #9 */ + left_list = NIL; + left_list = lappend_irange(left_list, make_irange(15, 15, IR_LOSSY)); + left_list = 
lappend_irange(left_list, make_irange(25, 25, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(0, 20, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(21, 40, IR_LOSSY)); + + intersection_result = irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "15L, 25L"); + + /* Subtest #10 */ + left_list = NIL; + left_list = lappend_irange(left_list, make_irange(21, 21, IR_LOSSY)); + left_list = lappend_irange(left_list, make_irange(22, 22, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(0, 21, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(22, 40, IR_LOSSY)); + + intersection_result = irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "[21-22]L"); + + /* Subtest #11 */ + left_list = NIL; + left_list = lappend_irange(left_list, make_irange(21, 21, IR_LOSSY)); + left_list = lappend_irange(left_list, make_irange(22, 25, IR_COMPLETE)); + right_list = NIL; + right_list = lappend_irange(right_list, make_irange(0, 21, IR_COMPLETE)); + right_list = lappend_irange(right_list, make_irange(22, 40, IR_COMPLETE)); + + intersection_result = irange_list_intersection(left_list, right_list); + + assert_string_equal(rangeset_print(intersection_result), + "21L, [22-25]C"); +} diff --git a/tests/cmocka/undef_printf.h b/tests/cmocka/undef_printf.h new file mode 100644 index 00000000..63ba700c --- /dev/null +++ b/tests/cmocka/undef_printf.h @@ -0,0 +1,24 @@ +#ifdef vsnprintf +#undef vsnprintf +#endif +#ifdef snprintf +#undef snprintf +#endif +#ifdef vsprintf +#undef vsprintf +#endif +#ifdef sprintf +#undef sprintf +#endif +#ifdef vfprintf +#undef vfprintf +#endif +#ifdef fprintf +#undef fprintf +#endif +#ifdef vprintf +#undef vprintf +#endif +#ifdef printf +#undef printf +#endif diff --git a/tests/partitioning_test.py b/tests/partitioning_test.py deleted 
file mode 100644 index 6dd589b8..00000000 --- a/tests/partitioning_test.py +++ /dev/null @@ -1,249 +0,0 @@ -#coding: utf-8 -""" - concurrent_partitioning_test.py - Tests concurrent partitioning worker with simultaneous update queries - - Copyright (c) 2015-2016, Postgres Professional -""" - -import unittest -from testgres import get_new_node, clean_all, stop_all -from subprocess import Popen, PIPE -import subprocess -import time - - -class PartitioningTests(unittest.TestCase): - - def setUp(self): - self.setup_cmd = [ - 'create extension pg_pathman', - 'create table abc(id serial, t text)', - 'insert into abc select generate_series(1, 300000)', - 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', - ] - - def tearDown(self): - stop_all() - # clean_all() - - def init_test_data(self, node): - """Initialize pg_pathman extension and test data""" - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) - - def catchup_replica(self, master, replica): - """Wait until replica synchronizes with master""" - master.poll_query_until( - 'postgres', - 'SELECT pg_current_xlog_location() <= replay_location ' - 'FROM pg_stat_replication WHERE application_name = \'%s\'' - % replica.name) - - def printlog(self, logfile): - with open(logfile, 'r') as log: - for line in log.readlines(): - print line - - def test_concurrent(self): - """Tests concurrent partitioning""" - node = get_new_node('test') - try: - node.init() - node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - self.init_test_data(node) - - node.psql('postgres', 'select partition_table_concurrently(\'abc\')') - - while True: - # update some rows to check for deadlocks - # import ipdb; ipdb.set_trace() - node.safe_psql('postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int from generate_series(1, 3000)) - ''') - - count = node.execute('postgres', 'select count(*) from pathman_concurrent_part_tasks') - - # if there 
is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - except Exception, e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - - def test_replication(self): - """Tests how pg_pathman works with replication""" - node = get_new_node('master') - replica = get_new_node('repl') - - try: - # initialize master server - node.init(allows_streaming=True) - node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain 
(costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - except Exception, e: - self.printlog(node.logs_dir + '/postgresql.log') - self.printlog(replica.logs_dir + '/postgresql.log') - raise e - - def test_locks(self): - """Test that a session trying to create new partitions waits for other - sessions if they doing the same""" - - import threading - import time - - class Flag: - def __init__(self, value): - self.flag = value - - def set(self, value): - self.flag = value - - def get(self): - return self.flag - - # There is one flag for each thread which shows if thread have done - # its work - flags = [Flag(False) for i in xrange(3)] - - # All threads synchronizes though this lock - lock = threading.Lock() - - # Define thread function - def add_partition(node, flag, query): - """ We expect that this query will wait until another session - commits or rolls back""" - node.safe_psql('postgres', query) - with lock: - flag.set(True) - - # Initialize master server - node = get_new_node('master') - - try: - node.init() - node.append_conf('postgresql.conf', 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' - + 'create table abc(id serial, t text); ' - + 'insert into abc select generate_series(1, 100000); ' - + 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] 
- threads = [] - for i in range(3): - thread = \ - threading.Thread(target=add_partition, args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. Since then other sessions can create partitions - con.commit() - - # Now wait until each thread finishes - for i in range(3): - threads[i].join() - - # Check flags, it should be true which means that threads are finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - '6\n' - ) - except Exception, e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - -if __name__ == "__main__": - unittest.main() diff --git a/tests/python/.flake8 b/tests/python/.flake8 new file mode 100644 index 00000000..7d6f9f71 --- /dev/null +++ b/tests/python/.flake8 @@ -0,0 +1,2 @@ +[flake8] +ignore = E241, E501 diff --git a/tests/python/.gitignore b/tests/python/.gitignore new file mode 100644 index 00000000..750ecf9f --- /dev/null +++ b/tests/python/.gitignore @@ -0,0 +1 @@ +tests.log diff --git a/tests/python/.style.yapf b/tests/python/.style.yapf new file mode 100644 index 00000000..88f004bb --- /dev/null +++ b/tests/python/.style.yapf @@ -0,0 +1,5 @@ +[style] +based_on_style = pep8 +spaces_before_comment = 4 +split_before_logical_operator = false +column_limit=100 diff --git a/tests/python/Makefile b/tests/python/Makefile new file mode 100644 index 00000000..8311bb12 --- /dev/null +++ b/tests/python/Makefile @@ -0,0 +1,6 @@ +partitioning_tests: +ifneq ($(CASE),) + python3 -u partitioning_test.py Tests.$(CASE) +else + python3 -u partitioning_test.py +endif diff --git a/tests/python/README.md b/tests/python/README.md new file mode 100644 
index 00000000..4e065e11 --- /dev/null +++ b/tests/python/README.md @@ -0,0 +1,34 @@ +# Tests + +This directory contains script to tests some features which cannot be tested +with only regression tests + +## Running + +First of all you need to install `testgres` python module which contains useful +functions to start postgres clusters and make queries: + +``` +pip3 install testgres +``` + +To run tests execute: + +``` +python3 -m unittest partitioning_test +``` + +from current directory. If you want to run a specific postgres build then +you should specify the path to your pg_config executable by setting PG_CONFIG +environment variable: + +``` +export PG_CONFIG=/path/to/pg_config +``` + +To test FDW features you need to install postgres_fdw contrib module first. +If you want to skip FDW tests set the FDW_DISABLED environment variable: + +``` +export FDW_DISABLED=1 +``` diff --git a/tests/python/__init__.py b/tests/python/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py new file mode 100644 index 00000000..ba4b205f --- /dev/null +++ b/tests/python/partitioning_test.py @@ -0,0 +1,1138 @@ +#!/usr/bin/env python3 +# coding: utf-8 +""" +partitioning_test.py + Various stuff that looks out of place in regression tests + + Copyright (c) 2015-2020, Postgres Professional +""" + +import functools +import json +import math +import multiprocessing +import os +import random +import re +import subprocess +import sys +import threading +import time +import unittest + +from packaging.version import Version +from testgres import get_new_node, get_pg_version, configure_testgres + +# set setup base logging config, it can be turned on by `use_python_logging` +# parameter on node setup +# configure_testgres(use_python_logging=True) + +import logging +import logging.config + +logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tests.log') +LOG_CONFIG = { + 'version': 1, + 
'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + 'file': { + 'class': 'logging.FileHandler', + 'filename': logfile, + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + }, + 'formatters': { + 'base_format': { + 'format': '%(node)-5s: %(message)s', + }, + }, + 'root': { + 'handlers': ('file', ), + 'level': 'DEBUG', + }, +} + +logging.config.dictConfig(LOG_CONFIG) +version = Version(get_pg_version()) + + +# Helper function for json equality +def ordered(obj, skip_keys=None): + if isinstance(obj, dict): + return sorted((k, ordered(v, skip_keys=skip_keys)) for k, v in obj.items() + if skip_keys is None or (skip_keys and k not in skip_keys)) + if isinstance(obj, list): + return sorted(ordered(x, skip_keys=skip_keys) for x in obj) + else: + return obj + + +# Check if postgres_fdw is available +@functools.lru_cache(maxsize=1) +def is_postgres_fdw_ready(): + with get_new_node().init().start() as node: + result = node.execute(""" + select count(*) from pg_available_extensions where name = 'postgres_fdw' + """) + + return result[0][0] > 0 + + +class Tests(unittest.TestCase): + def set_trace(self, con, command="pg_debug"): + pid = con.execute("select pg_backend_pid()")[0][0] + p = subprocess.Popen([command], stdin=subprocess.PIPE) + p.communicate(str(pid).encode()) + + def start_new_pathman_cluster(self, + allow_streaming=False, + test_data=False, + enable_partitionrouter=False): + + node = get_new_node() + node.init(allow_streaming=allow_streaming) + node.append_conf("shared_preload_libraries='pg_pathman'\n") + if enable_partitionrouter: + node.append_conf("pg_pathman.enable_partitionrouter=on\n") + + node.start() + node.psql('create extension pg_pathman') + + if test_data: + node.safe_psql(""" + create table abc(id serial, t text); + insert into abc select generate_series(1, 300000); + select create_hash_partitions('abc', 'id', 3, partition_data := false); + """) + + 
node.safe_psql('vacuum analyze') + + return node + + def test_concurrent(self): + """ Test concurrent partitioning """ + + with self.start_new_pathman_cluster(test_data=True) as node: + node.psql("select partition_table_concurrently('abc')") + + while True: + # update some rows to check for deadlocks + node.safe_psql(""" + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + """) + + count = node.execute(""" + select count(*) from pathman_concurrent_part_tasks + """) + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('select count(*) from abc') + self.assertEqual(data[0][0], 300000) + node.stop() + + def test_replication(self): + """ Test how pg_pathman works with replication """ + + with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node: + with node.replicate() as replica: + replica.start() + replica.catchup() + + # check that results are equal + self.assertEqual( + node.psql('explain (costs off) select * from abc'), + replica.psql('explain (costs off) select * from abc')) + + # enable parent and see if it is enabled in replica + node.psql("select enable_parent('abc')") + + # wait until replica catches up + replica.catchup() + + self.assertEqual( + node.psql('explain (costs off) select * from abc'), + replica.psql('explain (costs off) select * from abc')) + self.assertEqual( + node.psql('select * from abc'), + replica.psql('select * from abc')) + self.assertEqual( + node.execute('select count(*) from abc')[0][0], 300000) + + # check that UPDATE in pathman_config_params invalidates cache + node.psql('update pathman_config_params set enable_parent = false') + + # wait until replica catches up + replica.catchup() + + self.assertEqual( + node.psql('explain (costs off) select * from abc'), + replica.psql('explain (costs 
off) select * from abc')) + self.assertEqual( + node.psql('select * from abc'), + replica.psql('select * from abc')) + self.assertEqual( + node.execute('select count(*) from abc')[0][0], 0) + + def test_locks(self): + """ + Test that a session trying to create new partitions + waits for other sessions if they are doing the same + """ + + class Flag: + def __init__(self, value): + self.flag = value + + def set(self, value): + self.flag = value + + def get(self): + return self.flag + + # There is one flag for each thread which shows if thread have done its work + flags = [Flag(False) for i in range(3)] + + # All threads synchronize though this lock + lock = threading.Lock() + + # Define thread function + def add_partition(node, flag, query): + """ + We expect that this query will wait until + another session commits or rolls back + """ + node.safe_psql(query) + with lock: + flag.set(True) + + # Initialize master server + with get_new_node() as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'") + node.start() + + node.safe_psql(""" + create extension pg_pathman; + create table abc(id serial, t text); + insert into abc select generate_series(1, 100000); + select create_range_partitions('abc', 'id', 1, 50000); + """) + + # Start transaction that will create partition + with node.connect() as con: + con.begin() + con.execute("select append_range_partition('abc')") + + # Start threads that suppose to add new partitions and wait some + # time + query = ( + "select prepend_range_partition('abc')", + "select append_range_partition('abc')", + "select add_range_partition('abc', 500000, 550000)", + ) + + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # These threads should wait until current transaction finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), False) + + # Commit transaction. 
Since then other sessions can create + # partitions + con.commit() + + # Now wait until each thread finishes + for thread in threads: + thread.join() + + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) + + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + "select count(*) from pg_inherits where inhparent='abc'::regclass"), + b'6\n') + + def test_tablespace(self): + """ Check tablespace support """ + + def check_tablespace(node, tablename, tablespace): + res = node.execute("select get_tablespace('{}')".format(tablename)) + if len(res) == 0: + return False + + return res[0][0] == tablespace + + with get_new_node() as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'") + node.start() + node.psql('create extension pg_pathman') + + # create tablespace + path = os.path.join(node.data_dir, 'test_space_location') + os.mkdir(path) + node.psql("create tablespace test_space location '{}'".format(path)) + + # create table in this tablespace + node.psql('create table abc(a serial, b int) tablespace test_space') + + # create three partitions. 
Excpect that they will be created in the + # same tablespace as the parent table + node.psql("select create_range_partitions('abc', 'a', 1, 10, 3)") + self.assertTrue(check_tablespace(node, 'abc', 'test_space')) + + # check tablespace for appended partition + node.psql("select append_range_partition('abc', 'abc_appended')") + self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) + + # check tablespace for prepended partition + node.psql("select prepend_range_partition('abc', 'abc_prepended')") + self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) + + # check tablespace for prepended partition + node.psql("select add_range_partition('abc', 41, 51, 'abc_added')") + self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + + # check tablespace for split + node.psql("select split_range_partition('abc_added', 45, 'abc_splitted')") + self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) + + # now let's specify tablespace explicitly + node.psql( + "select append_range_partition('abc', 'abc_appended_2', 'pg_default')" + ) + node.psql( + "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')" + ) + node.psql( + "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')" + ) + node.psql( + "select split_range_partition('abc_added_2', 65, 'abc_splitted_2', 'pg_default')" + ) + + # yapf: disable + self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) + + @unittest.skipUnless(is_postgres_fdw_ready(), 'FDW might be missing') + def test_foreign_table(self): + """ Test foreign tables """ + + # Start master server + with get_new_node() as master, get_new_node() as fserv: + master.init() + master.append_conf(""" + shared_preload_libraries='pg_pathman, 
postgres_fdw'\n + """) + master.start() + master.psql('create extension pg_pathman') + master.psql('create extension postgres_fdw') + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions + master.psql(""" + create table abc(id serial, name text); + select create_range_partitions('abc', 'id', 0, 10, 2) + """) + + # Current user name (needed for user mapping) + username = master.execute('select current_user')[0][0] + + fserv.init().start() + fserv.safe_psql("create table ftable(id serial, name text)") + fserv.safe_psql("insert into ftable values (25, 'foreign')") + + # Create foreign table and attach it to partitioned table + master.safe_psql(""" + create server fserv + foreign data wrapper postgres_fdw + options (dbname 'postgres', host '127.0.0.1', port '{}') + """.format(fserv.port)) + + master.safe_psql(""" + create user mapping for {0} server fserv + options (user '{0}') + """.format(username)) + + master.safe_psql(""" + import foreign schema public limit to (ftable) + from server fserv into public + """) + + master.safe_psql( + "select attach_range_partition('abc', 'ftable', 20, 30)") + + # Check that table attached to partitioned table + self.assertEqual( + master.safe_psql('select * from ftable'), + b'25|foreign\n') + + # Check that we can successfully insert new data into foreign partition + master.safe_psql("insert into abc values (26, 'part')") + self.assertEqual( + master.safe_psql('select * from ftable order by id'), + b'25|foreign\n26|part\n') + + # Testing drop partitions (including foreign partitions) + master.safe_psql("select drop_partitions('abc')") + + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - 
insert data + # - drop partitions + master.psql(""" + create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2) + """) + fserv.safe_psql('create table f_hash_test(id serial, name text)') + + master.safe_psql(""" + import foreign schema public limit to (f_hash_test) + from server fserv into public + """) + master.safe_psql(""" + select replace_hash_partition('hash_test_1', 'f_hash_test') + """) + master.safe_psql('insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('select * from hash_test'), + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') + master.safe_psql("select drop_partitions('hash_test')") + + @unittest.skipUnless(is_postgres_fdw_ready(), 'FDW might be missing') + def test_parallel_nodes(self): + """ Test parallel queries under partitions """ + + # Init and start postgres instance with preload pg_pathman module + with get_new_node() as node: + node.init() + node.append_conf( + "shared_preload_libraries='pg_pathman, postgres_fdw'") + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + if version < Version('9.6.0'): + return + + # Prepare test database + node.psql('create extension pg_pathman') + node.psql(""" + create table range_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table range_partitioned alter column i set not null; + select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); + + create table hash_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) + + # create statistics for both partitioned tables + node.psql('vacuum analyze') + + node.psql(""" + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + 
end; + $$ language plpgsql; + """) + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= Version('10'): + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed', 'Async Capable']), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute("select query_plan('%s')" % 
test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed', 'Async Capable']), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan, skip_keys=['Async Capable']), ordered(expected)) + + # Remove all objects for testing + node.psql('drop table range_partitioned cascade') + node.psql('drop table hash_partitioned cascade') + node.psql('drop extension pg_pathman cascade') + + def test_conc_part_drop_runtime_append(self): + """ Test concurrent partition drop + SELECT (RuntimeAppend) """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create 
table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(22, 40, 13)) + """) # query selects from drop_test_3 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_3 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_3') >= 0: + has_drop_test_3 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_3, has_drop_test_4)) + + # Step 1: cache partitioned table in con1 + con1.begin() + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + + # Step 2: cache partitioned table in con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache + con2.commit() + + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_3') + + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = 
con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: commit 'DROP TABLE' + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 99) + + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_3, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_3) + self.assertTrue(has_drop_test_4) + + def test_conc_part_creation_insert(self): + """ Test concurrent partition creation on INSERT """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("insert into ins_test select generate_series(1, 50)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + con2.commit() + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('lock table ins_test in share update exclusive mode') + + # Step 2: try inserting new value in con2 (waiting) + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if 
int(locks[0][0]) > 0: + break + + # Step 4: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + con1.commit() + + # Step 5: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) + + def test_conc_part_merge_insert(self): + """ Test concurrent merge_range_partitions() + INSERT """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('insert into ins_test values(20)') + con2.commit() + + # Step 1: initilize con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + + # Step 2: initilize con2 + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations + + # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) + con1.execute( + "select merge_range_partitions('ins_test_1', 'ins_test_2')") + + # Step 4: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with 
node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: finish merge in con1 (success, unlock) + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute("select *, tableoid::regclass::text from ins_test") + + # check number of rows in table + self.assertEqual(len(rows), 1) + + # check value that has been inserted + self.assertEqual(int(rows[0][0]), 20) + + # check partition that was chosen for insert + self.assertEqual(str(rows[0][1]), 'ins_test_1') + + def test_pg_dump(self): + with self.start_new_pathman_cluster() as node: + node.safe_psql('create database copy') + + node.safe_psql(""" + create table test_hash(val int not null); + select create_hash_partitions('test_hash', 'val', 10); + insert into test_hash select generate_series(1, 90); + + create table test_range(val int not null); + select create_range_partitions('test_range', 'val', 1, 10, 10); + insert into test_range select generate_series(1, 95); + """) + + dump = node.dump() + node.restore(dbname='copy', filename=dump) + os.remove(dump) + + # HASH + a = node.execute('postgres', 'select * from test_hash order by val') + b = node.execute('copy', 'select * from test_hash order by val') + self.assertEqual(a, b) + c = node.execute('postgres', 'select * from only test_hash order by val') + d = node.execute('copy', 'select * from only test_hash order by val') + self.assertEqual(c, d) + + # RANGE + a = node.execute('postgres', 'select * from test_range order by val') + b = node.execute('copy', 'select * from test_range order by val') + self.assertEqual(a, b) + c = node.execute('postgres', 'select * from only test_range order by val') + d = node.execute('copy', 'select * from only test_range order by val') + self.assertEqual(c, d) + + # check partition sets + p1 = node.execute('postgres', 'select * from pathman_partition_list') + p2 = node.execute('copy', 'select * from 
pathman_partition_list') + self.assertEqual(sorted(p1), sorted(p2)) + + def test_concurrent_detach(self): + """ + Test concurrent detach partition with contiguous + tuple inserting and spawning new partitions + """ + + # Init parameters + num_insert_workers = 8 + detach_timeout = 0.1 # time in sec between successive inserts and detachs + num_detachs = 100 # estimated number of detachs + inserts_advance = 1 # abvance in sec of inserts process under detachs + test_interval = int(math.ceil(detach_timeout * num_detachs)) + + insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/insert_current_timestamp.pgbench" + detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/detachs_in_timeout.pgbench" + + # Check pgbench scripts on existance + self.assertTrue( + os.path.isfile(insert_pgbench_script), + msg="pgbench script with insert timestamp doesn't exist") + + self.assertTrue( + os.path.isfile(detach_pgbench_script), + msg="pgbench script with detach letfmost partition doesn't exist") + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec + with node.connect() as con0: + con0.begin() + con0.execute( + 'create table ts_range_partitioned(ts timestamp not null)') + + # yapf: disable + con0.execute(""" + select create_range_partitions('ts_range_partitioned', + 'ts', + current_timestamp, + interval '%f', + 1) + """ % detach_timeout) + con0.commit() + + # Run in background inserts and detachs processes + with open(os.devnull, 'w') as fnull: + # init pgbench's utility tables + init_pgbench = node.pgbench(stdout=fnull, stderr=fnull, options=["-i"]) + init_pgbench.wait() + + inserts = node.pgbench( + stdout=fnull, + stderr=subprocess.PIPE, + options=[ + "-j", + "%i" % num_insert_workers, "-c", + "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T", + 
"%i" % (test_interval + inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench( + stdout=fnull, + stderr=fnull, + options=[ + "-D", + "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, + "-T", + "%i" % test_interval + ]) + + # Wait for completion of processes + _, stderrdata = inserts.communicate() + detachs.wait() + + # Obtain error log from inserts process + self.assertIsNone( + re.search("ERROR|FATAL|PANIC", str(stderrdata)), + msg=""" + Race condition between detach and concurrent + inserts with append partition is expired + """) + + def test_update_node_plan1(self): + ''' + Test scan on all partititions when using update node. + We can't use regression tests here because 9.5 and 9.6 give + different plans + ''' + + with get_new_node('test_update_node') as node: + node.init() + node.append_conf('postgresql.conf', """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false + pg_pathman.enable_partitionrouter=on + """) + node.start() + + # Prepare test database + node.psql('postgres', 'CREATE EXTENSION pg_pathman;') + node.psql('postgres', 'CREATE SCHEMA test_update_node;') + node.psql('postgres', 'CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT)') + node.psql('postgres', 'INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i;') + node.psql('postgres', "SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10);") + + node.psql('postgres', """ + create or replace function query_plan(query text) returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + with node.connect() as con: + test_query = "UPDATE test_update_node.test_range SET val = 14 WHERE comment=''15''" + plan = con.execute('SELECT query_plan(\'%s\')' % test_query)[0][0] + plan = plan[0]["Plan"] + + # PartitionOverseer + self.assertEqual(plan["Node Type"], "Custom 
Scan") + self.assertEqual(plan["Custom Plan Provider"], 'PartitionOverseer') + + # ModifyTable + plan = plan["Plans"][0] + self.assertEqual(plan["Node Type"], "ModifyTable") + self.assertEqual(plan["Operation"], "Update") + self.assertEqual(plan["Relation Name"], "test_range") + self.assertEqual(len(plan["Target Tables"]), 11) + + # Plan was seriously changed in vanilla since v14 + if version < Version('14'): + expected_format = ''' + { + "Plans": [ + { + "Plans": [ + { + "Filter": "(comment = '15'::text)", + "Node Type": "Seq Scan", + "Relation Name": "test_range%s", + "Parent Relationship": "child" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "child", + "Custom Plan Provider": "PartitionRouter" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "Member", + "Custom Plan Provider": "PartitionFilter" + } + ''' + + for i, f in enumerate([''] + list(map(str, range(1, 10)))): + num = '_' + f if f else '' + expected = json.loads(expected_format % num) + p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias']) + self.assertEqual(p, ordered(expected)) + + node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') + node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') + + def test_concurrent_updates(self): + ''' + Test whether conncurrent updates work correctly between + partitions. 
+ ''' + + create_sql = ''' + CREATE TABLE test1(id INT, b INT NOT NULL); + INSERT INTO test1 + SELECT i, i FROM generate_series(1, 100) i; + SELECT create_range_partitions('test1', 'b', 1, 5); + ''' + + with self.start_new_pathman_cluster(enable_partitionrouter=True) as node: + node.safe_psql(create_sql) + + pool = multiprocessing.Pool(processes=4) + for count in range(1, 200): + pool.apply_async(make_updates, (node, count, )) + + pool.close() + pool.join() + + # check all data is there and not duplicated + with node.connect() as con: + for i in range(1, 100): + row = con.execute("select count(*) from test1 where id = %d" % i)[0] + self.assertEqual(row[0], 1) + + self.assertEqual(node.execute("select count(*) from test1")[0][0], 100) + + +def make_updates(node, count): + update_sql = ''' + BEGIN; + UPDATE test1 SET b = trunc(random() * 100 + 1) WHERE id in (%s); + COMMIT; + ''' + + with node.connect() as con: + for i in range(count): + rows_to_update = random.randint(20, 50) + ids = set([str(random.randint(1, 100)) for i in range(rows_to_update)]) + con.execute(update_sql % ','.join(ids)) + + +if __name__ == "__main__": + if len(sys.argv) > 1: + suite = unittest.TestLoader().loadTestsFromName(sys.argv[1], + module=sys.modules[__name__]) + else: + suite = unittest.TestLoader().loadTestsFromTestCase(Tests) + + configure_testgres(use_python_logging=True) + + result = unittest.TextTestRunner(verbosity=2, failfast=True).run(suite) + if not result.wasSuccessful(): + sys.exit(1) diff --git a/tests/python/pgbench_scripts/detachs_in_timeout.pgbench b/tests/python/pgbench_scripts/detachs_in_timeout.pgbench new file mode 100644 index 00000000..ff2fe861 --- /dev/null +++ b/tests/python/pgbench_scripts/detachs_in_timeout.pgbench @@ -0,0 +1,2 @@ +select detach_range_partition(partition) from (select partition from pathman_partition_list where parent='ts_range_partitioned'::regclass order by range_min limit 1) t; +select pg_sleep(:timeout); diff --git 
a/tests/python/pgbench_scripts/insert_current_timestamp.pgbench b/tests/python/pgbench_scripts/insert_current_timestamp.pgbench new file mode 100644 index 00000000..d0276b11 --- /dev/null +++ b/tests/python/pgbench_scripts/insert_current_timestamp.pgbench @@ -0,0 +1 @@ +insert into ts_range_partitioned values (current_timestamp); diff --git a/tests/update/README.md b/tests/update/README.md new file mode 100644 index 00000000..fd042822 --- /dev/null +++ b/tests/update/README.md @@ -0,0 +1,17 @@ +## pg_pathman's update checker + +It's necessary to check that `ALTER EXTENSION pg_pathman UPDATE` produces an SQL frontend that is exactly the same as a fresh install. + +Usage: + +```bash +PG_CONFIG=... ./dump_pathman_objects %DBNAME% + +diff file_1 file_2 +``` + +check_update.py script tries to verify that update is ok automatically. For +instance, +```bash +tests/update/check_update.py d34a77e worktree +``` diff --git a/tests/update/check_update.py b/tests/update/check_update.py new file mode 100755 index 00000000..4bd740f6 --- /dev/null +++ b/tests/update/check_update.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python3 +#coding: utf-8 + +import shutil +import os +import contextlib +import sys +import argparse +import testgres +import subprocess +import time + +my_dir = os.path.dirname(os.path.abspath(__file__)) +repo_dir = os.path.abspath(os.path.join(my_dir, '../../')) +print(repo_dir) + +# just bunch of tables to create +run_sql = ''' +CREATE EXTENSION pg_pathman; + +CREATE TABLE hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO hash_rel VALUES (1, 1); +INSERT INTO hash_rel VALUES (2, 2); +INSERT INTO hash_rel VALUES (3, 3); + +SELECT create_hash_partitions('hash_rel', 'Value', 3); + +CREATE TABLE range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP not null, + txt TEXT); +CREATE INDEX ON range_rel (dt); +INSERT INTO range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT 
create_range_partitions('range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT create_range_partitions('num_range_rel', 'id', 0, 1000, 4); +INSERT INTO num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +CREATE TABLE improved_dummy_test1 (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO improved_dummy_test1 (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT create_range_partitions('improved_dummy_test1', 'id', 1, 10); +INSERT INTO improved_dummy_test1 (name) VALUES ('test'); /* spawns new partition */ +ALTER TABLE improved_dummy_test1 ADD CHECK (name != 'ib'); + +CREATE TABLE test_improved_dummy_test2 (val INT NOT NULL); +SELECT create_range_partitions('test_improved_dummy_test2', 'val', + generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + +CREATE TABLE insert_into_select(val int NOT NULL); +INSERT INTO insert_into_select SELECT generate_series(1, 100); +SELECT create_range_partitions('insert_into_select', 'val', 1, 20); +CREATE TABLE insert_into_select_copy (LIKE insert_into_select); /* INSERT INTO ... SELECT ... 
*/ + +-- just a lot of actions + +SELECT split_range_partition('num_range_rel_1', 500); +SELECT split_range_partition('range_rel_1', '2015-01-15'::DATE); + +/* Merge two partitions into one */ +SELECT merge_range_partitions('num_range_rel_1', 'num_range_rel_' || currval('num_range_rel_seq')); +SELECT merge_range_partitions('range_rel_1', 'range_rel_' || currval('range_rel_seq')); + +/* Append and prepend partitions */ +SELECT append_range_partition('num_range_rel'); +SELECT prepend_range_partition('num_range_rel'); +SELECT drop_range_partition('num_range_rel_7'); + +SELECT drop_range_partition_expand_next('num_range_rel_4'); +SELECT drop_range_partition_expand_next('num_range_rel_6'); + +SELECT append_range_partition('range_rel'); +SELECT prepend_range_partition('range_rel'); +SELECT drop_range_partition('range_rel_7'); +SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + +CREATE TABLE range_rel_minus_infinity (LIKE range_rel INCLUDING ALL); +SELECT attach_range_partition('range_rel', 'range_rel_minus_infinity', NULL, '2014-12-01'::DATE); +INSERT INTO range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO range_rel (dt) VALUES ('2015-12-15'); + +CREATE TABLE hash_rel_extern (LIKE hash_rel INCLUDING ALL); +SELECT replace_hash_partition('hash_rel_0', 'hash_rel_extern'); + +-- automatic partitions creation +CREATE TABLE range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT create_range_partitions('range_rel_test1', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); +INSERT INTO range_rel_test1 (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); + +INSERT INTO range_rel_test1 (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); + +/* CaMeL cAsE table names and attributes */ +CREATE TABLE "TeSt" (a INT NOT NULL, b INT); +SELECT create_hash_partitions('"TeSt"', 'a', 3); +INSERT INTO "TeSt" VALUES (1, 1); +INSERT INTO "TeSt" VALUES (2, 2); +INSERT INTO "TeSt" 
VALUES (3, 3); + +CREATE TABLE "RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO "RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT create_range_partitions('"RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); +SELECT append_range_partition('"RangeRel"'); +SELECT prepend_range_partition('"RangeRel"'); +SELECT merge_range_partitions('"RangeRel_1"', '"RangeRel_' || currval('"RangeRel_seq"') || '"'); +SELECT split_range_partition('"RangeRel_1"', '2015-01-01'::DATE); + +CREATE TABLE hash_rel_next1 ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO hash_rel_next1 (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('hash_rel_next1', 'value', 3); +''' + +def shell(cmd): + print(cmd) + cp = subprocess.run(cmd, shell=True) + if cp.returncode != 0: + raise subprocess.CalledProcessError(cp.returncode, cmd) + # print(subprocess.check_output(cmd, shell=True).decode("utf-8")) + +def shell_call(cmd): + print(cmd) + return subprocess.run(cmd, shell=True) + +def reinstall_pathman(tmp_pathman_path, revision): + if revision == 'worktree': + shutil.rmtree(tmp_pathman_path) + shutil.copytree(repo_dir, tmp_pathman_path) + os.chdir(tmp_pathman_path) + else: + os.chdir(tmp_pathman_path) + shell("git clean -fdx") + shell("git reset --hard") + shell("git checkout %s" % revision) + shell('make USE_PGXS=1 clean && make USE_PGXS=1 install -j4') + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description=''' + pg_pathman update checker. Testgres is used. Junks into /tmp/pathman_check_update. + First do some partitioned stuff on new version. Save full database dump to + dump_new.sql and pathman object definitions to pathman_objects_new.sql. + Then run old version, do the same stuff. Upgrade and make dumps. Ensure + dumps are the same. Finally, run regressions tests on upgraded version. 
+ ''') + parser.add_argument('branches', nargs=2, + help='specify branches , e.g. "d34a77e master". Special value "worktree" means, well, working tree.') + args = parser.parse_args() + old_branch, new_branch = args.branches[0], args.branches[1] + + pathman_objs_script = os.path.join(my_dir, 'dump_pathman_objects.sql') + + data_prefix = "/tmp/pathman_check_update" + if os.path.isdir(data_prefix): + shutil.rmtree(data_prefix) + dump_new_path = os.path.join(data_prefix, 'dump_new.sql') + dump_updated_path = os.path.join(data_prefix, 'dump_updated.sql') + dump_diff_path = os.path.join(data_prefix, 'dump.diff') + pathman_objs_new_path = os.path.join(data_prefix, 'pathman_objects_new.sql') + pathman_objs_updated_path = os.path.join(data_prefix, 'pathman_objects_updated.sql') + pathman_objs_diff_path = os.path.join(data_prefix, 'pathman_objs.diff') + tmp_pathman_path = os.path.join(data_prefix, "pg_pathman") + + shutil.copytree(repo_dir, tmp_pathman_path) + + reinstall_pathman(tmp_pathman_path, new_branch) + with testgres.get_new_node('brand_new') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + node.start() + node.safe_psql('postgres', run_sql) + node.dump(dump_new_path, 'postgres') + # default user is current OS one + shell("psql -p {} -h {} -f {} -X -q -a -At > {} 2>&1".format(node.port, node.host, pathman_objs_script, pathman_objs_new_path)) + node.stop() + + # now install old version... + reinstall_pathman(tmp_pathman_path, old_branch) + with testgres.get_new_node('updated') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + + node.start() + # do the same stuff... 
+ node.safe_psql('postgres', run_sql) + # and prepare regression db, see below + node.safe_psql('postgres', 'create database contrib_regression') + node.safe_psql('contrib_regression', 'create extension pg_pathman') + + # and upgrade pathman + node.stop() + reinstall_pathman(tmp_pathman_path, new_branch) + node.start() + print("Running updated db on port {}, datadir {}".format(node.port, node.base_dir)) + node.safe_psql('postgres', "alter extension pg_pathman update") + node.safe_psql('postgres', "set pg_pathman.enable = t;") + + # regression tests db, see below + node.safe_psql('contrib_regression', "alter extension pg_pathman update") + node.safe_psql('contrib_regression', "set pg_pathman.enable = t;") + + node.dump(dump_updated_path, 'postgres') + # time.sleep(432432) + # default user is current OS one + shell("psql -p {} -h {} -f {} -X -q -a -At > {} 2>&1".format(node.port, node.host, pathman_objs_script, pathman_objs_updated_path)) + + # check diffs + shell_call("diff -U3 {} {} > {} 2>&1".format(dump_updated_path, dump_new_path, dump_diff_path)) + if os.stat(dump_diff_path).st_size != 0: + msg = "DB dumps are not equal, check out the diff at {}\nProbably that's actually ok, please eyeball the diff manually and say, continue?".format(dump_diff_path) + if input("%s (y/N) " % msg).lower() != 'y': + sys.exit(1) + shell_call("diff -U3 {} {} > {} 2>&1".format(pathman_objs_updated_path, pathman_objs_new_path, pathman_objs_diff_path)) + if os.stat(pathman_objs_diff_path).st_size != 0: + print("pathman objects dumps are not equal, check out the diff at {}".format(pathman_objs_diff_path)) + # sys.exit(1) + + print("just in case, checking that dump can be restored...") + node.safe_psql('postgres', 'create database tmp') + node.restore(dump_updated_path, 'tmp') + + print("finally, run (some) pathman regression tests") + # This is a bit tricky because we want to run tests on exactly this + # installation of extension. 
It means we must create db beforehand, + tell pg_regress not to create it and discard all create/drop extension + from tests. + # Not all tests can be thus adapted instantly, so I think that's enough + # for now. + # generated with smth like ls ~/postgres/pg_pathman/sql/ | sort | sed 's/.sql//' | xargs -n 1 printf "'%s',\n" + os.chdir(tmp_pathman_path) + REGRESS = ['pathman_array_qual', + 'pathman_bgw', + 'pathman_callbacks', + 'pathman_column_type', + 'pathman_cte', + 'pathman_domains', + 'pathman_dropped_cols', + 'pathman_expressions', + 'pathman_foreign_keys', + 'pathman_gaps', + 'pathman_inserts', + 'pathman_interval', + 'pathman_lateral', + 'pathman_only', + 'pathman_param_upd_del', + 'pathman_permissions', + 'pathman_rebuild_deletes', + 'pathman_rebuild_updates', + 'pathman_rowmarks', + 'pathman_subpartitions', + 'pathman_update_node', + 'pathman_update_triggers', + 'pathman_utility_stmt', + 'pathman_views' + ] + outfiles = os.listdir(os.path.join(tmp_pathman_path, 'expected')) + for tname in REGRESS: + shell("sed -i '/CREATE EXTENSION pg_pathman;/d' sql/{}.sql".format(tname)) + # CASCADE also removed + shell("sed -i '/DROP EXTENSION pg_pathman/d' sql/{}.sql".format(tname)) + # there might be more than one .out file + for outfile in outfiles: + if outfile.startswith(tname): + shell("sed -i '/CREATE EXTENSION pg_pathman;/d' expected/{}".format(outfile)) + shell("sed -i '/DROP EXTENSION pg_pathman/d' expected/{}".format(outfile)) + + # time.sleep(43243242) + shell("make USE_PGXS=1 PGPORT={} EXTRA_REGRESS_OPTS=--use-existing REGRESS='{}' installcheck 2>&1".format(node.port, " ".join(REGRESS))) + + node.stop() + + print("It's Twelve O'clock and All's Well.") diff --git a/tests/update/dump_pathman_objects.sql b/tests/update/dump_pathman_objects.sql new file mode 100644 index 00000000..e1a632ca --- /dev/null +++ b/tests/update/dump_pathman_objects.sql @@ -0,0 +1,16 @@ +CREATE EXTENSION IF NOT EXISTS pg_pathman; + +SELECT pg_get_functiondef(objid) +FROM
pg_catalog.pg_depend JOIN pg_proc ON pg_proc.oid = pg_depend.objid +WHERE refclassid = 'pg_catalog.pg_extension'::REGCLASS AND + refobjid = (SELECT oid + FROM pg_catalog.pg_extension + WHERE extname = 'pg_pathman') AND + deptype = 'e' +ORDER BY objid::regprocedure::TEXT ASC; + +\d+ pathman_config +\d+ pathman_config_params +\d+ pathman_partition_list +\d+ pathman_cache_stats +\d+ pathman_concurrent_part_tasks diff --git a/tests/update/get_sql_diff b/tests/update/get_sql_diff new file mode 100755 index 00000000..876717a8 --- /dev/null +++ b/tests/update/get_sql_diff @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +PG_VER=$1 +WORK_DIR=/tmp/pg_pathman +BRANCH_1=$2 +BRANCH_2=$3 + + +if [ -z "$PG_VER" ]; then + PG_VER=10 +fi + +if [ -z "$BRANCH_1" ]; then + BRANCH_1=master +fi + +if [ -z "$BRANCH_2" ]; then + BRANCH_2=$(git tag | sort -V | tail -1) +fi + + +printf "PG:\\t$PG_VER\\n" +printf "BRANCH_1:\\t$BRANCH_1\\n" +printf "BRANCH_2:\\t$BRANCH_2\\n" + + +cp -R "$(dirname $0)" "$WORK_DIR" + +git checkout "$BRANCH_1" + +norsu pgxs "$PG_VER" -- clean install +norsu run "$PG_VER" --pgxs --psql < "$WORK_DIR"/dump_pathman_objects.sql > "$WORK_DIR"/dump_1 + +git checkout "$BRANCH_2" + +norsu pgxs "$PG_VER" -- clean install +norsu run "$PG_VER" --pgxs --psql < "$WORK_DIR"/dump_pathman_objects.sql > "$WORK_DIR"/dump_2 + +diff -u "$WORK_DIR"/dump_1 "$WORK_DIR"/dump_2 > "$WORK_DIR"/diff diff --git a/travis/apt.postgresql.org.sh b/travis/apt.postgresql.org.sh deleted file mode 100644 index 22814fa7..00000000 --- a/travis/apt.postgresql.org.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/sh - -# script to add apt.postgresql.org to sources.list - -# from command like -CODENAME="$1" -# lsb_release is the best interface, but not always available -if [ -z "$CODENAME" ]; then - CODENAME=$(lsb_release -cs 2>/dev/null) -fi - # parse os-release (unreliable, does not work on Ubuntu) -if [ -z "$CODENAME" -a -f /etc/os-release ]; then - .
/etc/os-release - # Debian: VERSION="7.0 (wheezy)" - # Ubuntu: VERSION="13.04, Raring Ringtail" - CODENAME=$(echo $VERSION | sed -ne 's/.*(\(.*\)).*/\1/') -fi -# guess from sources.list -if [ -z "$CODENAME" ]; then - CODENAME=$(grep '^deb ' /etc/apt/sources.list | head -n1 | awk '{ print $3 }') -fi -# complain if no result yet -if [ -z "$CODENAME" ]; then - cat < /etc/apt/sources.list.d/pgdg.list < cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status - fi - - # don't forget to "make clean" - make clean USE_PGXS=1 PG_CONFIG=$config_path -fi - -# build pg_pathman -make USE_PGXS=1 PG_CONFIG=$config_path -sudo make install USE_PGXS=1 PG_CONFIG=$config_path - -# add pg_pathman to shared_preload_libraries and restart cluster 'test' -sudo bash -c "echo \"shared_preload_libraries = 'pg_pathman'\" >> /etc/postgresql/$PGVERSION/test/postgresql.conf" -sudo pg_ctlcluster $PGVERSION test restart - -# run regression tests -PGPORT=55435 make installcheck USE_PGXS=1 PGUSER=postgres PG_CONFIG=$config_path || status=$? - -# show diff if it exists -if test -f regression.diffs; then cat regression.diffs; fi - -set +u - -# create a virtual environment and activate it -virtualenv /tmp/envs/pg_pathman -source /tmp/envs/pg_pathman/bin/activate - -# install pip packages -pip install $pip_packages - -# set permission to write postgres locks -sudo chmod a+w /var/run/postgresql/ - -# run python tests -cd tests -PG_CONFIG=$config_path python -m unittest partitioning_test || status=$? - -set -u - -exit $status